From 655c22c77f21b360ba00b8c41b42fd0d0feec974 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 2 May 2016 13:29:10 -0400 Subject: [PATCH 0001/1936] Add an explicit test-config phase to devstack plugins This commit adds a new phase to the devstack plugin interface for configuring test environments. It runs after everything in devstack (except for the final output commands) to ensure that tempest or any other dependency is installed prior to running it. Change-Id: I52128756f18d3857963a0687de77f7cdfd11fb3e --- doc/source/plugins.rst | 2 ++ stack.sh | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 83e5609efa..70469d6876 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -99,6 +99,8 @@ The current full list of ``mode`` and ``phase`` are: should exist at this point. - **extra** - Called near the end after layer 1 and 2 services have been started. + - **test-config** Called at the end of devstack used to configure tempest + or any other test environments - **unstack** - Called by ``unstack.sh`` before other services are shut down. diff --git a/stack.sh b/stack.sh index 3de9af2725..0d5f16990f 100755 --- a/stack.sh +++ b/stack.sh @@ -1367,6 +1367,12 @@ if is_service_enabled cinder; then fi fi +# Run test-config +# --------------- + +# Phase: test-config +run_phase stack test-config + # Fin # === From a464ea767a027b3d3c658b8f997f43f91af0349f Mon Sep 17 00:00:00 2001 From: Yi Zhao Date: Thu, 12 May 2016 10:32:58 +0800 Subject: [PATCH 0002/1936] Fix error reported due to re-add ipv6 address This commit fixes devstack fails when re-stack due to re-add ipv6 address. Change-Id: I9ff62023dbc29a88aec3c48af331c0a49a1270bb Closes-Bug: #1579985 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 177dc1f028..80af0bb190 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -358,7 +358,7 @@ function _neutron_configure_router_v6 { local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} # Configure interface for public bridge - sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface fi _neutron_set_router_id From 958c169d796ab82521b69b35a13ee27037e68e97 Mon Sep 17 00:00:00 2001 From: Brian Ober Date: Wed, 11 May 2016 19:11:31 -0500 Subject: [PATCH 0003/1936] Enable Swift Account Management This patch enables account management by default in Swift. This will be leveraged by Tempest test cases validating account management APIs. Depends-On: Id29f5ca48f92cd139535be7064107b8a61b02856 Change-Id: Ic01432939ed9b4cf0cbf20e3244d4d76847f539f --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index 8cb94efa66..950b3f20c0 100644 --- a/lib/swift +++ b/lib/swift @@ -428,6 +428,7 @@ function configure_swift { sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server allow_account_management true # Configure Crossdomain iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" From c35110e7c5c35dd1edc310dc3d0bb8693e58d336 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Wed, 18 May 2016 10:38:51 -0400 Subject: [PATCH 0004/1936] Neutron: check if a plugin has the l3 API extension If a plugin has the L3 API extension available, issue the L3 API extension calls that creates routers and networks Change-Id: I77e269ce0025054bcf2a2f4156124f2921ba2d59 --- lib/neutron_plugins/services/l3 | 15 +++++++++------ lib/tempest | 3 +-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a5a6c81ea4..55597a695d 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -175,9 +175,7 @@ function create_neutron_initial_network { fi fi - AUTO_ALLOCATE_EXT=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list | grep 'auto-allocated-topology' | get_field 1) - SUBNETPOOL_EXT=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list | grep 'subnet_allocation' | get_field 1) - if is_service_enabled q-l3; then + if is_networking_extension_supported "router" && is_networking_extension_supported "external-net"; then # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. @@ -189,10 +187,8 @@ function create_neutron_initial_network { die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi - # if the extension is available, then mark the external - # network as default, and provision default subnetpools EXTERNAL_NETWORK_FLAGS="--router:external" - if [[ -n $AUTO_ALLOCATE_EXT && -n $SUBNETPOOL_EXT ]]; then + if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default" if [[ "$IP_VERSION" =~ 4.* ]]; then SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) @@ -364,3 +360,10 @@ function is_provider_network { fi return 1 } + +function is_networking_extension_supported { + local extension=$1 + # TODO(sc68cal) cache this instead of calling every time + EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value) + [[ $EXT_LIST =~ $extension ]] && return 0 +} diff --git a/lib/tempest b/lib/tempest index d4d8cf25df..8cae4d987e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -240,8 +240,7 @@ function configure_tempest { # the public network (for floating ip access) is only available # if the extension is enabled. - EXTERNAL_NETWORK_EXT=$(neutron ext-list | grep 'external-net' | get_field 1) - if [[ -n $EXTERNAL_NETWORK_EXT ]]; then + if is_networking_extension_supported 'external-net'; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') fi From 0578e42fdbb6c626a3b07465ef0df3b5211efb9e Mon Sep 17 00:00:00 2001 From: Rodrigo Duarte Date: Wed, 16 Mar 2016 12:15:07 -0300 Subject: [PATCH 0005/1936] Tempest: add a Keystone reseller feature flag A new tempest test is being added in https://review.openstack.org/#/c/285541/ but it is not supported in the Kilo and Liberty branches. This patch turns on this feature flag at Devstacks's side. According to tempest policies, this patch must be merged first so the test can actually run. 
Change-Id: I52458a0b36e1dba233667311b35f6c3931e2e66c Depends-On: Ie69dae09c2b42e825e9d51abf158fc14788387d1 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index d4d8cf25df..ee4550155e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -282,6 +282,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi + # Identity Features + # TODO(rodrigods): Remove the reseller flag when Kilo and Liberty are end of life. + iniset $TEMPEST_CONFIG identity-feature-enabled reseller True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From 8d0d3115ccf528ff5ae1533a670fc3bd475d0bcc Mon Sep 17 00:00:00 2001 From: Ivan Kolodyazhny Date: Thu, 26 May 2016 23:41:49 +0300 Subject: [PATCH 0006/1936] Allow override of python-brick-cinderclient-ext library used by cinder Added to requirements: https://review.openstack.org/309084 Functional tests were added https://review.openstack.org/265811 But they still use the version of python-brick-cinderclient-ext from pip. This change updates devstack to pull in the changes from python-brick-cinderclient-ext patch sets instead, when configured to do so. Change-Id: I6d0f09950ea1200d3367a53aa4a3eea9be7abc66 Needed-by: I34f3b5ceaad7a50b1e9cadcc764f61c0aabe086d --- lib/cinder | 6 ++++++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 9c818c6eb2..de67593f72 100644 --- a/lib/cinder +++ b/lib/cinder @@ -40,6 +40,7 @@ fi # set up default directories GITDIR["python-cinderclient"]=$DEST/python-cinderclient GITDIR["os-brick"]=$DEST/os-brick +GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder # Cinder virtual environment @@ -466,6 +467,11 @@ function install_cinder { # install_cinderclient() - Collect source and prepare function install_cinderclient { + if use_library_from_git "python-brick-cinderclient-ext"; then + git_clone_by_name "python-brick-cinderclient-ext" + setup_dev_lib "python-brick-cinderclient-ext" + fi + if use_library_from_git "python-cinderclient"; then git_clone_by_name "python-cinderclient" setup_dev_lib "python-cinderclient" diff --git a/stackrc b/stackrc index 8e1900dd94..969d77d95c 100644 --- a/stackrc +++ b/stackrc @@ -285,6 +285,10 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} +# os-brick client for local volume attachement +GITREPO["python-brick-cinderclient-ext"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} +GITBRANCH["python-brick-cinderclient-ext"]=${CINDERCLIENT_BRANCH:-master} + # python glance client library GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index a979c346b8..bb58088ef3 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -42,7 +42,7 @@ ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" -ALL_LIBS+=" diskimage-builder os-vif" +ALL_LIBS+=" 
diskimage-builder os-vif python-brick-cinderclient-ext" # Generate the above list with # echo ${!GITREPO[@]} From c47bd1df65d6c327a34d1a9cd771940595bf8c87 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 29 May 2016 00:01:01 -0700 Subject: [PATCH 0007/1936] Remove verbose setting from l3 service plugin This is following the nail: http://lists.openstack.org/pipermail/openstack-dev/2016-May/095166.html Change-Id: I4380279992e53ec9926bbcba5524e98f5c96ce8b --- lib/neutron_plugins/services/l3 | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a5a6c81ea4..a53995ef25 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -91,7 +91,6 @@ function _configure_neutron_l3_agent { cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE - iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then From 47bf3f931c60237f50f717e4885c2b64590e3160 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 1 Jun 2016 07:01:00 +0000 Subject: [PATCH 0008/1936] Updated from generate-devstack-plugins-list Change-Id: If7c8f6162d3e846a360cc7f34ce2ebf52054f45f --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 5f796aec15..2760f854a4 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -61,6 +61,7 @@ freezer-web-ui `git://git.openstack.org/openstack/freeze gce-api `git://git.openstack.org/openstack/gce-api `__ gnocchi `git://git.openstack.org/openstack/gnocchi `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ +higgins `git://git.openstack.org/openstack/higgins `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ kingbird `git://git.openstack.org/openstack/kingbird `__ @@ -70,6 +71,7 @@ magnum-ui `git://git.openstack.org/openstack/magnum manila `git://git.openstack.org/openstack/manila `__ mistral `git://git.openstack.org/openstack/mistral `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ +monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ murano `git://git.openstack.org/openstack/murano `__ From 72b233c1e042dd55cc7e7785f2e1ecd6ae4fc1aa Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 1 Jun 2016 16:43:07 +0200 Subject: [PATCH 0009/1936] Allow to use Fedora 24 with devstack Looks like f24 does not have any special change compared to the previous release, we just need to add f24 where f23 present. 
Change-Id: Ia4a58de4973ef228735c48b33453a0562dc65258 --- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/ceph | 2 +- stack.sh | 2 +- tools/install_pip.sh | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index 2d4a97a743..ee2e8a058b 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -7,9 +7,9 @@ gcc-c++ gettext # used for compiling message catalogs git-core graphviz # needed only for docs -iptables-services # NOPRIME f22,f23 +iptables-services # NOPRIME f22,f23,f24 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f22,f23 +java-1.8.0-openjdk-headless # NOPRIME f22,f23,f24 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index 0312e856dd..594393e733 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f22,f23 +kernel-modules # dist:f22,f23,f24 kpartx kvm # NOPRIME libvirt-bin # NOPRIME diff --git a/files/rpms/swift b/files/rpms/swift index 46dc59d74d..1e05167bcf 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f22,f23 +rsync-daemon # dist:f22,f23,f24 sqlite xfsprogs xinetd diff --git a/lib/ceph b/lib/ceph index 3e0839aaa2..e999647ed8 100644 --- a/lib/ceph +++ b/lib/ceph @@ -116,7 +116,7 @@ function undefine_virsh_secret { # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f22|f23) ]]; then + if [[ ! ${DISTRO} =~ (trusty|f22|f23|f24) ]]; then echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)" if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" diff --git a/stack.sh b/stack.sh index 68e3d936b8..173628ebba 100755 --- a/stack.sh +++ b/stack.sh @@ -185,7 +185,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|rhel7|kvmibm1) ]]; then +if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|f24|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/tools/install_pip.sh b/tools/install_pip.sh index dfa4f42573..12676998d2 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -116,7 +116,7 @@ get_versions # Eradicate any and all system packages -# Python in f23 and f22 depends on the python-pip package so removing it +# Python in fedora depends on the python-pip package so removing it # results in a nonfunctional system. pip on fedora installs to /usr so pip # can safely override the system pip for all versions of fedora if ! 
is_fedora ; then From 07dc2bf7760fa1c5b28d2dd71fb4eebd7a8921f2 Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Wed, 1 Jun 2016 12:21:00 +0200 Subject: [PATCH 0010/1936] functions-libvirt: Add log filter to capture CPU driver errors Two things: (a) Add the log filter to capture libvirt CPU manipulation driver related error messages when things fallout (e.g. CPU model comparision failures during live migration). (b) While we're at it, remove the "1:qemu_monitor" log filter, because the existing filter "1:qemu" should take care of logging the interactions with QEMU monitor console. This is the case since the introduction of VIR_LOG_INIT() macro in upstream libvirt, which performs a substring match on a given file name. (Available from libvirt version v1.2.10 onwards). Change-Id: I75befd52d9f892eb5a6236eee9a397fab7602ecc --- lib/nova_plugins/functions-libvirt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index dbb4d4fb4b..09723db166 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -108,9 +108,9 @@ EOF # source file paths, not relative paths. This screws with the matching # of '1:libvirt' making everything turn on. So use libvirt.c for now. # This will have to be re-visited when Ubuntu ships libvirt >= 1.2.3 - local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor" + local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" else - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor" + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" fi local log_outputs="1:file:/var/log/libvirt/libvirtd.log" if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then From 09604349f89f59ee2ac07b4daa93bd1ae52d1b79 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 2 Jun 2016 10:29:43 -0400 Subject: [PATCH 0011/1936] Change ovs_base neutron plugin to use vercmp This plugin was using a deprecated function, vercmp_numbers(), that wasn't actually working properly because the call to 'deprecated' at the beginning was causing garbage to be returned to the caller. For example, this was always in stack.sh.log when using OVS: .../lib/neutron_plugins/ovs_base: line 57: [: too many arguments Update to use vercmp() like all other users in devstack, and remove all the old code. Change-Id: I352362cf59e492fa9f7725190f0243f2436ac347 --- functions | 55 ------------------------------------ lib/neutron_plugins/ovs_base | 2 +- 2 files changed, 1 insertion(+), 56 deletions(-) diff --git a/functions b/functions index 8cdd6d8171..aa12e1e826 100644 --- a/functions +++ b/functions @@ -511,61 +511,6 @@ function check_path_perm_sanity { } -# This function recursively compares versions, and is not meant to be -# called by anything other than vercmp_numbers below. This function does -# not work with alphabetic versions. -# -# _vercmp_r sep ver1 ver2 -function _vercmp_r { - typeset sep - typeset -a ver1=() ver2=() - sep=$1; shift - ver1=("${@:1:sep}") - ver2=("${@:sep+1}") - - if ((ver1 > ver2)); then - echo 1; return 0 - elif ((ver2 > ver1)); then - echo -1; return 0 - fi - - if ((sep <= 1)); then - echo 0; return 0 - fi - - _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" -} - - -# This function compares two versions and is meant to be called by -# external callers. 
Please note the function assumes non-alphabetic -# versions. For example, this will work: -# -# vercmp_numbers 1.10 1.4 -# -# The above will return "1", as 1.10 is greater than 1.4. -# -# vercmp_numbers 5.2 6.4 -# -# The above will return "-1", as 5.2 is less than 6.4. -# -# vercmp_numbers 4.0 4.0 -# -# The above will return "0", as the versions are equal. -# -# vercmp_numbers ver1 ver2 -function vercmp_numbers { - typeset v1=$1 v2=$2 sep - typeset -a ver1 ver2 - - deprecated "vercmp_numbers is deprecated for more generic vercmp" - - IFS=. read -ra ver1 <<< "$v1" - IFS=. read -ra ver2 <<< "$v2" - - _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" -} - # vercmp ver1 op ver2 # Compare VER1 to VER2 # - op is one of < <= == >= > diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 59c7737181..95f4663a52 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -54,7 +54,7 @@ function _neutron_ovs_base_install_ubuntu_dkms { local kernel_major_minor kernel_major_minor=`echo $kernel_version | cut -d. -f1-2` # From kernel 3.13 on, openvswitch-datapath-dkms is not needed - if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then + if vercmp '$kernel_major_minor' '<' '3.13'; then install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version" fi } From 7e603d1bf8689a50ffa3dd93c901a71ea5f5ae36 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 1 Jun 2016 18:16:14 -0400 Subject: [PATCH 0012/1936] Move tempest plugin install to the end For the tempest plugin install inside the tox venv to hold we need to ensure that it's the last thing run that touches the tox venv before devstack ends. Otherwise there is a chance we'll recreate the venv in a later step of installing and configuring tempest. This commit moves the plugin installation into it's own function and calls that function as last phase of the tempest setup to make sure it runs last. Change-Id: Ie253171537e8c5a9887cc30aba1cad4b31e57663 --- extras.d/80-tempest.sh | 2 ++ lib/tempest | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index fcf79bd4d9..6a3d121497 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -13,6 +13,8 @@ if is_service_enabled tempest; then elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Tempest" configure_tempest + echo_summary "Installing Tempest Plugins" + install_tempest_plugins elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # local.conf Tempest option overrides : diff --git a/lib/tempest b/lib/tempest index 6f8e29395c..c492182fee 100644 --- a/lib/tempest +++ b/lib/tempest @@ -600,6 +600,12 @@ function install_tempest { # running pip install -U on tempest requirements $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + popd +} + +# install_tempest_plugins() - Install any specified plugins into the tempest venv +function install_tempest_plugins { + pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then tox -evenv-tempest -- pip install $TEMPEST_PLUGINS echo "Checking installed Tempest plugins:" From 6176ae6895c3c62e9406ebca0055fed9a58f35d1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 May 2016 12:10:08 +0200 Subject: [PATCH 0013/1936] Use transport_url instead of driver-specif options Future oslo.messaging is going to deprecate usage of driver-specific options for hosts/port/user/password options. 
This change uses transport_url that exists since a while now and works with all drivers (even devstack handles only the rabbit one). Change-Id: I3006b96ff93a3468249177c31c359c2f9ddc5db6 --- lib/nova | 2 +- lib/rpc_backend | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index de898b23fd..e928cf264c 100644 --- a/lib/nova +++ b/lib/nova @@ -639,7 +639,7 @@ function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB` - iniset $NOVA_CELLS_CONF oslo_messaging_rabbit rabbit_virtual_host child_cell + iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF cells enable True iniset $NOVA_CELLS_CONF cells cell_type compute diff --git a/lib/rpc_backend b/lib/rpc_backend index 05e303e3e7..0ee46dca6f 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -104,8 +104,9 @@ function restart_rpc_backend { # builds transport url string function get_transport_url { + local virtual_host=$1 if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then - echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/" + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" fi } @@ -114,11 +115,9 @@ function iniset_rpc_backend { local package=$1 local file=$2 local section=${3:-DEFAULT} + local virtual_host=$4 if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then - iniset $file $section rpc_backend "rabbit" - iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST - iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD - iniset $file oslo_messaging_rabbit rabbit_userid $RABBIT_USERID + iniset $file $section transport_url $(get_transport_url "$virtual_host") if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD fi From 214459cdf776a77dacd70a9205c5b8a6ceaff31a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 2 Jun 2016 10:29:59 -0400 Subject: [PATCH 0014/1936] remove addition of gate64 cpu Nova has been fixed to not need this work around. We shouldn't keep it in devstack. Change-Id: Ie2b1c6b8ddce4a2fd94af06745d59455208f0633 --- lib/nova_plugins/functions-libvirt | 15 +---- tools/cpu_map_update.py | 88 ------------------------------ 2 files changed, 3 insertions(+), 100 deletions(-) delete mode 100755 tools/cpu_map_update.py diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index dbb4d4fb4b..b4a7a7b28f 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -121,18 +121,9 @@ EOF fi fi - # Update the libvirt cpu map with a gate64 cpu model. This enables nova - # live migration for 64bit guest OSes on heterogenous cloud "hardware". - if [[ -f /usr/share/libvirt/cpu_map.xml ]] ; then - sudo $TOP_DIR/tools/cpu_map_update.py /usr/share/libvirt/cpu_map.xml - fi - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. Use a stop start as otherwise the new - # cpu_map is not loaded properly on some systems (Ubuntu). 
- stop_service $LIBVIRT_DAEMON - start_service $LIBVIRT_DAEMON + # Service needs to be started on redhat/fedora -- do a restart for + # sanity after fiddling the config. + restart_service $LIBVIRT_DAEMON } diff --git a/tools/cpu_map_update.py b/tools/cpu_map_update.py deleted file mode 100755 index 92b7b8f9a3..0000000000 --- a/tools/cpu_map_update.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This small script updates the libvirt CPU map to add a gate64 cpu model -# that can be used to enable a common 64bit capable feature set across -# devstack nodes so that features like nova live migration work. - -import sys -import xml.etree.ElementTree as ET -from xml.dom import minidom - - -def update_cpu_map(tree): - root = tree.getroot() - cpus = root#.find("cpus") - x86 = None - for arch in cpus.findall("arch"): - if arch.get("name") == "x86": - x86 = arch - break - if x86 is not None: - # Create a gate64 cpu model that is core2duo less monitor, pse36, - # vme, and ssse3. - gate64 = ET.SubElement(x86, "model") - gate64.set("name", "gate64") - ET.SubElement(gate64, "vendor").set("name", "Intel") - ET.SubElement(gate64, "feature").set("name", "fpu") - ET.SubElement(gate64, "feature").set("name", "de") - ET.SubElement(gate64, "feature").set("name", "pse") - ET.SubElement(gate64, "feature").set("name", "tsc") - ET.SubElement(gate64, "feature").set("name", "msr") - ET.SubElement(gate64, "feature").set("name", "pae") - ET.SubElement(gate64, "feature").set("name", "mce") - ET.SubElement(gate64, "feature").set("name", "cx8") - ET.SubElement(gate64, "feature").set("name", "apic") - ET.SubElement(gate64, "feature").set("name", "sep") - ET.SubElement(gate64, "feature").set("name", "pge") - ET.SubElement(gate64, "feature").set("name", "cmov") - ET.SubElement(gate64, "feature").set("name", "pat") - ET.SubElement(gate64, "feature").set("name", "mmx") - ET.SubElement(gate64, "feature").set("name", "fxsr") - ET.SubElement(gate64, "feature").set("name", "sse") - ET.SubElement(gate64, "feature").set("name", "sse2") - ET.SubElement(gate64, "feature").set("name", "mtrr") - ET.SubElement(gate64, "feature").set("name", "mca") - ET.SubElement(gate64, "feature").set("name", "clflush") - ET.SubElement(gate64, "feature").set("name", "pni") - ET.SubElement(gate64, "feature").set("name", "nx") - ET.SubElement(gate64, "feature").set("name", "syscall") - ET.SubElement(gate64, "feature").set("name", "lm") - - -def format_xml(root): - # Adapted from http://pymotw.com/2/xml/etree/ElementTree/create.html - # thank you dhellmann - rough_string = ET.tostring(root, encoding="UTF-8") - dom_parsed = minidom.parseString(rough_string) - return dom_parsed.toprettyxml(" ", encoding="UTF-8") - - -def main(): - if len(sys.argv) != 2: - raise Exception("Must pass path to cpu_map.xml to update") - cpu_map = sys.argv[1] - tree = ET.parse(cpu_map) - for model in tree.getroot().iter("model"): - if model.get("name") == "gate64": - # gate64 model is already present - return - 
update_cpu_map(tree) - pretty_xml = format_xml(tree.getroot()) - with open(cpu_map, 'w') as f: - f.write(pretty_xml) - - -if __name__ == "__main__": - main() From 9ee1426847569637346610512fb8c9cd7bbefbb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Fri, 3 Jun 2016 15:34:50 +0200 Subject: [PATCH 0015/1936] Set my_ip in cinder.conf Cinder uses my_ip config option to provide iscsi_targets. It gets defaulted to the IP of the first interface in the system, which is fine for some cases, but for example with Vagrant first interface can be used only to contact with host machine. To get over it we should set my_ip to HOST_IP from local.conf and this commit implements that. Change-Id: I4d2960d92f388ac689dfa6b436dc8bfc1e129fbf Closes-Bug: 1588825 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index de67593f72..bcf240953b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -270,6 +270,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" From 22c695f08a82d527668ef906e11089e6b0046a50 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 11 May 2016 12:49:07 -0500 Subject: [PATCH 0016/1936] Have a way to disable Glance v1 in devstack As part of the process of deprecating Glance's V1, the glance team would like to start testing V2-only environments. Therefore, this change provides a way to force other services to use V2. Change-Id: I87e77d07964eac01e9a796817cbc88bd6e59c721 --- lib/cinder | 4 ++++ lib/glance | 7 +++++++ lib/tempest | 3 +++ 3 files changed, 14 insertions(+) diff --git a/lib/cinder b/lib/cinder index 17862327c4..7763599bae 100644 --- a/lib/cinder +++ b/lib/cinder @@ -342,6 +342,10 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi + if [ "$GLANCE_V1_ENABLED" != "True" ]; then + iniset $CINDER_CONF DEFAULT glance_api_version 2 + fi + # Register SSL certificates if provided if is_ssl_enabled_service cinder; then ensure_certificates CINDER diff --git a/lib/glance b/lib/glance index cda357fa66..2ad971fed7 100644 --- a/lib/glance +++ b/lib/glance @@ -57,6 +57,7 @@ GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini +GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-True} if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" @@ -134,6 +135,12 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi + # NOTE(flaper87): To uncomment as soon as all services consuming Glance are + # able to consume V2 entirely. 
+ if [ "$GLANCE_V1_ENABLED" != "True" ]; then + iniset $GLANCE_API_CONF DEFAULT enable_v1_api False + fi + # Store specific configs iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ if is_service_enabled g-glare; then diff --git a/lib/tempest b/lib/tempest index e556935cb5..19042ef0c2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -292,6 +292,9 @@ function configure_tempest { # Image Features iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True + if [ "$GLANCE_V1_ENABLED" != "True" ]; then + iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False + fi # Compute iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED From b97a60e90c46c101b6c2c9a5dbf42140bd8bb8d1 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 3 Jun 2016 15:59:11 -0500 Subject: [PATCH 0017/1936] Keystone uwsgi performance tuning I ran some tests locally that showed that when using the uwsgi deploy the keystone server wasn't using all the processes available. When I switched from "threads" to "processes" the concurrent performance improved considerably. So I'm proposing that devstack switch to processes to improve performance. Change-Id: I8cfe9272e098e636441b7cfb51bff08d62c3336e --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index c94bcd3a71..26fa1d43da 100644 --- a/lib/keystone +++ b/lib/keystone @@ -318,10 +318,10 @@ function configure_keystone { fi iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-public" - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi threads $(nproc) + iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi processes $(nproc) iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-admin" - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi threads $API_WORKERS + iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi processes $API_WORKERS # Common settings for file in "$KEYSTONE_PUBLIC_UWSGI_FILE" "$KEYSTONE_ADMIN_UWSGI_FILE"; do From a1c70f2a7519bd7482b2834f380f2a9f5f61fb5a Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Tue, 31 May 2016 14:43:14 +1000 Subject: [PATCH 0018/1936] neutron-legacy: Consistently use `--config-file foo` This change adjusts a few instances of `--config-file=foo` to `--config-file foo` (no `=`) in order to make neutron command lines more consistent and easier to match in sudoers/rootwrap filters. This is particularly useful for oslo.privsep, which needs to start a helper command with the same `--config-file` arguments (see Ia9675dff9232e0e987a836ecaf9e842eb5c3cb18). 
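As an illustration (the path here is only a hypothetical example): both spellings start the agent the same way, but they produce different argv, and only the space-separated form keeps the option name as its own argv element, which is what a single rootwrap/sudoers pattern can match:

    # one argv element: "--config-file=/etc/neutron/l3_agent.ini"
    neutron-l3-agent --config-file=/etc/neutron/l3_agent.ini
    # two argv elements: "--config-file" "/etc/neutron/l3_agent.ini"
    neutron-l3-agent --config-file /etc/neutron/l3_agent.ini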
Change-Id: I91fe18f66f3c3bc2ccd1ca8be91be2915ed3e3ec --- lib/neutron-legacy | 8 ++++---- lib/neutron_plugins/services/l3 | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5e5207bc63..73123ef1d5 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -297,7 +297,7 @@ function _determine_config_server { } function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" if is_service_enabled q-fwaas; then opts+=" --config-file $Q_FWAAS_CONF_FILE" fi @@ -524,7 +524,7 @@ function start_mutnauq_l2_agent { } function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" if is_service_enabled neutron-vpnaas; then : # Started by plugin @@ -532,8 +532,8 @@ function start_mutnauq_other_agents { run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" fi - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" - run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME" run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" if [ "$VIRT_DRIVER" = 'xenserver' ]; then diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a5a6c81ea4..3a6f6eb53f 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -83,7 +83,7 @@ SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" echo "$opts" } From f179eb7c4798d5058ebbfd1c7b851070f86fd5ac Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Thu, 2 Jun 2016 01:24:22 -0700 Subject: [PATCH 0019/1936] Support installing OpenStack on XenServer 7.0 XenServer 7.0 has changed the names of some ISO files, which made the devstack script install_os_domU.sh fail to install the VM before installing OpenStack.
This patch is to fix the problem, make install_os_domU.sh support 7.0 and other prior versions of XenServer Change-Id: I49459bfff2b101fc6927eb4578c5eb47cc8c3ad6 --- tools/xen/install_os_domU.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 8b97265e21..3a61215b5b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -183,10 +183,8 @@ if [ -z "$templateuuid" ]; then # Copy the tools DEB to the XS web server XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb" ISO_DIR="/opt/xensource/packages/iso" - XS_TOOLS_FILE_NAME="xs-tools.deb" - XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME" if [ -e "$ISO_DIR" ]; then - TOOLS_ISO=$(ls -1 $ISO_DIR/xs-tools-*.iso | head -1) + TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1) TMP_DIR=/tmp/temp.$RANDOM mkdir -p $TMP_DIR mount -o loop $TOOLS_ISO $TMP_DIR From 84409516d56417464dfe0c4e6904a1a76f9fa254 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 18 Aug 2015 17:24:44 -0400 Subject: [PATCH 0020/1936] Remove fwaas from DevStack Depends-On: Iadcee07e873fcb4f099ebccc2e33780e74438140 Change-Id: Ic60cd1fa90c19dfac00be583e2ddc5633dbb68a3 --- lib/neutron-legacy | 25 -------------------- lib/neutron_plugins/services/firewall | 33 --------------------------- 2 files changed, 58 deletions(-) delete mode 100644 lib/neutron_plugins/services/firewall diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 7eb8637634..5f117631cf 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -101,7 +101,6 @@ AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" # loaded from per-plugin scripts in lib/neutron_plugins/ Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini # Default name for Neutron database @@ -267,13 +266,8 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/metering -# Firewall Service Plugin functions -# --------------------------------- -source $TOP_DIR/lib/neutron_plugins/services/firewall - # L3 Service functions source $TOP_DIR/lib/neutron_plugins/services/l3 - # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -334,10 +328,6 @@ function configure_mutnauq { if is_service_enabled q-metering; then _configure_neutron_metering fi - if is_service_enabled q-fwaas; then - deprecated "Configuring q-fwaas through devstack is deprecated" - _configure_neutron_fwaas - fi if is_service_enabled q-agt q-svc; then _configure_neutron_service fi @@ -435,10 +425,6 @@ function install_mutnauq { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR - if is_service_enabled q-fwaas; then - git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH - setup_develop $NEUTRON_FWAAS_DIR - fi if is_service_enabled q-lbaas; then git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH setup_develop $NEUTRON_LBAAS_DIR @@ -580,9 +566,6 @@ function stop_mutnauq_other { if is_service_enabled q-lbaas; then neutron_lbaas_stop fi - if is_service_enabled q-fwaas; then - neutron_fwaas_stop - fi if is_service_enabled q-metering; then neutron_metering_stop fi @@ -877,14 +860,6 @@ function _configure_neutron_metering { 
neutron_agent_metering_configure_agent } -function _configure_neutron_fwaas { - if [ -f $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf ]; then - cp $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf $NEUTRON_CONF_DIR - fi - neutron_fwaas_configure_common - neutron_fwaas_configure_driver -} - function _configure_dvr { iniset $NEUTRON_CONF DEFAULT router_distributed True iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall deleted file mode 100644 index 40968fa04c..0000000000 --- a/lib/neutron_plugins/services/firewall +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Neutron firewall plugin -# --------------------------- - -# Save trace setting -_XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace) -set +o xtrace - -FWAAS_PLUGIN=${FWAAS_PLUGIN:-neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin} -FWAAS_DRIVER=${FWAAS_DRIVER:-neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver} - -function neutron_fwaas_configure_common { - _neutron_service_plugin_class_add $FWAAS_PLUGIN -} - -function neutron_fwaas_configure_driver { - # Uses oslo config generator to generate FWaaS sample configuration files - (cd $NEUTRON_FWAAS_DIR && exec ./tools/generate_config_file_samples.sh) - - FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini - cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME - - iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True - iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "$FWAAS_DRIVER" -} - -function neutron_fwaas_stop { - : -} - -# Restore xtrace -$_XTRACE_NEUTRON_FIREWALL From 8063fee829d1e22bb7958599d8b0359846cde462 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 24 May 2016 11:27:36 -0700 Subject: [PATCH 0021/1936] Add neutron-metering agent and configuration This is the equivalent of the q-metering from neutron-legacy Change-Id: Ie2ad6e18cfd6f5cd9af0da30bc36a1cd27e39189 --- lib/neutron | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/neutron b/lib/neutron index fa2e926995..966bb54b81 100644 --- a/lib/neutron +++ b/lib/neutron @@ -242,6 +242,13 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" fi + # Metering + if is_service_enabled neutron-metering; then + source $TOP_DIR/neutron_plugins/services/metering + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent + fi + } # configure_neutron_rootwrap() - configure Neutron's rootwrap @@ -428,6 +435,10 @@ function start_neutron_new { if is_service_enabled neutron-metadata-agent; then run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG" fi + + if is_service_enabled neutron-metering; then + run_process neutron-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + fi } # stop_neutron() - Stop running processes (non-screen) From f90c8e105ba71fb8ac019b61d2a0bb94d9164169 Mon Sep 17 00:00:00 2001 From: Mike Turek Date: Tue, 19 Jan 2016 19:28:51 +0000 Subject: [PATCH 0022/1936] Set deploywait_timeout via localrc option Currently a hardcoded value is used for the DEPLOYWAIT timeout in tempest. The patch in review 269249 adds a config option to use instead of this hardcoded value. This patch allows the value to be set via the BUILD_TIMEOUT variable. 
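For example (the value is illustrative, not a devstack default), setting

    [[local|localrc]]
    BUILD_TIMEOUT=900

in local.conf would make the hunk below write deploywait_timeout = 900 into the [baremetal] section of tempest.conf.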
Change-Id: Id79014fd6e07f93029111f6c28e3537e2e39be9f Related-Bug: 1526466 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index fd98c948f2..621bd625f0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -462,6 +462,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG baremetal driver_enabled True iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG baremetal deploywait_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From 36afed43534e5f50e0598ff69c7ea8249c24b5c2 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Mon, 6 Jun 2016 20:17:48 -0700 Subject: [PATCH 0023/1936] Set img_disk_format to vhd when hypervisor is XenServer The tempest test case test_minimum_basic_scenario will always fail due to the missing configuration [scenario] img_disk_format=vhd in tempest.conf. This patch adds that configuration when XenServer is used. Change-Id: I4b916200e6eefb62f148ec8b644fb23ffc7e00a6 Closes-Bug: #1589787 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index c492182fee..1bb70c16ae 100644 --- a/lib/tempest +++ b/lib/tempest @@ -290,6 +290,7 @@ function configure_tempest { fi if [ "$VIRT_DRIVER" = "xenserver" ]; then iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" + iniset $TEMPEST_CONFIG scenario img_disk_format vhd fi # Image Features From a44dd9a741a08d9f586ae8fe1d2543bcf32f6593 Mon Sep 17 00:00:00 2001 From: Timur Sufiev Date: Fri, 29 Apr 2016 14:08:51 +0300 Subject: [PATCH 0024/1936] Support Glance CORS options in devstack configuration To properly test the integration between the Glance CORS feature and the Horizon Javascript environment that uploads image files directly to Glance (using this feature), we need to enable CORS support for Glance in integration tests. Adding a corresponding Devstack variable to configure Glance in such a way that it accepts direct requests from Horizon Javascript is the prerequisite step for the integration testing of this feature. By default Horizon and Glance are located on the same host, hence the default value cors.allowed_origin = http://$SERVICE_HOST should work. If a more complicated setup is desired, where Horizon is located on a different host, the GLANCE_CORS_ALLOWED_ORIGIN environment variable should be exported to Devstack. Partially implements blueprint: horizon-glance-large-image-upload Change-Id: I4881fb6631c2daa2ad8946210eff4bb021957374 --- lib/glance | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/glance b/lib/glance index f2a6db6edc..ce5f867430 100644 --- a/lib/glance +++ b/lib/glance @@ -143,6 +143,13 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + # CORS feature support - to allow calls from Horizon by default + if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then + iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" + else + iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" + fi + # Store the images in swift if enabled.
if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF glance_store default_store swift From 026cad84a34ff914707538470ec3ae6e2015e54b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 7 Jun 2016 10:55:54 -0400 Subject: [PATCH 0025/1936] remove local.conf documentation for phases we don't implement The local.conf docs talk about phases which don't exist for config file processing, which makes it more confusing than it needs to be. Change-Id: If7f9255eab0535c3d57a2fd5f1bc18ba4d0801aa --- doc/source/configuration.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 384ceee41a..1161b344e2 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -36,10 +36,6 @@ The defined phases are: - **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -- **pre-install** - runs after the system packages are installed but - before any of the source repositories are installed -- **install** - runs immediately after the repo installations are - complete - **post-config** - runs after the layer 2 services are configured and before they are started - **extra** - runs after services are started and before any files in From e34ec9901ea04b4fab81742f9556e486849dfb4e Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 7 Jun 2016 12:36:50 -0400 Subject: [PATCH 0026/1936] Neutron/l3 - die if provider settings for v6 are not set If you are using provider networking and have IP_VERSION set to include IPv6 (which we do by default), you must set the required variables. If you do not want this behavior, set IP_VERSION=4 This arose from a third party CI system which was configured[1] to have provider networking, but would explode when hitting the router IPv6 setup step[2] since there was no IPv6 subnet created, and IPV6_SUBNET_ID would be empty, causing a python-neutronclient error and causing stack.sh to exit.
[1]: http://paste.openstack.org/show/508710/ [2]: https://github.com/openstack-dev/devstack/blob/c35110e7c5c35dd1edc310dc3d0bb8693e58d336/lib/neutron_plugins/services/l3#L320 Change-Id: I267799b62284c3086ed7c3e2d8a9cbadb9ddcd60 --- lib/neutron_plugins/services/l3 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a5a6c81ea4..dbd2b45d15 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -150,7 +150,9 @@ function create_neutron_initial_network { die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi - if [[ "$IP_VERSION" =~ .*6 ]] && [[ -n "$IPV6_PROVIDER_FIXED_RANGE" ]] && [[ -n "$IPV6_PROVIDER_NETWORK_GATEWAY" ]]; then + if [[ "$IP_VERSION" =~ .*6 ]]; then + die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2) die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi From 75ace7a5edbb6496633b97dc1d6b60e000ff3e5f Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Tue, 7 Jun 2016 17:44:06 -0500 Subject: [PATCH 0027/1936] Correct keystone memcached host setting keystone was configured to connect to memcached on the host IP address. Unfortunately, memcached is only listening on localhost, so this setting actually hurts performance as keystone fails to connect to the memcached server. There's no indication of this in the keystone logs since this is just how memcache client works (ignoring errors). You can verify this by 1) in /etc/memcached.conf, set -vv 2) restart memcached: service memcached restart 3) watch /var/log/memcached.log 4) There will be no output with this change, there will be output in /var/log/memcached.log Also the performance should be a lot better. 
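A quicker illustrative check (assuming the stock Ubuntu/Debian memcached package, where /etc/memcached.conf carries the listen address):

    $ grep '^-l' /etc/memcached.conf
    -l 127.0.0.1

i.e. the daemon binds only to localhost, so pointing keystone at $SERVICE_HOST:11211 never reaches it.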
Change-Id: I95d798d122e2a95e27eb1d2c4e786c3cd844440b --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index c94bcd3a71..5695004c4a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -246,7 +246,7 @@ function configure_keystone { # Enable caching iniset $KEYSTONE_CONF cache enabled "True" iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool" - iniset $KEYSTONE_CONF cache memcache_servers $SERVICE_HOST:11211 + iniset $KEYSTONE_CONF cache memcache_servers localhost:11211 # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617 iniset $KEYSTONE_CONF catalog caching "False" From febd55a230982bd5a5de9bbf2444b1aa5c459ba3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 8 Jun 2016 07:11:03 +0000 Subject: [PATCH 0028/1936] Updated from generate-devstack-plugins-list Change-Id: I4ffb619ccdd4928d0772246a7c76353f21be6bb4 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2760f854a4..585c91c29c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -103,6 +103,7 @@ neutron-lbaas `git://git.openstack.org/openstack/neutro neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ nova-docker `git://git.openstack.org/openstack/nova-docker `__ +nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ From c425977a55dde6b99b07c716dc3cf82990bd4fa2 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 8 Jun 2016 16:53:06 +0100 Subject: [PATCH 0029/1936] nova.conf: set privsep helper command for os-vif plugins privsep will default to invoking privsep-helper directly via sudo, which won't work for people with a locked down sudo config. To deal with this we should explicitly configure the os-vif plugins to use nova-rootwrap for running privsep-helper. This change makes such a change for the two official in-tree os-vif plugins. Change-Id: I3d26251206a57599385f2b9f3e0ef7d91daafe35 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index af5d1222a0..65609369a9 100644 --- a/lib/nova +++ b/lib/nova @@ -483,6 +483,9 @@ function create_nova_conf { iniset $NOVA_CONF privsep_osbrick helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" + iniset $NOVA_CONF vif_plug_ovs_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" + iniset $NOVA_CONF vif_plug_linux_bridge_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" + if is_service_enabled n-api; then if is_service_enabled n-api-meta; then # If running n-api-meta as a separate service From 2ae8b09b6182d2927c684adb14c84800e460c2e2 Mon Sep 17 00:00:00 2001 From: Yi Zhao Date: Thu, 12 May 2016 12:11:24 +0800 Subject: [PATCH 0030/1936] Fix quoting in vercmp check for kernel version I352362cf59e492fa9f7725190f0243f2436ac347 switched this to vercmp, but using single-quote (') will mean that the kernel version isn't actually expanded for the comparision. I guess, like the original change, the fact it isn't working is hidden. Trusty seems to have 3.13 ... 
I can't imagine we support anything before this ... so I'd also be happy if someone with some OVS knowledge wants to just delete it. (This change was originally an alternative to I352362cf59e492fa9f7725190f0243f2436ac347 but got the quoting right) Change-Id: I9fa514885c20b1135fb0680cf61fc04628fbecbe Closes-Bug: #1580850 --- lib/neutron_plugins/ovs_base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 95f4663a52..ecf252f88b 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -54,7 +54,7 @@ function _neutron_ovs_base_install_ubuntu_dkms { local kernel_major_minor kernel_major_minor=`echo $kernel_version | cut -d. -f1-2` # From kernel 3.13 on, openvswitch-datapath-dkms is not needed - if vercmp '$kernel_major_minor' '<' '3.13'; then + if vercmp "$kernel_major_minor" "<" "3.13" ; then install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version" fi } From 90356d82709fd5fa50d496faea978488f351ba3e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 10 Jun 2016 07:59:45 +0000 Subject: [PATCH 0031/1936] Updated from generate-devstack-plugins-list Change-Id: I121cf87e3c4e26b4a223a3e9b8a028763994ad89 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 585c91c29c..bae2afdb39 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -64,6 +64,7 @@ group-based-policy `git://git.openstack.org/openstack/group- higgins `git://git.openstack.org/openstack/higgins `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ +ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ kingbird `git://git.openstack.org/openstack/kingbird `__ kuryr `git://git.openstack.org/openstack/kuryr `__ magnum `git://git.openstack.org/openstack/magnum `__ From 5cda4911adde3e8d6b20d90e52520d0587cf4399 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Thu, 9 Jun 2016 00:33:30 +0100 Subject: [PATCH 0032/1936] Enable admin_domain_scope by default in Tempest Tempest introduced a new ability to use domain scoped tokens for identity v3 admin APIs. Since domain scoped tokens can be used with the base keystone policy used in the gate, and the pre-provisioned admin user is assigned a role on the domain, turn the option alway on. 
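For reference, the identity section of the generated tempest.conf then contains roughly the following; the URIs are illustrative and only the new option is added by this change:

    [identity]
    uri = http://192.168.1.10:5000/v2.0/
    uri_v3 = http://192.168.1.10:5000/v3
    admin_domain_scope = True
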
Change-Id: Ib1bb958eee076364b407fc03e77e6882d92147d2 Depends-on: I91ca907992428a5a14fb8d48a4fad105d2906e27 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 3cd300cca1..4ddff57135 100644 --- a/lib/tempest +++ b/lib/tempest @@ -261,6 +261,8 @@ function configure_tempest { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" + # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation + iniset $TEMPEST_CONFIG identity admin_domain_scope True if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then iniset $TEMPEST_CONFIG auth admin_username $admin_username iniset $TEMPEST_CONFIG auth admin_password "$password" From 0c0c09af3b14b74b035b50956e545832b26cb6af Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Fri, 13 May 2016 10:57:29 -0400 Subject: [PATCH 0033/1936] Set volume service min and max microversions Implements Blueprint: configure-tempest-volume-microversion Related to: I3d9b3fe288333721bf3b2c6c988949f2f253bfcc Change-Id: I80c6a0c46c667291c6f7fe2a036717504c110314 --- lib/tempest | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/tempest b/lib/tempest index 3cd300cca1..4f065c998e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -433,6 +433,19 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True + local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} + local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} + if [ "$tempest_volume_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume min_microversion + else + iniset $TEMPEST_CONFIG volume min_microversion $tempest_volume_min_microversion + fi + + if [ "$tempest_volume_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume max_microversion + else + iniset $TEMPEST_CONFIG volume max_microversion $tempest_volume_max_microversion + fi if ! 
is_service_enabled c-bak; then iniset $TEMPEST_CONFIG volume-feature-enabled backup False From fd1f7ba066081eb1e2580a241f816a195c9621d3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sun, 12 Jun 2016 08:01:13 +0000 Subject: [PATCH 0034/1936] Updated from generate-devstack-plugins-list Change-Id: Ia42329d01758cafa5d59ca79eebd5f31515bef47 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index bae2afdb39..520181e2a9 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -84,6 +84,7 @@ networking-calico `git://git.openstack.org/openstack/networ networking-cisco `git://git.openstack.org/openstack/networking-cisco `__ networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ +networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ networking-midonet `git://git.openstack.org/openstack/networking-midonet `__ From a9a2f7199441d80e8d29ae9909e9b82f75f98d2e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 14 Jun 2016 08:08:37 +0000 Subject: [PATCH 0035/1936] Updated from generate-devstack-plugins-list Change-Id: Ied84c5f38a8002228e01797d56c39315ff997142 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 520181e2a9..0ceb829d96 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -71,6 +71,7 @@ magnum `git://git.openstack.org/openstack/magnum magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ manila `git://git.openstack.org/openstack/manila `__ mistral `git://git.openstack.org/openstack/mistral `__ +monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ From 1d23b93568e113169da54cff31ff8a2908648c02 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 13 Jun 2016 09:09:44 +0100 Subject: [PATCH 0036/1936] Fix brick cinderclient override CINDERCLIENT_REPO cannot refer to both python-cinderclient.git and python-brick-cinderclient-ext.git so make sure the overrides have different names. 
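With the overrides split, a local.conf that pins the two clients independently would look along these lines (repository URLs and branches are placeholders):

    [[local|localrc]]
    CINDERCLIENT_REPO=https://example.com/mirror/python-cinderclient.git
    CINDERCLIENT_BRANCH=stable/mitaka
    BRICK_CINDERCLIENT_REPO=https://example.com/mirror/python-brick-cinderclient-ext.git
    BRICK_CINDERCLIENT_BRANCH=master
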
Bug introduced by: I6d0f09950ea1200d3367a53aa4a3eea9be7abc66 Change-Id: I9cbbf71ba08ef5394537d7b294846faa3c5be5bd --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 969d77d95c..7a5b821d4e 100644 --- a/stackrc +++ b/stackrc @@ -286,8 +286,8 @@ GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} # os-brick client for local volume attachement -GITREPO["python-brick-cinderclient-ext"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} -GITBRANCH["python-brick-cinderclient-ext"]=${CINDERCLIENT_BRANCH:-master} +GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} +GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master} # python glance client library GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} From 9a0d1f90b8f0b412ebe3fa8a154f1c9222b2dde8 Mon Sep 17 00:00:00 2001 From: ricolin Date: Wed, 15 Jun 2016 01:13:45 +0800 Subject: [PATCH 0037/1936] Remove deprecated auth_plugin config Config auth_plugin in trustee group is deprecated. Change to use auth_type in trustee group instead. Closes-Bug: 1592482 Change-Id: Ib90d9c0299887201b37d26254693dc6b007a41dc --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 730adada26..c841e0a499 100644 --- a/lib/heat +++ b/lib/heat @@ -156,7 +156,7 @@ function configure_heat { # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure # the section for the client plugin associated with the trustee if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then - iniset $HEAT_CONF trustee auth_plugin password + iniset $HEAT_CONF trustee auth_type password iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD From 1c4c16ce539b73f3e198cc4f2cc74de1bb9deb13 Mon Sep 17 00:00:00 2001 From: "watanabe.isao" Date: Wed, 8 Jun 2016 14:18:10 +0900 Subject: [PATCH 0038/1936] Don't configure router in flat network Due to the fix [1] of neutron-refactor, some flat network usages of devstack installation start fale. This fix enables ML2_L3_PLUGIN to be set to empty to solve the problem. By default l3_router_plugin.L3RouterPlugin will be set to ML2_L3_PLUGIN, and for neutron, in such of configuration, router (ASA some others) will be set into supported_extension_aliases, then devstack will create a router that we do not want in a flat network. Before fix [1], we can disable q-l3 to aviod the issue. But now we don't, and we need this fix to disable the whole L3 plugin. [1] https://review.openstack.org/318145 Change-Id: I61a2142d5121e0af4cc6cdf50e6bceafaf791fb0 --- lib/neutron_plugins/ml2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 30e1b036f3..b353145c7e 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -38,7 +38,9 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} # L3 Plugin to load for ML2 -ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} +# For some flat network environment, they not want to extend L3 plugin. 
+# Make sure it is able to set empty to ML2_L3_PLUGIN. +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} function populate_ml2_config { CONF=$1 From 28128e2f7a509510fe4139bd1905ccf51bd0ceb8 Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Thu, 14 Apr 2016 14:23:53 +0200 Subject: [PATCH 0039/1936] Docs for devstack MacVTap agent setup MacVTap mechanism driver and agent have been added during Mitaka [1][2]. Now adding the related doc to run a multinode devstack with MacVTap compute nodes. [1] https://review.openstack.org/209538 [2] https://review.openstack.org/275306 Depends-On: I0dd4c0d34d5f1c35b397e5e392ce107fb984b0ba Change-Id: Ie743a207a5faeab2e2a7274fda503699f3072e98 --- doc/source/guides/neutron.rst | 100 ++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index a834314bcc..1e207970ae 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -362,6 +362,8 @@ the compute service ``nova-compute``. DevStack Configuration ---------------------- +.. _ovs-provider-network-controller: + The following is a snippet of the DevStack configuration on the controller node. @@ -553,3 +555,101 @@ setup, with small modifications for the interface mappings. LB_PHYSICAL_INTERFACE=eth0 PUBLIC_PHYSICAL_NETWORK=default LB_INTERFACE_MAPPINGS=default:eth0 + +Using MacVTap instead of Open vSwitch +------------------------------------------ + +Security groups are not supported by the MacVTap agent. Due to that, devstack +configures the NoopFirewall driver on the compute node. + +MacVTap agent does not support l3, dhcp and metadata agent. Due to that you can +chose between the following deployment scenarios: + +Single node with provider networks using config drive and external l3, dhcp +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This scenario applies, if l3 and dhcp services are provided externally, or if +you do not require them. + + +:: + + [[local|localrc]] + HOST_IP=10.0.0.2 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + RABBIT_HOST=10.0.0.2 + ADMIN_PASSWORD=secret + MYSQL_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap + Q_USE_PROVIDER_NETWORKING=True + + #Enable Neutron services + disable_service n-net + enable_plugin neutron git://git.openstack.org/openstack/neutron + ENABLED_SERVICES+=,q-agt,q-svc + + ## MacVTap agent options + Q_AGENT=macvtap + PHYSICAL_NETWORK=default + + FIXED_RANGE="203.0.113.0/24" + NETWORK_GATEWAY=203.0.113.1 + PROVIDER_SUBNET_NAME="provider_net" + PROVIDER_NETWORK_TYPE="vlan" + SEGMENTATION_ID=2010 + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [macvtap] + physical_interface_mappings = $PHYSICAL_NETWORK:eth1 + + [[post-config|$NOVA_CONF]] + force_config_drive = True + + +Multi node with MacVTap compute node +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This scenario applies, if you require OpenStack provided l3, dhcp or metadata +services. Those are hosted on a separate controller and network node, running +some other l2 agent technology (in this example Open vSwitch). This node needs +to be configured for VLAN tenant networks. + +For OVS, a similar configuration like described in the +:ref:`OVS Provider Network ` section can be +used. Just add the the following line to this local.conf, which also loads +the MacVTap mechanism driver: + +:: + + [[local|localrc]] + ... + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + ... 
+ +For the MacVTap compute node, use this local.conf: + +:: + + HOST_IP=10.0.0.3 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + RABBIT_HOST=10.0.0.2 + ADMIN_PASSWORD=secret + MYSQL_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + # Services that a compute node runs + disable_all_services + enable_plugin neutron git://git.openstack.org/openstack/neutron + ENABLED_SERVICES+=n-cpu,q-agt + + ## MacVTap agent options + Q_AGENT=macvtap + PHYSICAL_NETWORK=default + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [macvtap] + physical_interface_mappings = $PHYSICAL_NETWORK:eth1 From 24e29f2265e6fd496addf00adfcc13ba8533b912 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 15 Jun 2016 14:31:51 +0100 Subject: [PATCH 0040/1936] lib/neutron-plugins: Pass conf file parameter The 'neutron_plugin_configure_l3_agent' function expects a path to a configuration file as a parameter. This was not done for one call, resulting in the generation of a 'DEFAULT' file in the DevStack directory along with an invalid L3 configuration file. Resolve this. Change-Id: I5781cb1ec4cfc1699e61dbc324d0bdb824b56be1 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 350aed8e37..0f0ed216d0 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -99,7 +99,7 @@ function _configure_neutron_l3_agent { _neutron_setup_interface_driver $Q_L3_CONF_FILE - neutron_plugin_configure_l3_agent + neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" From dbc6a3736ddd1ed7f453898c26c1f7c550f8f8c1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 Jun 2016 08:08:45 +0000 Subject: [PATCH 0041/1936] Updated from generate-devstack-plugins-list Change-Id: I5cd5f2bafb7ace88c28c52994c339b95904ab03b --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0ceb829d96..99681b335c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -110,6 +110,7 @@ nova-lxd `git://git.openstack.org/openstack/nova-l nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ +panko `git://git.openstack.org/openstack/panko `__ python-freezerclient `git://git.openstack.org/openstack/python-freezerclient `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ From 8906b481e0d34ae1bf64a623ce5e734f50c5f2ec Mon Sep 17 00:00:00 2001 From: Richard Theis Date: Wed, 8 Jun 2016 10:28:37 -0500 Subject: [PATCH 0042/1936] Support "geneve" ML2 plugin type driver Add support for the "geneve" ML2 plugin type driver. The networking-ovn ML2 mechanism driver uses geneve for its project network type. Geneve is part of core neutron but didn't have any DevStack configuration for it. This patch set adds the necessary options. It also removes the default for ML2 type drivers to rely on the neutron default and consolidates the tunnel ranges default for gre, vxlan and geneve by using TENANT_TUNNEL_RANGES. 
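As a rough sketch (not part of this change), a local.conf selecting geneve project networks with these options could look like the following; Q_ML2_TENANT_NETWORK_TYPE and the concrete ranges are assumptions for illustration:

    [[local|localrc]]
    Q_ML2_PLUGIN_TYPE_DRIVERS=flat,vlan,geneve
    Q_ML2_TENANT_NETWORK_TYPE=geneve
    TENANT_TUNNEL_RANGES=1:1000
    # Only needed when the default derived from TENANT_TUNNEL_RANGES is not wanted
    Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS="vni_ranges=2001:3000"
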
Change-Id: Id75651dfe57a07045a6932a0369668f33c7eef09 Partial-Bug: #1588966 --- lib/neutron-legacy | 6 +++--- lib/neutron_plugins/ml2 | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 73123ef1d5..dca2e98a0c 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -179,9 +179,9 @@ fi # GRE tunnels are only supported by the openvswitch. ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} -# If using GRE tunnels for tenant networks, specify the range of -# tunnel IDs from which tenant networks are allocated. Can be -# overridden in ``localrc`` in necessary. +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} # To use VLANs for tenant networks, set to True in localrc. VLANs diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 30e1b036f3..dcdf6789c8 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -25,14 +25,14 @@ fi # List of MechanismDrivers to load Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} -# List of Type Drivers to load -Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options -Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000} +Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} # Default VLAN TypeDriver options Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# Default GENEVE TypeDriver options +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} # List of extension drivers to load, use '-' instead of ':-' to allow people to # explicitly override this to blank Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} @@ -111,7 +111,9 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 extension_drivers=$Q_ML2_PLUGIN_EXT_DRIVERS @@ -125,6 +127,8 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve $Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS + if [[ "$Q_DVR_MODE" != "legacy" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan From 21e3d1e55b3140e8b14105df05c1e5253a3d04ec Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Fri, 6 May 2016 12:35:22 -0400 Subject: [PATCH 0043/1936] Make wait_for_service more robust by checking HTTP response wait_for_service just checked to see if the remote service was started, not that it was returning data. This caused problems when the service was behind a proxy because the proxy would respond quickly but the service may not have fully started. 
Wait for a non-503 HTTP response code and non-7 exit code (connection error) from curl Return an error if a successful connection cannot be made. Change-Id: I059a12b1b920f703f28aca0e2f352714118dee97 --- functions | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/functions b/functions index aa12e1e826..46a7d414a1 100644 --- a/functions +++ b/functions @@ -381,12 +381,24 @@ CURL_GET="${CURL_GET:-curl -g}" # Wait for an HTTP server to start answering requests # wait_for_service timeout url +# +# If the service we want is behind a proxy, the proxy may be available +# before the service. Compliant proxies will return a 503 in this case +# Loop until we get something else. +# Also check for the case where there is no proxy and the service just +# hasn't started yet. curl returns 7 for Failed to connect to host. function wait_for_service { local timeout=$1 local url=$2 + local rval=0 time_start "wait_for_service" - timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done" + timeout $timeout bash -x < Date: Thu, 28 Apr 2016 15:45:25 -0500 Subject: [PATCH 0044/1936] Default LOG_COLOR based on interactive execution Change I4a10a49db97d413349bcfceeb8c4164936fbcc40 added colorful PS4 via tput. However, if TERM is not set (as is the case when stacking noninteractively), tput errors with the following: tput: No value for $TERM and no -T specified ...twice for every log message, thus flooding the logs. This change set turns LOG_COLOR off by default for noninteractive execution. If LOG_COLOR is set to True when noninteractive (TERM is unset), obviate the above errors by passing tput a simple -T. Change-Id: I0f8ad82375cde463160bad5bd9918f1e4b19326d Closes-Bug: 1576405 --- stackrc | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 7a5b821d4e..acb7d3f650 100644 --- a/stackrc +++ b/stackrc @@ -135,12 +135,29 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# Default for log coloring is based on interactive-or-not. +# Baseline assumption is that non-interactive invocations are for CI, +# where logs are to be presented as browsable text files; hence color +# codes should be omitted. +# Simply override LOG_COLOR if your environment is different. +if [ -t 1 ]; then + _LOG_COLOR_DEFAULT=True +else + _LOG_COLOR_DEFAULT=False +fi + # Use color for logging output (only available if syslog is not used) -LOG_COLOR=$(trueorfalse True LOG_COLOR) +LOG_COLOR=$(trueorfalse $_LOG_COLOR_DEFAULT LOG_COLOR) # Make tracing more educational if [[ "$LOG_COLOR" == "True" ]]; then - export PS4='+\[$(tput setaf 242)\]$(short_source)\[$(tput sgr0)\] ' + # tput requires TERM or -T. If neither is present, use vt100, a + # no-frills least common denominator supported everywhere. + TPUT_T= + if ! [ $TERM ]; then + TPUT_T='-T vt100' + fi + export PS4='+\[$(tput '$TPUT_T' setaf 242)\]$(short_source)\[$(tput '$TPUT_T' sgr0)\] ' else export PS4='+ $(short_source): ' fi From 60f394aee25cfd22c6c5e13622697c27acbb402d Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Fri, 17 Jun 2016 16:15:30 -0400 Subject: [PATCH 0045/1936] Fix amateur level mistake. Happy Friday! 
Change-Id: I2b1112ce74577d6e3d50c5ea2131d46c77307571 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index ba26c5f34d..ad68d8e62f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -242,7 +242,7 @@ function configure_neutron_new { # Metering if is_service_enabled neutron-metering; then - source $TOP_DIR/neutron_plugins/services/metering + source $TOP_DIR/lib/neutron_plugins/services/metering neutron_agent_metering_configure_common neutron_agent_metering_configure_agent fi From d00cbb77b166e222fdb5adf2aa5ff7dff6294ca4 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 20 Jun 2016 13:53:44 -0400 Subject: [PATCH 0046/1936] Neutron: check if q-l3 or neutron-l3 is enabled Change-Id: I7aff59fdf0fd75e134d2ae3ba8b7d63db98cc5ed --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 0f0ed216d0..61f0d6c099 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -289,7 +289,7 @@ function _neutron_configure_router_v4 { neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3; then + if is_service_enabled q-l3 || is_service_enabled neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then @@ -334,7 +334,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3; then + if is_service_enabled q-l3 || is_service_enabled neutron-l3; then # Ensure IPv6 forwarding is enabled on the host sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge From 66f4524cf491db78caaa93eca848ad897a2e6576 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 22 Jun 2016 08:08:16 +0000 Subject: [PATCH 0047/1936] Updated from generate-devstack-plugins-list Change-Id: Ie6363f08ac54b62d29096befd733eca0642379d8 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 99681b335c..eed88edbb7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -48,6 +48,7 @@ devstack-plugin-ceph `git://git.openstack.org/openstack/devsta devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs `__ devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs `__ devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka `__ +devstack-plugin-mariadb `git://git.openstack.org/openstack/devstack-plugin-mariadb `__ devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs `__ devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika `__ devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog `__ From a9cc38a1986a7c5f78f2d1e331fcc6ac8bd1023c Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Fri, 24 Jun 2016 04:30:12 -0400 Subject: [PATCH 0048/1936] Modify the image property for aarch64 In Aarch64, the default cdrom bus is scsi, and the default scsi controller is virtio-scsi. The cdrom with virtio bus will not be recognized by the instance. 
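For comparison, uploading an AArch64 image by hand with the equivalent properties would look roughly like this (image name and file are placeholders):

    openstack image create cirros-arm64 --public \
        --container-format bare --disk-format qcow2 \
        --property hw_machine_type=virt \
        --property hw_cdrom_bus=scsi \
        --property hw_scsi_model=virtio-scsi \
        --property os_command_line='console=ttyAMA0' \
        --file cirros-arm64-disk.img
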
Change-Id: Ib8cec79f9e9083239092fa7348793ee3b64a9c94 Signed-off-by: Kevin Zhao --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index aa12e1e826..dfe3d8bd60 100644 --- a/functions +++ b/functions @@ -331,7 +331,7 @@ function upload_image { fi if is_arch "aarch64"; then - img_property="--property hw_machine_type=virt --property hw_cdrom_bus=virtio --property os_command_line='console=ttyAMA0'" + img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'" fi if [ "$container_format" = "bare" ]; then From dddb2c7b5f85688de9c9b92f025df25d2f2d3016 Mon Sep 17 00:00:00 2001 From: Patrick East Date: Tue, 3 May 2016 17:34:00 -0700 Subject: [PATCH 0049/1936] Setup the Cinder image-volume cache by default This will have devstack setup the Cinder internal tenant and generic image-volume cache by default. If left alone it will use reasonable defaults. More information about configuration options and the cache can be found here: http://docs.openstack.org/admin-guide/blockstorage_image_volume_cache.html As part of this we switch the default lvm type to thin so it will work more efficiently with the image cache. Change-Id: I0b2cc261736f32d38d43c60254f0dc7225b24c01 Implements: blueprint cinder-image-volume-cache --- lib/cinder | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 607a6f83c6..fe49416e76 100644 --- a/lib/cinder +++ b/lib/cinder @@ -68,9 +68,8 @@ CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # What type of LVM device should Cinder use for LVM backend -# Defaults to default, which is thick, the other valid choice -# is thin, which as the name implies utilizes lvm thin provisioning. -CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} +# Defaults to thin. For thick provisioning change to 'default' +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-thin} # Default backends # The backend format is type:name where type is one of the supported backend @@ -128,6 +127,17 @@ fi CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL} CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL} +# Environment variables to configure the image-volume cache +CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} + +# For limits, if left unset, it will use cinder defaults of 0 for unlimited +CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} +CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} + +# Configure which cinder backends will have the image-volume cache, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. 
+CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} # Functions # --------- @@ -293,6 +303,7 @@ function configure_cinder { if [[ -n "$default_name" ]]; then iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi + configure_cinder_image_volume_cache fi if is_service_enabled swift; then @@ -394,6 +405,8 @@ function create_cinder_accounts { "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + + configure_cinder_internal_tenant fi } @@ -573,6 +586,31 @@ function create_cinder_volume_group { : } +function configure_cinder_internal_tenant { + # Re-use the Cinder service account for simplicity. + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME) + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder") +} + +function configure_cinder_image_volume_cache { + # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. + for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do + local be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED + + if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB + fi + + if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT + fi + done +} + # Restore xtrace $_XTRACE_CINDER From 68747349359822780e00833bca124558d835b05b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 29 Jun 2016 08:06:00 +0000 Subject: [PATCH 0050/1936] Updated from generate-devstack-plugins-list Change-Id: If8d8b223f6764990c305274047c811dffb7d9840 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index eed88edbb7..5b6622e75c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -103,6 +103,7 @@ networking-sfc `git://git.openstack.org/openstack/networ networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ neutron `git://git.openstack.org/openstack/neutron `__ neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ +neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas `__ neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ From bb35715cfe68ad8d1d2ccb2b2e7eb4143e87d678 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 7 Jun 2016 11:20:55 -0400 Subject: [PATCH 0051/1936] add local.conf modifying functions This adds a set of local.conf modifying functions which make it easier for consuming projects like devstack-gate to programatically add elements to local.conf structured files. 
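As a usage sketch, a consumer such as devstack-gate could drive the new helpers along these lines (file paths and values are illustrative):

    source inc/ini-config

    # Add or update a plain localrc variable in local.conf
    localrc_set /opt/stack/devstack/local.conf ENABLE_DEBUG_LOG_LEVEL True

    # Add or update an option under [DEFAULT] of nova.conf via a
    # [[post-config|$NOVA_CONF]] meta-section, creating it if needed
    localconf_set /opt/stack/devstack/local.conf post-config '$NOVA_CONF' DEFAULT debug True
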
Change-Id: I3427968c2bd43aba12b3619acc27f73c74f0dabb Co-Authored-By: fumihiko kakuma --- inc/ini-config | 164 ++++++++++++++ tests/test_localconf.sh | 475 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 639 insertions(+) create mode 100755 tests/test_localconf.sh diff --git a/inc/ini-config b/inc/ini-config index 1f12343ae0..68d48d197b 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -274,6 +274,170 @@ function iniget_sections { $xtrace } +# Set a localrc var +function localrc_set { + local file=$1 + local group="local" + local conf="localrc" + local section="" + local option=$2 + local value=$3 + localconf_set "$file" "$group" "$conf" "$section" "$option" "$value" +} + +# Check if local.conf has section. +function localconf_has_section { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local sep + sep=$(echo -ne "\x01") + local line + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/p + }" "$file") + [ -n "$line" ] +} + +# Check if local.conf has option. +function localconf_has_option { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local sep + sep=$(echo -ne "\x01") + local line + if [[ -z "$section" ]]; then + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /${option}[ \t]*=.*$/p + }" "$file") + else + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/{ + /${option}[ \t]*=.*$/p} + }" "$file") + fi + [ -n "$line" ] +} + +# Update option in local.conf. +function localconf_update_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + fi +} + +# Add option in local.conf. +function localconf_add_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} a $option=$value" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/ a $option=$value + }" "$file" + fi +} + +# Add section and option in local.conf. +function localconf_add_section_and_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} { + a [$section] + a $option=$value + }" "$file" +} + +# Set an option in a local.conf file. 
+# localconf_set [-sudo] config-file group conf-name section option value +# - if the file does not exist, it is created +function localconf_set { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sep + sep=$(echo -ne "\x01") + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local value=$6 + + if [[ -z $group || -z $conf || -z $option || -z $value ]]; then + $xtrace + return + fi + + if ! grep -q "^\[\[${group}|${conf}\]\]" "$file" 2>/dev/null; then + # Add meta section at the end if it does not exist + echo -e "\n[[${group}|${conf}]]" | $sudo tee --append "$file" > /dev/null + # Add section at the end + if [[ -n "$section" ]]; then + echo -e "[$section]" | $sudo tee --append "$file" > /dev/null + fi + # Add option at the end + echo -e "$option=$value" | $sudo tee --append "$file" > /dev/null + elif [[ -z "$section" ]]; then + if ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + elif ! localconf_has_section "$file" "$group" "$conf" "$section"; then + # Add section and option in specified meta section + localconf_add_section_and_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + elif ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + $xtrace +} + # Restore xtrace $INC_CONF_TRACE diff --git a/tests/test_localconf.sh b/tests/test_localconf.sh new file mode 100755 index 0000000000..d8075df442 --- /dev/null +++ b/tests/test_localconf.sh @@ -0,0 +1,475 @@ +#!/usr/bin/env bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. 
&& pwd) + +# Import config functions +source $TOP/inc/ini-config + +source $TOP/tests/unittest.sh + +echo "Testing INI local.conf functions" + +# test that can determine if file has section in specified meta-section + +function test_localconf_has_section { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_section $file_localconf post-config $file_conf1 conf1_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t2 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t3 + assert_equal $? 0 + localconf_has_section $file_localconf post-extra $file_conf2 conf2_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t4 + assert_equal $? 1 + localconf_has_section $file_localconf post-install $file_conf1 conf1_t1 + assert_equal $? 1 + localconf_has_section $file_localconf local localrc conf1_t2 + assert_equal $? 1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that can determine if file has option in specified meta-section and section +function test_localconf_has_option { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1 = conf1_t1_val1 +conf1_t1_opt2 = conf1_t1_val2 +conf1_t1_opt3 = conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR1 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR2 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR3 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 + assert_equal $? 0 + localconf_has_option $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1_opt4 + assert_equal $? 1 + localconf_has_option $file_localconf post-install $file_conf1 conf1_t1_opt1 + assert_equal $? 1 + localconf_has_option $file_localconf local localrc conf1_t2 conf1_t2_opt1 + assert_equal $? 
1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that update option in specified meta-section and section +function test_localconf_update_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2 +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2_update +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1_update +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2_update +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3_update + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3_update +EOF + + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 conf1_t1_val1_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 conf1_t2_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 conf1_t3_val3_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt3 conf2_t1_val3_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t3_opt1 conf1_t3_val1_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4_update + localconf_update_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt1 conf2_t1_val1_update + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add option in specified meta-section and section +function test_localconf_add_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 
+conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt4 = conf1_t1_val4 +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt4 = conf1_t2_val4 +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt4 = conf1_t3_val4 +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR4 = localrc_val4 +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + + localconf_add_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt4 conf1_t1_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt4 conf1_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt4 conf1_t3_val4 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_option "$SUDO" $file_localconf local localrc.conf "" LOCALRC_VAR4 localrc_val4_update + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt4 conf2_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t2_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_add_section_and_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 
+[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t2] +conf2_t2_opt1 = conf2_t2_val1 +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_add_section_and_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_section_and_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_section_and_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_set { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2_update +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 + +[[post-install|/etc/neutron/plugin/ml2/ml2_conf.ini]] +[ml2] +ml2_opt1 = ml2_val1 +EOF + + if [[ -n "$SUDO" ]]; then + SUDO_ARG="-sudo" + else + SUDO_ARG="" + fi + localconf_set $SUDO_ARG $file_localconf post-install /etc/neutron/plugin/ml2/ml2_conf.ini ml2 ml2_opt1 ml2_val1 + localconf_set $SUDO_ARG $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_set $SUDO_ARG $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_set $SUDO_ARG $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + + +test_localconf_has_section +test_localconf_has_option +test_localconf_update_option +test_localconf_add_option 
+test_localconf_add_section_and_option +test_localconf_set From 9079a40e9ebf6c20d754b8221a55b183de1a0766 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sun, 3 Jul 2016 19:20:27 -0400 Subject: [PATCH 0052/1936] nova: remove explicit file injection being disabled Change 9ce99a44cf85e431227536e2251ef05b52e61524 disabled file injection with the libvirt driver by default back in Icehouse, so devstack doesn't need to do this explicitly anymore. Change-Id: Id0c521f6f624367bd497463c8c2d99488548fcff --- lib/ceph | 1 - lib/nova_plugins/hypervisor-libvirt | 4 ---- 2 files changed, 5 deletions(-) diff --git a/lib/ceph b/lib/ceph index e999647ed8..2353de1758 100644 --- a/lib/ceph +++ b/lib/ceph @@ -301,7 +301,6 @@ function configure_ceph_nova { iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER} iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID} iniset $NOVA_CONF libvirt inject_key false - iniset $NOVA_CONF libvirt inject_partition -2 iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback" iniset $NOVA_CONF libvirt images_type rbd iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL} diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index d0e364efc3..503d4b6c89 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -57,10 +57,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF vnc enabled "false" fi - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" iniset $NOVA_CONF libvirt images_type "ploop" From 6d3670a65280d71529f8aad8ca5a0422abffebd0 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sun, 3 Jul 2016 19:40:25 -0400 Subject: [PATCH 0053/1936] Add a way to enable file injection for nova/tempest File injection is disabled by default for the libvirt driver in nova. This adds a variable to enable file injection for the libvirt driver and is also used to configure tempest.conf for running personality tests. Change-Id: I34790fadeffd6e3fdc65bd9feed3d6e62316896c Related-Bug: #1598581 --- lib/nova_plugins/hypervisor-libvirt | 8 ++++++++ lib/tempest | 1 + 2 files changed, 9 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 503d4b6c89..cdd078ca51 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -57,6 +57,14 @@ function configure_nova_hypervisor { iniset $NOVA_CONF vnc enabled "false" fi + if isset ENABLE_FILE_INJECTION; then + if [ "$ENABLE_FILE_INJECTION" == "True" ]; then + # -1 means use libguestfs to inspect the guest OS image for the + # root partition to use for file injection. 
+ iniset $NOVA_CONF libvirt inject_partition '-1' + fi + fi + if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" iniset $NOVA_CONF libvirt images_type "ploop" diff --git a/lib/tempest b/lib/tempest index 347b2a7dc7..ebaf4bb1cc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -348,6 +348,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion fi + iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From 67700ca5209c8d5605cfd94d6ac264f5247a2b8d Mon Sep 17 00:00:00 2001 From: Luz Cazares Date: Wed, 6 Jul 2016 12:13:18 -0700 Subject: [PATCH 0054/1936] Remove large_ops_number from devstack tempest Option was deleted from Tempest config file. Also test scenario was deleted. See commit I93b2fb33e97381f7c1e0cb1ef09ebc5c42c16ecc Change-Id: I750e50ba7cf8fca1dde391c2620b4a815d6b02a1 Closes-Bug: #1599619 --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index aa09e9a39f..bc3a2d897b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -423,9 +423,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" - # Large Ops Number - iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} - # Telemetry iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True" From 16edbe4356056daba18a7b912b4514fa11115e18 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 7 Jul 2016 14:43:39 -0400 Subject: [PATCH 0055/1936] Remove check for nova-volumes API The osapi_volume API in Nova has been gone forever, so we don't need to check for that anymore. 
Change-Id: I7303d3f434fc27a4a1a127e455a7d3b88f588537 --- lib/cinder | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index 0ebf195422..6d56b65ef7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -411,11 +411,7 @@ function create_cinder_cache_dir { } # init_cinder() - Initialize database and volume group -# Uses global ``NOVA_ENABLED_APIS`` function init_cinder { - # Force nova volumes off - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") - if is_service_enabled $DATABASE_BACKENDS; then # (Re)create cinder database recreate_database cinder From 7ef31d0fe4d3b93dd08edcd511a32ae66c51fdba Mon Sep 17 00:00:00 2001 From: xurong00037997 Date: Tue, 5 Jul 2016 11:09:40 +0800 Subject: [PATCH 0056/1936] neutron_plugin_configure_dhcp_agent invalid config file ADD dhcp config file name for neutron_plugin_configure_dhcp_agent Change-Id: I6578bcb40c4df2231c0b54f231ac3b78ede5a71d Closes-Bug: #1598745 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index dca2e98a0c..3a1bc64ec5 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -817,7 +817,7 @@ function _configure_neutron_dhcp_agent { _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - neutron_plugin_configure_dhcp_agent + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } From d565d62efe9a4476367be58551f6412e8e85688a Mon Sep 17 00:00:00 2001 From: Maxim Nestratov Date: Mon, 11 Jul 2016 22:33:39 +0300 Subject: [PATCH 0057/1936] ploop: specify hypervisor_type=vz property for ploop images This is necessary to make it possible to filter out compute nodes, which don't support such type of images. Change-Id: I347953876e2057e6f3dca71c2f5e8b638b85aaf8 --- functions | 1 + 1 file changed, 1 insertion(+) diff --git a/functions b/functions index 46a7d414a1..50b72ebf2f 100644 --- a/functions +++ b/functions @@ -251,6 +251,7 @@ function upload_image { image create \ "$image_name" --public \ --container-format=bare --disk-format=ploop \ + --property hypervisor_type=vz \ --property vm_mode=$vm_mode < "${image}" return fi From a9286886e413fbf055284dc167a7640a5a79b83c Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 20 Jun 2016 13:23:11 -0400 Subject: [PATCH 0058/1936] Neutron: include metering service plugin in conf The common code for metering calls _neutron_service_plugin_class_add, which despite the description only just appends a service plugin to $Q_SERVICE_PLUGIN_CLASSES - it doesn't actually write it into a configuration file. So for now, read out the configuration, and append metering to it, then write it back out. 
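In practice this is an iniget of the current value followed by an iniset of the appended list; assuming only the router plugin was configured beforehand, the effect is roughly:

    # illustrative values only; variable names mirror the change below
    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)   # e.g. "router"
    plugins+=",metering"
    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins     # now "router,metering"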
Change-Id: Ice96cca8b43dcd54f2aa81461000a4597db8260d --- lib/neutron | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/neutron b/lib/neutron index ad68d8e62f..5cab8e1716 100644 --- a/lib/neutron +++ b/lib/neutron @@ -245,6 +245,12 @@ function configure_neutron_new { source $TOP_DIR/lib/neutron_plugins/services/metering neutron_agent_metering_configure_common neutron_agent_metering_configure_agent + # TODO(sc68cal) hack because we don't pass around + # $Q_SERVICE_PLUGIN_CLASSES like -legacy does + local plugins="" + plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) + plugins+=",metering" + iniset $NEUTRON_CONF DEFAULT service_plugins $plugins fi } From 657cdcdbd18cbdbc185fbf6ef853382b357b3370 Mon Sep 17 00:00:00 2001 From: Patrick East Date: Fri, 1 Jul 2016 16:08:15 -0700 Subject: [PATCH 0059/1936] Allow for Nova to use os-brick from git This option to install os-brick from git was only added into lib/cinder previously. When testing all-in-one nodes this worked fine, but if you have multi-node setups with compute nodes that don't install any c-* services we only get packaged os-brick. With this change non-cinder nodes can now test against unreleased os-bricks. Change-Id: Ibb7423d243d57852dada0b6298463bbdfc6dc63c --- lib/cinder | 8 -------- lib/os_brick | 29 +++++++++++++++++++++++++++++ stack.sh | 6 ++++++ 3 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 lib/os_brick diff --git a/lib/cinder b/lib/cinder index 0ebf195422..69ff4c4e36 100644 --- a/lib/cinder +++ b/lib/cinder @@ -39,7 +39,6 @@ fi # set up default directories GITDIR["python-cinderclient"]=$DEST/python-cinderclient -GITDIR["os-brick"]=$DEST/os-brick GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder @@ -445,13 +444,6 @@ function init_cinder { # install_cinder() - Collect source and prepare function install_cinder { - # Install os-brick from git so we make sure we're testing - # the latest code. - if use_library_from_git "os-brick"; then - git_clone_by_name "os-brick" - setup_dev_lib "os-brick" - fi - git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then diff --git a/lib/os_brick b/lib/os_brick new file mode 100644 index 0000000000..690e321809 --- /dev/null +++ b/lib/os_brick @@ -0,0 +1,29 @@ +#!/bin/bash +# +# lib/os_brick +# Install **os-brick** python module from source + +# Dependencies: +# +# - functions +# - DEST, DATA_DIR must be defined + +# stack.sh +# --------- +# - install_os_brick + +# Save trace setting +_XTRACE_CINDER=$(set +o | grep xtrace) +set +o xtrace + + +GITDIR["os-brick"]=$DEST/os-brick + +# Install os_brick from git only if requested, otherwise it will be pulled from +# pip repositories by requirements of projects that need it. 
+function install_os_brick { + if use_library_from_git "os-brick"; then + git_clone_by_name "os-brick" + setup_dev_lib "os-brick" + fi +} diff --git a/stack.sh b/stack.sh index 6fbb0bee3b..5a5a040b02 100755 --- a/stack.sh +++ b/stack.sh @@ -570,6 +570,7 @@ source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/dlm +source $TOP_DIR/lib/os_brick # Extras Source # -------------- @@ -796,6 +797,11 @@ if is_service_enabled heat horizon; then install_heatclient fi +# Install shared libraries +if is_service_enabled cinder nova; then + install_os_brick +fi + # Install middleware install_keystonemiddleware From 6267ec01d6b4c503bca21ca6cbf95b443eff3408 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 12 Jul 2016 19:12:30 -0400 Subject: [PATCH 0060/1936] Remove juno/kilo specific tempest config This removes several config flags for Tempest now that juno and kilo are end of life. Tempest has already removed these flags too. Change-Id: I748429e73073f4202f77dfe1002687f76ee9a451 --- lib/tempest | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/lib/tempest b/lib/tempest index bc3a2d897b..01ad4f4381 100644 --- a/lib/tempest +++ b/lib/tempest @@ -356,15 +356,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life. - iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True - # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life. - iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} - # TODO(mriedem): Remove this when kilo-eol happens since the - # neutron.allow_duplicate_networks option was removed from nova in Liberty - # and is now the default behavior. - iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True} if is_service_enabled n-cell; then # Cells doesn't support shelving/unshelving iniset $TEMPEST_CONFIG compute-feature-enabled shelve False @@ -434,8 +426,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME # Volume - # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life. - iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. 
From 7886f7eb9fcd0cfdee93e4480a9fdcee2b0a2b74 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 13 Jul 2016 08:19:48 +0000 Subject: [PATCH 0061/1936] Updated from generate-devstack-plugins-list Change-Id: I4535ce00a7d15d14fdb9c3180b6b821352fe704f --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 5b6622e75c..104fca679c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -62,12 +62,14 @@ freezer-web-ui `git://git.openstack.org/openstack/freeze gce-api `git://git.openstack.org/openstack/gce-api `__ gnocchi `git://git.openstack.org/openstack/gnocchi `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ +heat `git://git.openstack.org/openstack/heat `__ higgins `git://git.openstack.org/openstack/higgins `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ kingbird `git://git.openstack.org/openstack/kingbird `__ kuryr `git://git.openstack.org/openstack/kuryr `__ +kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ manila `git://git.openstack.org/openstack/manila `__ From eecb983529a9164a0dabf19ea4c3186a708fa0c3 Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Wed, 13 Jul 2016 12:34:03 +0200 Subject: [PATCH 0062/1936] Remove support for End Of Life (EOL) Fedora 22 Fedora 22 reaches its EOL on 19-JUL-2016[1]. Remove it as officially supported distribution. The current two supported Fedora distributions are Fedora 23 and Fedora 24. (Change Ia4a58de4973ef228735c48b33453a0562dc65258 already added support for Fedora 24.) 
[1] https://fedoramagazine.org/fedora-22-end-of-life-2016-july/ Change-Id: I5b4e1ddb6165a9065e80e84175246678a7356f18 --- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/ceph | 2 +- stack.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index ee2e8a058b..d0ceb56621 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -7,9 +7,9 @@ gcc-c++ gettext # used for compiling message catalogs git-core graphviz # needed only for docs -iptables-services # NOPRIME f22,f23,f24 +iptables-services # NOPRIME f23,f24 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f22,f23,f24 +java-1.8.0-openjdk-headless # NOPRIME f23,f24 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index 594393e733..a883ec4399 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f22,f23,f24 +kernel-modules # dist:f23,f24 kpartx kvm # NOPRIME libvirt-bin # NOPRIME diff --git a/files/rpms/swift b/files/rpms/swift index 1e05167bcf..bd249ee71b 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f22,f23,f24 +rsync-daemon # dist:f23,f24 sqlite xfsprogs xinetd diff --git a/lib/ceph b/lib/ceph index e999647ed8..0c8d160766 100644 --- a/lib/ceph +++ b/lib/ceph @@ -116,7 +116,7 @@ function undefine_virsh_secret { # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f22|f23|f24) ]]; then + if [[ ! ${DISTRO} =~ (trusty|f23|f24) ]]; then echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)" if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" diff --git a/stack.sh b/stack.sh index 5a5a040b02..4cace9d0c2 100755 --- a/stack.sh +++ b/stack.sh @@ -185,7 +185,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|f24|rhel7|kvmibm1) ]]; then +if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f23|f24|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From e24707bb594eece8a3172c6763ac8fc3a1a8681f Mon Sep 17 00:00:00 2001 From: Matt McEuen Date: Mon, 11 Jul 2016 08:37:59 -0500 Subject: [PATCH 0063/1936] lib/neutron-legacy: replace ip when re-stacking Replicated Yi Zhao's fix for re-adding ipv6 addresses to neutron-legacy (review I9ff62023dbc29a88aec3c48af331c0a49a1270bb). Previously, re-stacking failed with "File exists" for ipv6 addresses on br-ex. With this change, the existing address is replaced on br-ex with the appropriate address. 
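The difference matters on a second run: ``add`` refuses to re-add an address that is already present, while ``replace`` is idempotent. A minimal illustration with a made-up address:

    sudo ip -6 addr add 2001:db8::2/64 dev br-ex       # first run: ok
    sudo ip -6 addr add 2001:db8::2/64 dev br-ex       # re-run fails: "RTNETLINK answers: File exists"
    sudo ip -6 addr replace 2001:db8::2/64 dev br-ex   # safe to re-run: adds or updates as needed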
Change-Id: I6e6235132a34469f4e68b5bb3cf51ebdf01c83a2 --- lib/neutron-legacy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3a1bc64ec5..def1674e7c 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -610,7 +610,7 @@ function _move_neutron_addresses_route { # on configure we will also add $from_intf as a port on $to_intf, # assuming it is an OVS bridge. - local IP_ADD="" + local IP_REPLACE="" local IP_DEL="" local IP_UP="" local DEFAULT_ROUTE_GW @@ -635,7 +635,7 @@ function _move_neutron_addresses_route { if [[ "$IP_BRD" != "" ]]; then IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_ADD="sudo ip addr add $IP_BRD dev $to_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" IP_UP="sudo ip link set $to_intf up" if [[ "$af" == "inet" ]]; then IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) @@ -645,7 +645,7 @@ function _move_neutron_addresses_route { # The add/del OVS port calls have to happen either before or # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD fi } From 5813265ce597dd5eb1105fad5922339a38ea9092 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 14 Jul 2016 12:55:51 +0200 Subject: [PATCH 0064/1936] remove OS_AUTH_TYPE from the userrc files The openstack client just gets more confused when it is specified. Change-Id: I8b498be835b63733cb38d33b02c3a8531a2da45b --- tools/create_userrc.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index b6db5d11aa..30d1a01577 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -193,7 +193,6 @@ export OS_PROJECT_NAME="$project_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" export NOVA_CERT="$ACCOUNT_DIR/cacert.pem" -export OS_AUTH_TYPE=v2password EOF if [ -n "$ADDPASS" ]; then echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile" From 79fc935640aa4695c26b4bcc8b06c605e2256311 Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Thu, 14 Jul 2016 14:16:20 -0300 Subject: [PATCH 0065/1936] Restoring xtrace state for os-brick plugin The change Ibb7423d243d57852dada0b6298463bbdfc6dc63c that introduced the os-brick plugin introduced a flaw where the xtrace state wasn't restored after the end of the plugin's execution. The end behavior is that devstack's logs were with way less information, difficulting the debugging of the build. This patch fixes the variable that was intended to hold the xtrace state (it was using cinder's) and restoring the state at the end of the script. 
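For reference, the idiom works because ``set +o`` prints the commands that reproduce the current shell option settings, so the saved string can simply be re-evaluated later. The pattern in a devstack library file looks roughly like:

    # top of the file: remember whether xtrace was on, then silence it
    _XTRACE_OS_BRICK=$(set +o | grep xtrace)   # e.g. "set -o xtrace" or "set +o xtrace"
    set +o xtrace

    # ... library functions ...

    # bottom of the file: restore whatever state the caller had
    $_XTRACE_OS_BRICK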
Change-Id: I47c6c794a9704049b089142eca5603d1183f8a10 --- lib/os_brick | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/os_brick b/lib/os_brick index 690e321809..d1cca4af44 100644 --- a/lib/os_brick +++ b/lib/os_brick @@ -13,7 +13,7 @@ # - install_os_brick # Save trace setting -_XTRACE_CINDER=$(set +o | grep xtrace) +_XTRACE_OS_BRICK=$(set +o | grep xtrace) set +o xtrace @@ -27,3 +27,6 @@ function install_os_brick { setup_dev_lib "os-brick" fi } + +# Restore xtrace +$_XTRACE_OS_BRICK \ No newline at end of file From 7b5c7dce53ab9e9778d1d72fcbbed565bfce65bd Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 15 Jul 2016 20:17:13 +0200 Subject: [PATCH 0066/1936] Introduce PUBLIC_BRIDGE_MTU variable to set br-ex MTU This variable can be used to accommodate for underlying infrastructure that does not provide full 1500-sized traffic, or maybe instead gives access to Jumbo frames. Change-Id: I38a80bac18673a30842a7b997d0669fed5aff976 Related-Bug: #1603268 --- functions | 9 +++++++++ lib/neutron_plugins/linuxbridge_agent | 1 + lib/neutron_plugins/openvswitch_agent | 2 +- lib/neutron_plugins/ovs_base | 7 ++++++- lib/neutron_plugins/services/l3 | 1 + 5 files changed, 18 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 4f5e10aaa6..58565788ff 100644 --- a/functions +++ b/functions @@ -637,6 +637,15 @@ function create_disk { fi } + +# set_mtu - Set MTU on a device +function set_mtu { + local dev=$1 + local mtu=$2 + sudo ip link set mtu $mtu dev $dev +} + + # Restore xtrace $_XTRACE_FUNCTIONS diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 0a066354ca..e5a7b76187 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -50,6 +50,7 @@ function neutron_plugin_configure_dhcp_agent { function neutron_plugin_configure_l3_agent { local conf_file=$1 sudo brctl addbr $PUBLIC_BRIDGE + set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU iniset $conf_file DEFAULT external_network_bridge iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 69e38f4df1..b4a53e4c38 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -104,7 +104,7 @@ function neutron_plugin_configure_plugin_agent { sudo ovs-vsctl -- --may-exist add-port "br-$VLAN_INTERFACE" $VLAN_INTERFACE # Create external bridge and add port - _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + _neutron_ovs_base_add_public_bridge sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index ecf252f88b..9e1421f5e6 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -105,11 +105,16 @@ function _neutron_ovs_base_configure_l3_agent { sudo ip link set $Q_PUBLIC_VETH_EX up sudo ip addr flush dev $Q_PUBLIC_VETH_EX else - _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + _neutron_ovs_base_add_public_bridge sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE fi } +function _neutron_ovs_base_add_public_bridge { + _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU +} + function _neutron_ovs_base_configure_nova_vif_driver { : } diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 4ce87bdf6f..2180099ee4 100644 --- 
a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -15,6 +15,7 @@ IPV6_PROVIDER_FIXED_RANGE=${IPV6_PROVIDER_FIXED_RANGE:-} IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} +PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of # PUBLIC_BRIDGE. This is intended to be used with From d55513a07c610da479a6e59ff50605f48caeabef Mon Sep 17 00:00:00 2001 From: venkatamahesh Date: Tue, 19 Jul 2016 17:34:20 +0530 Subject: [PATCH 0067/1936] Corrected the local.conf configuration file link Change-Id: Id28a9f85ae5ba789f09269163c6e5b2c8c36a7c8 Closes-Bug: #1603848 --- samples/local.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/local.conf b/samples/local.conf index 06ac18572d..6d5351f904 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -10,7 +10,7 @@ # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/local.conf.html +# in http://docs.openstack.org/developer/devstack/configuration.html#local-conf # These should be considered as samples and are unsupported DevStack code. # The ``localrc`` section replaces the old ``localrc`` configuration file. From e2b75363699124cfd2c19de69286cfafa98aa9a0 Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Sat, 16 Jul 2016 01:02:49 +0900 Subject: [PATCH 0068/1936] Remove remaining fwaas code Change-Id: I24fe7a559760b985bd53373523276a8f075e5974 --- lib/neutron-legacy | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index f4e577d43b..7dce5f7f3e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -292,9 +292,6 @@ function _determine_config_server { function _determine_config_l3 { local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - if is_service_enabled q-fwaas; then - opts+=" --config-file $Q_FWAAS_CONF_FILE" - fi echo "$opts" } From 5d7992a0189d940c0ffbb01e93bf3a0e4eba4ec7 Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Tue, 19 Jul 2016 15:52:12 +0000 Subject: [PATCH 0069/1936] Modify the default cpu-mode for aarh64 in Libvirt For AArch64, KVM don't recognize the cpu-mode "none", so change the default cpu-mode as host-passthrough for generating nova.conf Change-Id: I94a22e5a15a974b9c11e9f9fd996857453b6e2ca Signed-off-by: Kevin Zhao --- lib/nova_plugins/hypervisor-libvirt | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index d0e364efc3..51d807a160 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -55,6 +55,7 @@ function configure_nova_hypervisor { if is_arch "aarch64"; then # arm64 architecture currently does not support graphical consoles. iniset $NOVA_CONF vnc enabled "false" + iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" fi # File injection is being disabled by default in the near future - From 881373c049eceefc4fd7f6c69543d42528830a0e Mon Sep 17 00:00:00 2001 From: Oleksii Butenko Date: Tue, 21 Jun 2016 17:29:54 +0300 Subject: [PATCH 0070/1936] Add flag for snapshot_backup We have new feature in cinder and new test for it. The test is skipped by default. Need to add flag to unskip this test on master and Mitaka. 
new test: I1964ce6e1298041f8238d76fa4b7029d2d23bbfb Change-Id: Ib695e60c2ed7edf30c8baef9e00f0307b1156551 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 01ad4f4381..e4f80b835d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -426,6 +426,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME # Volume + # TODO(obutenko): Remove snapshot_backup when liberty-eol happens. + iniset $TEMPEST_CONFIG volume-feature-enabled snapshot_backup True # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. From 130a11f8aaf08ea529b6ce60dd9052451cb7bb5c Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Sun, 17 Jul 2016 22:19:30 -0600 Subject: [PATCH 0071/1936] Use real Neutron network for L3 GW by default Relying on 'external_network_bridge=br-ex' for the L3 agent has been deprecated in Neutron. This patch adjusts the devstack defaults to setup Neutron in the preferred manner (empty external_network_bridge value and correct bridge_mappings for the L2 agent). This will also help with correct MTU calculations now that the external network will have the correct segmentation type on it ('flat' now instead of 'vxlan' by default). Related-Bug: #1511578 Related-Bug: #1603493 Change-Id: Id20e67aba5dfd2044b82c700f41c6e648b529430 --- lib/neutron-legacy | 4 ++-- lib/neutron_plugins/services/l3 | 9 +++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index f4e577d43b..f83492b931 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -203,7 +203,7 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # agent, as described below. # # Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # With the openvswitch agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to @@ -213,7 +213,7 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # port for external connectivity. # # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} # With the linuxbridge agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2180099ee4..816679ec79 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -17,6 +17,11 @@ IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public +# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is +# used. +Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} + # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of # PUBLIC_BRIDGE. This is intended to be used with # Q_USE_PROVIDERNET_FOR_PUBLIC=True. 
@@ -51,7 +56,7 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # Q_USE_PROVIDERNET_FOR_PUBLIC=True # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False} +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} # Generate 40-bit IPv6 Global ID to comply with RFC 4193 @@ -305,7 +310,7 @@ function _neutron_configure_router_v4 { local cidr_len=${FLOATING_RANGE#*/} local testcmd="ip -o link | grep -q $ext_gw_interface" test_with_retry "$testcmd" "$ext_gw_interface creation failed" - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi From a1e1f5128a22a7d0cdc8136063d27d64c270021d Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Wed, 20 Jul 2016 18:12:09 -0400 Subject: [PATCH 0072/1936] Add keystone VirtualHost for port 443 when USE_SSL is True Add a VirtualHost that defines the necessary options for enabling SSL. The existing keystone Apache configuration already does all the location handling. Change-Id: I836a471a7258f14f051d3dd8bdb428286b5a11aa --- files/apache-keystone.template | 6 ++++++ lib/keystone | 3 +++ 2 files changed, 9 insertions(+) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 8a4b0f0c43..249eaa5d0e 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -34,6 +34,12 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLKEYFILE% +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% + Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public SetHandler wsgi-script diff --git a/lib/keystone b/lib/keystone index 6198e43b58..810acac63f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -161,6 +161,7 @@ function _cleanup_keystone_apache_wsgi { function _config_keystone_apache_wsgi { local keystone_apache_conf keystone_apache_conf=$(apache_site_config_for keystone) + keystone_ssl_listen="#" local keystone_ssl="" local keystone_certfile="" local keystone_keyfile="" @@ -169,6 +170,7 @@ function _config_keystone_apache_wsgi { local venv_path="" if is_ssl_enabled_service key; then + keystone_ssl_listen="" keystone_ssl="SSLEngine On" keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT" keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY" @@ -186,6 +188,7 @@ function _config_keystone_apache_wsgi { s|%PUBLICPORT%|$keystone_service_port|g; s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; + s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; s|%SSLCERTFILE%|$keystone_certfile|g; s|%SSLKEYFILE%|$keystone_keyfile|g; From 2381f336296c80834ca9ce5ed3f1c784acc4d157 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 21 Jul 2016 08:07:43 +0000 Subject: [PATCH 0073/1936] Updated from generate-devstack-plugins-list Change-Id: Ieffddf42e1b5d77dea651208a18b3de320489745 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 
104fca679c..38910b0785 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -64,6 +64,7 @@ gnocchi `git://git.openstack.org/openstack/gnocch group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ heat `git://git.openstack.org/openstack/heat `__ higgins `git://git.openstack.org/openstack/higgins `__ +horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ From df6c1ffbe110495d94f607a4344e0703be54c6ce Mon Sep 17 00:00:00 2001 From: Kenneth Giusti Date: Thu, 7 Jul 2016 09:28:58 -0400 Subject: [PATCH 0074/1936] Force reinstall of virtualenv to ensure installation is valid On RHEL-based systems pip and yum share the same installation directory for virtualenv. If yum pulls in the python-virtualenv package (e.g. due to a dependency) it will clobber what pip has already installed. The file tools/fixup_stuff.sh tries to ensure that the proper virtualenv package is installed via pip. If virtualenv has already been installed via pip, then clobbered by yum, pip skips the install since it appears as if virtualenv is already installed and at the correct version. The reinstall of virtualenv must use the --force-reinstall argument to pip to fix up the damage done by yum. Change-Id: Ib0edf6c4ee8a510e9d671213de35d787f56acfed Closes-Bug: #1599863 --- tools/fixup_stuff.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 193a1f7aba..4dec95eb4d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -162,7 +162,11 @@ if is_fedora; then fi # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has -# connection issues under proxy, hence uninstalling python-virtualenv package -# and installing the latest version using pip. -uninstall_package python-virtualenv -pip_install -U virtualenv +# connection issues under proxy so re-install the latest version using +# pip. To avoid having pip's virtualenv overwritten by the distro's +# package (e.g. due to installing a distro package with a dependency +# on python-virtualenv), first install the distro python-virtualenv +# to satisfy any dependencies then use pip to overwrite it. + +install_package python-virtualenv +pip_install -U --force-reinstall virtualenv From 7da968a8be03229cfa72b215b87f17e28e23a988 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 22 Jul 2016 06:02:22 +0000 Subject: [PATCH 0075/1936] Revert "Use real Neutron network for L3 GW by default" This reverts commit 130a11f8aaf08ea529b6ce60dd9052451cb7bb5c. Linux bridge devstack logic needs some changes first. Change-Id: I5885062ad128518c22f743db016e1a6db64f3313 Closes-Bug: #1605423 --- lib/neutron-legacy | 4 ++-- lib/neutron_plugins/services/l3 | 9 ++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index f83492b931..f4e577d43b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -203,7 +203,7 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # agent, as described below. 
# # Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # With the openvswitch agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to @@ -213,7 +213,7 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # port for external connectivity. # # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} # With the linuxbridge agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 816679ec79..2180099ee4 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -17,11 +17,6 @@ IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} -# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public -# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is -# used. -Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} - # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of # PUBLIC_BRIDGE. This is intended to be used with # Q_USE_PROVIDERNET_FOR_PUBLIC=True. @@ -56,7 +51,7 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # Q_USE_PROVIDERNET_FOR_PUBLIC=True # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False} PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} # Generate 40-bit IPv6 Global ID to comply with RFC 4193 @@ -310,7 +305,7 @@ function _neutron_configure_router_v4 { local cidr_len=${FLOATING_RANGE#*/} local testcmd="ip -o link | grep -q $ext_gw_interface" test_with_retry "$testcmd" "$ext_gw_interface creation failed" - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi From 7d89a798744c24ba82199fd42db995368a2e9322 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 23 Jul 2016 08:03:03 +0000 Subject: [PATCH 0076/1936] Updated from generate-devstack-plugins-list Change-Id: I222ecc69427f1ddf4498f5af29664544efe43b46 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 38910b0785..bdb8d8bc9c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,7 +69,6 @@ ironic `git://git.openstack.org/openstack/ironic ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ kingbird `git://git.openstack.org/openstack/kingbird `__ -kuryr `git://git.openstack.org/openstack/kuryr `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ 
From c07170abd7a382bda028c6ae14bfde5d912ab78f Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 20 Jul 2016 19:44:05 +0900 Subject: [PATCH 0077/1936] lib/neutron: Create initial networks regardless of neutron-l3 Closes-Bug: #1604768 Change-Id: I699977930675512e9767a90f317fc0faa1ea9901 --- lib/neutron | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/neutron b/lib/neutron index 5cab8e1716..7bf31d0a85 100644 --- a/lib/neutron +++ b/lib/neutron @@ -425,16 +425,16 @@ function start_neutron_new { fi if is_service_enabled neutron-l3; then run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG" - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network - fi + fi + # XXX(sc68cal) - Here's where plugins can wire up their own networks instead + # of the code in lib/neutron_plugins/services/l3 + if type -p neutron_plugin_create_initial_networks > /dev/null; then + neutron_plugin_create_initial_networks + else + # XXX(sc68cal) Load up the built in Neutron networking code and build a topology + source $TOP_DIR/lib/neutron_plugins/services/l3 + # Create the networks using servic + create_neutron_initial_network fi if is_service_enabled neutron-metadata-agent; then run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG" From c6f857f49ca1459f0874e8dcc83be539829e6ee3 Mon Sep 17 00:00:00 2001 From: zhangyanxian Date: Mon, 25 Jul 2016 08:44:28 +0000 Subject: [PATCH 0078/1936] Fix the typo in the file Change-Id: I3a3976bdab6743f6d741d39708b01eacf6a01074 --- tools/xen/install_os_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 3a61215b5b..66b9eda474 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -247,7 +247,7 @@ else fi if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then - echo "User requested to quit after JEOS instalation" + echo "User requested to quit after JEOS installation" exit 0 fi From 88ccd47c88dc39b8d746afae89dea31d46558a68 Mon Sep 17 00:00:00 2001 From: Spyros Trigazis Date: Sun, 24 Jul 2016 22:13:57 +0200 Subject: [PATCH 0079/1936] Keep old behavior of setuptools for editable installs In the 25.0.0 release [1] of setuptools during any install operation the package in not overwritten. If a package is installed from another requirement via pip and then it is installed again from git, it is not updated causing check_libs_from_git to fail. [1] https://setuptools.readthedocs.io/en/latest/history.html#v25-0-0 Change-Id: Ibaa1d4157816ea649f4452756fbde25951347001 Closes-Bug: #1605998 --- inc/python | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/inc/python b/inc/python index e013dfab36..9de2831537 100644 --- a/inc/python +++ b/inc/python @@ -148,11 +148,15 @@ function pip_install { fi $xtrace + # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep + # the same behaviour of setuptools before version 25.0.0. 
+ # related issue: https://github.com/pypa/pip/issues/3874 $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ + SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ $cmd_pip $upgrade \ $@ result=$? From ba1a64d8eb7ce8611cf518df882845908c72bb0b Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 25 Jul 2016 11:32:42 -0400 Subject: [PATCH 0080/1936] lib/neutron: Add port_security ml2 extension driver Tempest currently conducts tests for this extension driver by default. Change-Id: I5f9881d0713965b66358dc9cade8d623da98d75d --- lib/neutron | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron b/lib/neutron index ad68d8e62f..22584c6eb8 100644 --- a/lib/neutron +++ b/lib/neutron @@ -166,6 +166,7 @@ function configure_neutron_new { iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 + iniset $NEUTRON_PLUGIN_CONF ml2 extension_drivers port_security fi # Neutron OVS or LB agent From 8f0e97c1460f015f4eb03a8221b4b02b61559d9d Mon Sep 17 00:00:00 2001 From: WenyanZhang Date: Thu, 7 Jul 2016 18:57:32 +0800 Subject: [PATCH 0081/1936] Remove all *.pyc files in $DEST when executing clean.sh Currentlly, the *.pyc files could not be removed in any scripts or functions. But the redundant files would lead stack.sh not to find the correct script for some versions after branch switched from master to stable/mitaka in migration_helpers.sync_database_to_version. So this commit adds the process of cleaning all the *.pyc files in clean.sh. It is needed to execute clean.sh before re-stack.sh to prevent the exception. Change-Id: I9ba0674d6b20b13c0a26b22cd5d1939daa121a94 Closes-Bug: #1599124 --- clean.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clean.sh b/clean.sh index 0641bffcf8..e0ec9f5315 100755 --- a/clean.sh +++ b/clean.sh @@ -145,3 +145,8 @@ for file in $FILES_TO_CLEAN; do done rm -rf ~/.config/openstack + +# Clean up all *.pyc files +if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then + sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm +fi From c694e55ad9707426c05a2139edee037104b009f4 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 1 Aug 2016 12:52:44 +0100 Subject: [PATCH 0082/1936] XenAPI: Don't assume specific network interfaces The default for GUEST_INTERFACE_DEFAULT now uses the ip command to find an interface; so it will work on multiple distributions. XenAPI should not be setting a specific interface here, as it will almost always be wrong. In most cases, the calculated value for GUEST_INTERFACE_DEFAULT will be a better default. PUBLIC_INTERFACE_DEFAULT makes even less sense as it's often an internal bridge for devstack scenarios. In both cases, the right way to override these is to set GUEST_INTERFACE / PUBLIC_INTERFACE in the localrc rather than changing the _DEFAULT values. 
Change-Id: I0cf84438d778bf1a2481328165513c59167490e2 --- lib/nova_plugins/hypervisor-xenserver | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index e7f1e87b61..e75226ae64 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -24,8 +24,6 @@ set +o xtrace # Defaults # -------- -PUBLIC_INTERFACE_DEFAULT=eth2 -GUEST_INTERFACE_DEFAULT=eth1 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) if is_service_enabled neutron; then From e56318f9bc87b0ed0e1ce6fcd1216aebe3689fe8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 1 Aug 2016 10:29:03 -0400 Subject: [PATCH 0083/1936] Remove manual setting of privsep helper The privsep helper should have a sane default for all libraries, pushing this into devstack means we cheat past a part of the upgrade that we really shouldn't be. Change-Id: I52259e2023e277e8fd62be5df4fd7f799e9b36d7 --- lib/cinder | 2 -- lib/nova | 5 ----- 2 files changed, 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 69ff4c4e36..a87f395c8e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -273,8 +273,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" - iniset $CINDER_CONF privsep_osbrick helper_command "sudo cinder-rootwrap \$rootwrap_config privsep-helper --config-file $CINDER_CONF" - if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" diff --git a/lib/nova b/lib/nova index 67a80b9b16..16f6e9b39f 100644 --- a/lib/nova +++ b/lib/nova @@ -481,11 +481,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT bindir "/usr/bin" fi - iniset $NOVA_CONF privsep_osbrick helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" - - iniset $NOVA_CONF vif_plug_ovs_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" - iniset $NOVA_CONF vif_plug_linux_bridge_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF" - if is_service_enabled n-api; then if is_service_enabled n-api-meta; then # If running n-api-meta as a separate service From 9124a84a968d00582bd8bada154db22b9544bfd0 Mon Sep 17 00:00:00 2001 From: Cao Xuan Hoang Date: Mon, 1 Aug 2016 14:56:02 +0700 Subject: [PATCH 0084/1936] q-l3 failed to start in case q-fwaas enabled In case q-fwaas is enabled. It will causes the q-l3 failed to start because the DevStack gave a redundant --config-file option to start q-l3 This is a follow-up patch of 84409516d56417464dfe0c4e6904a1a76f9fa254 to remove fwaas from DevStack completely. Change-Id: I630969b3556bcffba506cab02a09cc83f4430c88 Closes-Bug: #1608401 --- lib/neutron-legacy | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5d91cdabca..44db16a435 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -292,9 +292,6 @@ function _determine_config_server { function _determine_config_l3 { local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - if is_service_enabled q-fwaas; then - opts+=" --config-file $Q_FWAAS_CONF_FILE" - fi echo "$opts" } From 75a6454097071523a8617fcbea0f650d42c03341 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Mon, 1 Aug 2016 14:41:10 -0400 Subject: [PATCH 0085/1936] neutron: Wait until ovs-vswitchd creates the bridge Seeing a race condition where lib/neutron code tries to set the MTU on br-ex before it exists. Thanks to some good grepping by sdague, it appears that the difference between lib/neutron and lib/neutron-legacy is that the initial bridge being created is br-int while in lib/neutron the initial bridge created is br-ex, which means there must be some kind of warm-up that occurs between the first bridge that is created by ovs-vswitchd and the second, and the second one created is much faster. So instead, let's just wait for the bridge to be created successfully. Change-Id: I271dc8b6ae5487c80d2a22153b3fc45fb247707f --- lib/neutron_plugins/ovs_base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 9e1421f5e6..f6d10ea4f9 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -19,7 +19,7 @@ function is_neutron_ovs_base_plugin { function _neutron_ovs_base_add_bridge { local bridge=$1 - local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge" + local addbr_cmd="sudo ovs-vsctl -- --may-exist add-br $bridge" if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" From 5631ca5e126ec2d4c6802d09e312fcb8c1ab4952 Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Tue, 2 Aug 2016 13:19:14 -0400 Subject: [PATCH 0086/1936] Clean up Horizon Apache configuration files in clean.sh The horizon cleanup function wasn't being called at all during cleanup which left the Apache configuration. Change-Id: Iff5336d0c5e79cfc82f1c648afaabb869d86020e --- clean.sh | 1 + lib/horizon | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clean.sh b/clean.sh index 0641bffcf8..452df02d80 100755 --- a/clean.sh +++ b/clean.sh @@ -95,6 +95,7 @@ cleanup_keystone cleanup_nova cleanup_neutron cleanup_swift +cleanup_horizon if is_service_enabled ldap; then cleanup_ldap diff --git a/lib/horizon b/lib/horizon index 0517e32197..78cbe8b58d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -69,9 +69,8 @@ function _horizon_config_set { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon { - local horizon_conf - horizon_conf=$(apache_site_config_for horizon) - sudo rm -f $horizon_conf + disable_apache_site horizon + sudo rm -f $(apache_site_config_for horizon) } # configure_horizon() - Set config files, create data dirs, etc From 0a099763b022ce476f921c8a4bc2ea20ce5f67b4 Mon Sep 17 00:00:00 2001 From: "Lubosz \"diltram\" Kosnik" Date: Wed, 3 Aug 2016 10:21:41 -0500 Subject: [PATCH 0087/1936] Change python version to 3.5 On Ubuntu Xenial there is no way to install python3.4 Use value specified in PYTHON3_VERSION Change-Id: Ibc69b1c8270bdd240c82cf2acfdfd0730ef0f182 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 9de2831537..e4cfab803c 100644 --- a/inc/python +++ b/inc/python @@ -370,7 +370,7 @@ function python3_enabled { # Install python3 packages function install_python3 { if is_ubuntu; then - apt_get install python3.4 python3.4-dev + apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev fi } From b8286a3a06c48c10b9efc50c4f0487eba8cdf706 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 3 Aug 2016 08:09:54 -0400 Subject: [PATCH 
0088/1936] remove neutron 3rd party infrastructure This was used solely by bigswitch, and everyone else has moved over to devstack plugins. Cleaning this out makes the core logic much simpler. Depends-On: I8fd2ec6e651f858d0ce109fc335189796c3264b8 (grenade removal) Change-Id: I47769fc7faae22d263ffd923165abd48f0791a2c --- lib/neutron-legacy | 49 ------------------- lib/neutron_thirdparty/README.md | 41 ---------------- lib/neutron_thirdparty/bigswitch_floodlight | 54 --------------------- lib/neutron_thirdparty/vmware_nsx | 4 -- stack.sh | 12 ----- unstack.sh | 1 - 6 files changed, 161 deletions(-) delete mode 100644 lib/neutron_thirdparty/README.md delete mode 100644 lib/neutron_thirdparty/bigswitch_floodlight delete mode 100644 lib/neutron_thirdparty/vmware_nsx diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5d91cdabca..cda919363a 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1003,55 +1003,6 @@ function _ssh_check_neutron { test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec } -# Neutron 3rd party programs -#--------------------------- - -# please refer to ``lib/neutron_thirdparty/README.md`` for details -NEUTRON_THIRD_PARTIES="" -for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi -done - -function _neutron_third_party_do { - for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do - ${1}_${third_party} - done -} - -# configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party { - _neutron_third_party_do configure -} - -# init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party { - _neutron_third_party_do init -} - -# install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party { - _neutron_third_party_do install -} - -# start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party { - _neutron_third_party_do start -} - -# stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party { - _neutron_third_party_do stop -} - -# check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration { - _neutron_third_party_do check -} - # Restore xtrace $_XTRACE_NEUTRON diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md deleted file mode 100644 index 905ae776a8..0000000000 --- a/lib/neutron_thirdparty/README.md +++ /dev/null @@ -1,41 +0,0 @@ -Neutron third party specific files -================================== -Some Neutron plugins require third party programs to function. -The files under the directory, ``lib/neutron_thirdparty/``, will be used -when their service are enabled. -Third party program specific configuration variables should be in this file. - -* filename: ```` - * The corresponding file name should be same to service name, ````. - -functions ---------- -``lib/neutron-legacy`` calls the following functions when the ```` is enabled - -functions to be implemented -* ``configure_``: - set config files, create data dirs, etc - e.g. - sudo python setup.py deploy - iniset $XXXX_CONF... - -* ``init_``: - initialize databases, etc - -* ``install_``: - collect source and prepare - e.g. 
- git clone xxx - -* ``start_``: - start running processes, including screen if USE_SCREEN=True - e.g. - run_process XXXX "$XXXX_DIR/bin/XXXX-bin" - -* ``stop_``: - stop running processes (non-screen) - e.g. - stop_process XXXX - -* ``check_``: - verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight deleted file mode 100644 index 45a4f2e263..0000000000 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# -# Big Switch/FloodLight OpenFlow Controller -# ------------------------------------------ - -# Save trace setting -_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) -set +o xtrace - -BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} -BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} - -function configure_bigswitch_floodlight { - : -} - -function init_bigswitch_floodlight { - install_neutron_agent_packages - - echo -n "Installing OVS managed by the openflow controllers:" - echo ${BS_FL_CONTROLLERS_PORT} - - # Create local OVS bridge and configure it - sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} - - ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do - ctrl=${ctrl%:*} - ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" - done - echo "Adding Network conttrollers: " ${ctrls} - sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} -} - -function install_bigswitch_floodlight { - : -} - -function start_bigswitch_floodlight { - : -} - -function stop_bigswitch_floodlight { - : -} - -function check_bigswitch_floodlight { - : -} - -# Restore xtrace -$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx deleted file mode 100644 index e182fca1ae..0000000000 --- a/lib/neutron_thirdparty/vmware_nsx +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -# REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx' -# continues to work. diff --git a/stack.sh b/stack.sh index 4cace9d0c2..823b63ba24 100755 --- a/stack.sh +++ b/stack.sh @@ -843,7 +843,6 @@ fi if is_service_enabled neutron; then # Network service stack_install_service neutron - install_neutron_third_party fi if is_service_enabled nova; then @@ -1093,15 +1092,6 @@ if is_service_enabled neutron; then fi fi -# Some Neutron plugins require network controllers which are not -# a part of the OpenStack project. Configure and start them. 
-if is_service_enabled neutron; then - configure_neutron_third_party - init_neutron_third_party - start_neutron_third_party -fi - - # Nova # ---- @@ -1235,11 +1225,9 @@ fi if is_service_enabled neutron-api; then echo_summary "Starting Neutron" start_neutron_api - # check_neutron_third_party_integration elif is_service_enabled q-svc; then echo_summary "Starting Neutron" start_neutron_service_and_check - check_neutron_third_party_integration elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then diff --git a/unstack.sh b/unstack.sh index a69b2187ce..ece69acad8 100755 --- a/unstack.sh +++ b/unstack.sh @@ -168,7 +168,6 @@ fi if is_service_enabled neutron; then stop_neutron - stop_neutron_third_party cleanup_neutron fi From 9162608d6e1d13a64dd387486faa221c694ba913 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 4 Aug 2016 15:17:38 +1000 Subject: [PATCH 0089/1936] Revert "stackrc set the LC_ALL to C" This reverts commit f327b1e1196eacf25e7c4c9e3a7ad30c53bb961c. The problem being addressed in the original commit was that rejoin-stack.sh would run with the user's locale, instead of C that was set in stack.sh We overlooked that this gets pulled in by openrc, so it is overriding the user's locale when using clients, etc. rejoin-stack.sh was removed in I2f61bb69cc110468a91dcaa4ee7653ede7048467 so we don't have to worry about that part. A revert to not touch the user's locale seems appropriate. Change-Id: I7c858bb92ce7ba5b5d323bf3ad6776100026c7a2 Closes-Bug: #1608687 --- stack.sh | 7 +++++++ stackrc | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index 4cace9d0c2..4805c69418 100755 --- a/stack.sh +++ b/stack.sh @@ -27,6 +27,13 @@ set -o xtrace # Make sure custom grep options don't get in the way unset GREP_OPTIONS +# Sanitize language settings to avoid commands bailing out +# with "unsupported locale setting" errors. +unset LANG +unset LANGUAGE +LC_ALL=C +export LC_ALL + # Make sure umask is sane umask 022 diff --git a/stackrc b/stackrc index acb7d3f650..7e565e734d 100644 --- a/stackrc +++ b/stackrc @@ -7,13 +7,6 @@ [[ -z "$_DEVSTACK_STACKRC" ]] || return 0 declare -r _DEVSTACK_STACKRC=1 -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. -unset LANG -unset LANGUAGE -LC_ALL=C -export LC_ALL - # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) From 6a008fa74bce0497b7902dd95300599a3026e2dd Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 3 Aug 2016 15:09:01 -0400 Subject: [PATCH 0090/1936] Change to neutron by default. nova-net is deprecated, and it's long time to switch to neutron by default. This patch does that, and has an auto configuration mode that mostly just works for the basic case. It does this by assuming that unless the user specifies an interface for it to manage, that it will not automatically have access to a physical interface. The floating range is put on br-ex (per normal), fixed ranges stay on their OVS interfaces. Because there is no dedicated interface managed by neutron, we add an iptables rule which allows guests to route out. While somewhat synthetic, it does provide a working out of the box developer experience, and is not hugely more synthetic then all the other interface / route setup we have to do for the system. 
You should be able to run this with a local.conf of just [[local|localrc]] ADMIN_PASSWORD=pass DATABASE_PASSWORD=pass RABBIT_PASSWORD=pass SERVICE_PASSWORD=pass And get a working neutron on a single interface box Documentation will come in subsequent patches, however getting the code out there and getting feedback is going to help shape this direction. Change-Id: I185325a684372e8a2ff25eae974a9a2a2d6277e0 --- lib/neutron_plugins/services/l3 | 18 ++++++++++++++---- lib/nova | 5 +++-- stackrc | 6 +++++- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2180099ee4..61b8402818 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -102,10 +102,20 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + local default_dev="" + default_dev=$(ip route | grep ^default | awk '{print $5}') + sudo iptables -t nat -A POSTROUTING -o $default_dev -s $FLOATING_RANGE -j MASQUERADE fi } diff --git a/lib/nova b/lib/nova index 16f6e9b39f..1369c409fc 100644 --- a/lib/nova +++ b/lib/nova @@ -128,7 +128,7 @@ fi # -------------------------- NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} + VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} @@ -659,8 +659,9 @@ function create_nova_cache_dir { } function create_nova_conf_nova_network { + local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" - iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" + iniset $NOVA_CONF DEFAULT public_interface "$public_interface" iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then diff --git a/stackrc b/stackrc index acb7d3f650..f42bd944fa 100644 --- a/stackrc +++ b/stackrc @@ -70,11 +70,13 @@ if ! 
isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-cauth + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth # Glance services needed for Nova ENABLED_SERVICES+=,g-api,g-reg # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol + # Neutron + ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 # Dashboard ENABLED_SERVICES+=,horizon # Additional services @@ -710,6 +712,8 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} +PUBLIC_INTERFACE="" + # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} From 78801c10f023eba12910e92c16a49e2ba7bb1e2f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 4 Aug 2016 14:10:07 -0400 Subject: [PATCH 0091/1936] enable dns resolution from guests in neutron When running a default devstack environment, having guests that actually can resolve DNS, so that they can do package updates from well known hosts. This addresses a gap between nova-net and neutron behavior in devstack. Change-Id: I42fdc2716affd933e9158f1ef7ecb20bc664ef21 --- lib/neutron | 3 +++ lib/neutron-legacy | 2 ++ 2 files changed, 5 insertions(+) diff --git a/lib/neutron b/lib/neutron index 5cab8e1716..b91e880d64 100644 --- a/lib/neutron +++ b/lib/neutron @@ -188,6 +188,9 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF iniset $NEUTRON_DHCP_CONF DEFAULT debug True + # make it so we have working DNS from guests + iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True + iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 44db16a435..2d85787252 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -779,6 +779,8 @@ function _configure_neutron_dhcp_agent { cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" From 3ac1ea85b16cfd5c0d88d3575fc5453cc5dea7c2 Mon Sep 17 00:00:00 2001 From: bkopilov Date: Mon, 6 Jun 2016 16:00:48 +0300 Subject: [PATCH 0092/1936] Add a multibackend list to tempest.conf A change was made to tempest.conf for volume multibackend. Previously, tempest used the following, with a limit of 2 backends: backend1_name = BACKEND1 backend2_name = BACKEND2 That was changed to accomodate >2 backends. tempest.conf now uses a comma separated list: backend_names=BACKEND1,BACKEND2,BACKEND3 devstack/lib/cinder uses a comma separated list with "type:backend_name": enabled_backends = lvm:BACKEND1,ceph:BACKEND2 This is in order to use scripts in devstack/lib/cinder_backends to setup devstack basked on "type". This patch allows parsing of the CINDER_ENABLED_BACKENDS to pass the proper backend_name to tempest. 
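Stripping the "type:" prefix from each entry and re-joining with commas is all the parsing amounts to. A compact sketch of the idea (the patch itself keeps an explicit comma-separator flag rather than the ${list:+,} idiom used here):

    # Turn devstack's "lvm:BACKEND1,ceph:BACKEND2" into the
    # "BACKEND1,BACKEND2" form that tempest expects.
    CINDER_ENABLED_BACKENDS="lvm:BACKEND1,ceph:BACKEND2"   # example value
    backends_list=''
    for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
        # ${be##*:} drops everything up to the last ':', i.e. the "type" part
        backends_list+=${backends_list:+,}${be##*:}
    done
    echo "$backends_list"   # -> BACKEND1,BACKEND2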
Change-Id: I76973c3fad4998a0f9e534fc9f6a271c1923f7b3 --- lib/tempest | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index aa09e9a39f..501480c7d9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -462,15 +462,26 @@ function configure_tempest { fi # Using ``CINDER_ENABLED_BACKENDS`` + # Cinder uses a comma separated list with "type:backend_name": + # CINDER_ENABLED_BACKENDS = ceph:cephBE1,lvm:lvmBE2,foo:my_foo if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then + # We have at least 2 backends iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" - local i=1 + local add_comma_seperator=0 + local backends_list='' local be + # Tempest uses a comma separated list of backend_names: + # backend_names = BACKEND_1,BACKEND_2 for be in ${CINDER_ENABLED_BACKENDS//,/ }; do - local be_name=${be##*:} - iniset $TEMPEST_CONFIG volume "backend${i}_name" "$be_name" - i=$(( i + 1 )) + if [ "$add_comma_seperator" -eq "1" ]; then + backends_list+=,${be##*:} + else + # first element in the list + backends_list+=${be##*:} + add_comma_seperator=1 + fi done + iniset $TEMPEST_CONFIG volume "backend_names" "$backends_list" fi if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ From 04d51e4943fff897b7f81799d267e75897fbe672 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Fri, 5 Aug 2016 13:48:23 +0300 Subject: [PATCH 0093/1936] Fix doc formating issue Change-Id: I9aa8c1fd114e4f4329e06ac263f337ea566d8355 --- doc/source/plugins.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 70469d6876..31987bc62b 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -99,7 +99,7 @@ The current full list of ``mode`` and ``phase`` are: should exist at this point. - **extra** - Called near the end after layer 1 and 2 services have been started. - - **test-config** Called at the end of devstack used to configure tempest + - **test-config** - Called at the end of devstack used to configure tempest or any other test environments - **unstack** - Called by ``unstack.sh`` before other services are shut From cea7ec8d4bd9f93568eb2e9e05c306535f0ec292 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 5 Aug 2016 08:29:54 -0400 Subject: [PATCH 0094/1936] update front page of devstack docs The devstack docs have gotten a bit meandering so even the quick start guide doesn't get you to a working setup without referencing other pages. This attempts to pull this back in a bit. Change-Id: I608331cbdae9cbe4f3e8bd3814415af0390a54d0 --- doc/source/guides.rst | 68 +++++++++++++ doc/source/index.rst | 206 +++++++++++++++++----------------------- doc/source/site-map.rst | 21 ++++ 3 files changed, 176 insertions(+), 119 deletions(-) create mode 100644 doc/source/guides.rst create mode 100644 doc/source/site-map.rst diff --git a/doc/source/guides.rst b/doc/source/guides.rst new file mode 100644 index 0000000000..c2c7b9163a --- /dev/null +++ b/doc/source/guides.rst @@ -0,0 +1,68 @@ +Guides +====== + +.. warning:: + + The guides are point in time contributions, and may not always be + up to date with the latest work in devstack. + +Walk through various setups used by stackers + +.. toctree:: + :glob: + :maxdepth: 1 + + guides/single-vm + guides/single-machine + guides/lxc + guides/multinode-lab + guides/neutron + guides/devstack-with-nested-kvm + guides/nova + guides/devstack-with-lbaas-v2 + +All-In-One Single VM +-------------------- + +Run :doc:`OpenStack in a VM `. 
The VMs launched in your cloud will be slow as +they are running in QEMU (emulation), but it is useful if you don't have +spare hardware laying around. :doc:`[Read] ` + +All-In-One Single Machine +------------------------- + +Run :doc:`OpenStack on dedicated hardware ` This can include a +server-class machine or a laptop at home. +:doc:`[Read] ` + +All-In-One LXC Container +------------------------- + +Run :doc:`OpenStack in a LXC container `. Beneficial for intermediate +and advanced users. The VMs launched in this cloud will be fully accelerated but +not all OpenStack features are supported. :doc:`[Read] ` + +Multi-Node Lab +-------------- + +Setup a :doc:`multi-node cluster ` with dedicated VLANs for VMs & Management. +:doc:`[Read] ` + +DevStack with Neutron Networking +-------------------------------- + +Building a DevStack cluster with :doc:`Neutron Networking `. +This guide is meant for building lab environments with a dedicated +control node and multiple compute nodes. + +DevStack with KVM-based Nested Virtualization +--------------------------------------------- + +Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization +`. With this setup, Nova instances +will be more performant than with plain QEMU emulation. + +Nova and devstack +-------------------------------- + +Guide to working with nova features :doc:`Nova and devstack `. diff --git a/doc/source/index.rst b/doc/source/index.rst index 68ec174f3a..c1302eb930 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,163 +1,131 @@ -DevStack -======== +.. Documentation Architecture for the devstack docs. + + It is really easy for online docs to meander over time as people + attempt to add the small bit of additional information they think + people need, into an existing information architecture. In order to + prevent that we need to be a bit strict as to what's on this front + page. + + This should *only* be the quick start narrative. Which should end + with 2 sections: what you can do with devstack once it's set up, + and how to go beyond this setup. Both should be a set of quick + links to other documents to let people explore from there. + +========== + DevStack +========== .. image:: assets/images/logo-blue.png DevStack is a series of extensible scripts used to quickly bring up a -complete OpenStack environment. It is used interactively as a -development environment and as the basis for much of the OpenStack -project's functional testing. +complete OpenStack environment based on the latest versions of +everything from git master. It is used interactively as a development +environment and as the basis for much of the OpenStack project's +functional testing. The source is available at ``__. -.. toctree:: - :glob: - :maxdepth: 1 +.. warning:: - overview - configuration - plugins - plugin-registry - faq - hacking + DevStack will make substantial changes to your system during + installation. Only run DevStack on servers or virtual machines that + are dedicated to this purpose. Quick Start ------------ - -#. Select a Linux Distribution - - Only Ubuntu 14.04 (Trusty), Fedora 22 (or Fedora 23) and CentOS/RHEL - 7 are documented here. OpenStack also runs and is packaged on other - flavors of Linux such as OpenSUSE and Debian. - -#. Install Selected OS - - In order to correctly install all the dependencies, we assume a - specific minimal version of the supported distributions to make it as - easy as possible. We recommend using a minimal install of Ubuntu or - Fedora server in a VM if this is your first time. 
- -#. Download DevStack - - :: - - git clone https://git.openstack.org/openstack-dev/devstack - - The ``devstack`` repo contains a script that installs OpenStack and - templates for configuration files - -#. Configure +=========== - We recommend at least a :ref:`minimal-configuration` be set up. - -#. Add Stack User - - Devstack should be run as a non-root user with sudo enabled - (standard logins to cloud images such as "ubuntu" or "cloud-user" - are usually fine). - - You can quickly create a separate `stack` user to run DevStack with - - :: - - devstack/tools/create-stack-user.sh; su stack - -#. Start the install, this will take a few minutes. - - :: +Install Linux +------------- - cd devstack; ./stack.sh +Start with a clean and minimal install of a Linux system. Devstack +attempts to support Ubuntu 14.04/16.04, Fedora 23/24, CentOS/RHEL 7, +as well as Debian and OpenSUSE. -Guides -====== +If you do not have a preference, Ubuntu 16.04 is the most tested, and +will probably go the smoothest. -Walk through various setups used by stackers +Download DevStack +----------------- -.. toctree:: - :glob: - :maxdepth: 1 +:: - guides/single-vm - guides/single-machine - guides/lxc - guides/multinode-lab - guides/neutron - guides/devstack-with-nested-kvm - guides/nova - guides/devstack-with-lbaas-v2 + git clone https://git.openstack.org/openstack-dev/devstack -All-In-One Single VM --------------------- +The ``devstack`` repo contains a script that installs OpenStack and +templates for configuration files -Run :doc:`OpenStack in a VM `. The VMs launched in your cloud will be slow as -they are running in QEMU (emulation), but it is useful if you don't have -spare hardware laying around. :doc:`[Read] ` +Create a local.conf +------------------- -All-In-One Single Machine -------------------------- +Create a ``local.conf`` file with 4 passwords preset -Run :doc:`OpenStack on dedicated hardware ` This can include a -server-class machine or a laptop at home. -:doc:`[Read] ` +:: -All-In-One LXC Container -------------------------- + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD -Run :doc:`OpenStack in a LXC container `. Beneficial for intermediate -and advanced users. The VMs launched in this cloud will be fully accelerated but -not all OpenStack features are supported. :doc:`[Read] ` +This is the minimum required config to get started with DevStack. -Multi-Node Lab +Add Stack User -------------- -Setup a :doc:`multi-node cluster ` with dedicated VLANs for VMs & Management. -:doc:`[Read] ` +Devstack should be run as a non-root user with sudo enabled +(standard logins to cloud images such as "ubuntu" or "cloud-user" +are usually fine). -DevStack with Neutron Networking --------------------------------- +You can quickly create a separate `stack` user to run DevStack with -Building a DevStack cluster with :doc:`Neutron Networking `. -This guide is meant for building lab environments with a dedicated -control node and multiple compute nodes. +:: -DevStack with KVM-based Nested Virtualization ---------------------------------------------- + devstack/tools/create-stack-user.sh; su stack -Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization -`. With this setup, Nova instances -will be more performant than with plain QEMU emulation. +Start the install +----------------- -Nova and devstack --------------------------------- +:: -Guide to working with nova features :doc:`Nova and devstack `. 
+ cd devstack; ./stack.sh -DevStack Documentation -====================== +This will take a 15 - 20 minutes, largely depending on the speed of +your internet connection. Many git trees and packages will be +installed during this process. -Overview --------- +Profit! +------- -:doc:`An overview of DevStack goals and priorities ` +You now have a working DevStack! Congrats! -Configuration -------------- +Your devstack will have installed ``keystone``, ``glance``, ``nova``, +``cinder``, ``neutron``, and ``horizon``. Floating IPs will be +available, guests have access to the external world. -:doc:`Configuring and customizing the stack ` +You can access horizon to experience the web interface to +OpenStack, and manage vms, networks, volumes, and images from +there. -Plugins -------- +You can ``source openrc`` in your shell, and then use the +``openstack`` command line tool to manage your devstack. -:doc:`Extending DevStack with new features ` +You can ``cd /opt/stack/tempest`` and run tempest tests that have +been configured to work with your devstack. -FAQ ---- +Going further +------------- -:doc:`The DevStack FAQ ` +Learn more about our :doc:`configuration system ` to +customize devstack for your needs. -Contributing ------------- +Read :doc:`guides ` for specific setups people have (note: +guides are point in time contributions, and may not always be kept +up to date to the latest devstack). -:doc:`Pitching in to make DevStack a better place ` +Enable :doc:`devstack plugins ` to support additional +services, features, and configuration not present in base devstack. +Get :doc:`the big picture ` of what we are trying to do +with devstack, and help us by :doc:`contributing to the project +`. diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst new file mode 100644 index 0000000000..480d6aaf5e --- /dev/null +++ b/doc/source/site-map.rst @@ -0,0 +1,21 @@ +:orphan: + +.. the TOC on the front page actually makes the document a lot more + confusing. This lets us bury a toc which we can link in when + appropriate. + +========== + Site Map +========== + +.. toctree:: + :glob: + :maxdepth: 3 + + overview + configuration + plugins + plugin-registry + faq + hacking + guides From 6a42a85b561c35157b14f63ab02536e18e5bb0d9 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 21 Jul 2016 11:11:54 -0700 Subject: [PATCH 0095/1936] Fixes for linux bridge and Q_USE_PROVIDER_NET ===Set bridge_mappings for linux bridge=== The external network physnet needs a bridge_mapping to the public bridge when the L2 agent is responsible for wiring. ===Add PUBLIC_PHYSICAL_NETWORK to flat_networks=== This network must be present in the ML2 flat_networks config if flat_networks is specified. ===Set ext_gw_interface to PUBLIC_BRIDGE in provider net case=== ext_gw_interface must be a bridge in a bridge_mapping when Q_USE_PROVIDERNET_FOR_PUBLIC is used. 
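Concretely, with the common devstack defaults assumed here for illustration (PHYSICAL_NETWORK=default, PUBLIC_PHYSICAL_NETWORK=public, PUBLIC_BRIDGE=br-ex), the three fixes boil down to roughly the following, using devstack's own iniset helper:

    # 1) the linuxbridge agent learns how to wire the public physnet
    iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "public:br-ex"

    # 2) both physnets end up in the ML2 flat_networks list
    Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=default,public"

    # 3) with Q_USE_PROVIDERNET_FOR_PUBLIC=True the router's external gateway
    #    device is the public bridge itself rather than a brqXXXXXXXX-XX device
    ext_gw_interface=$PUBLIC_BRIDGE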
Closes-Bug: #1605423 Change-Id: I95d63f8dfd21499c599d425678bf5327b599efcc --- lib/neutron_plugins/linuxbridge_agent | 3 +++ lib/neutron_plugins/ml2 | 12 ++++++++++-- lib/neutron_plugins/services/l3 | 11 ++++++++--- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index e5a7b76187..437aaeb896 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -62,6 +62,9 @@ function neutron_plugin_configure_plugin_agent { if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE fi + if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" + fi if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS fi diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 2ece210a0b..7e8020930f 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -95,8 +95,16 @@ function neutron_plugin_configure_service { # Allow for setup the flat type network - if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" && -n "$PHYSICAL_NETWORK" ]]; then - Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$PHYSICAL_NETWORK" + if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" ]]; then + if [[ -n "$PHYSICAL_NETWORK" || -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=" + if [[ -n "$PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK}," + fi + if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK}," + fi + fi fi # REVISIT(rkukura): Setting firewall_driver here for # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 61b8402818..a4e72483b4 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -306,10 +306,15 @@ function _neutron_configure_router_v4 { if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Search for the brq device the neutron router and network for $FIXED_RANGE + # Get the device the neutron router and network for $FIXED_RANGE # will be using. - # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then + # in provider nets a bridge mapping uses the public bridge directly + ext_gw_interface=$PUBLIC_BRIDGE + else + # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 + ext_gw_interface=brq${EXT_NET_ID:0:11} + fi fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} From 894ccc90c0a0795c84ce69afd9a68863a9004f20 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 8 Aug 2016 16:19:05 -0400 Subject: [PATCH 0096/1936] disable metering extension in tempest if q-metering not running We don't run q-metering in default single host configuration, so we should make it so that tempest won't attempt to test for it either. 
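The same conditional pattern works for any extension whose backing agent is optional; a minimal sketch, assuming devstack's is_service_enabled helper is in scope:

    # Only advertise the metering extension to tempest when the metering
    # service is actually part of ENABLED_SERVICES; lib/tempest later filters
    # these names out of the extension list it writes to tempest.conf.
    if ! is_service_enabled q-metering; then
        DISABLE_NETWORK_API_EXTENSIONS+=", metering"
    fi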
Change-Id: I928be70e3b10fc3753fd1081631e54fa839b671d --- lib/tempest | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/tempest b/lib/tempest index e4f80b835d..10c1a71805 100644 --- a/lib/tempest +++ b/lib/tempest @@ -569,6 +569,12 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions # Neutron API Extensions + + # disable metering if we didn't enable the service + if ! is_service_enabled q-metering; then + DISABLE_NETWORK_API_EXTENSIONS+=", metering" + fi + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint From ea270d50d091bad57ee0b21b405c41bf2d798631 Mon Sep 17 00:00:00 2001 From: "watanabe.isao" Date: Tue, 9 Aug 2016 16:28:52 +0900 Subject: [PATCH 0097/1936] Give PUBLIC_INTERFACE a default value Also means to make it changeable. Closes-Bug: #1611247 Change-Id: I0dc253b7ecf44a49d152f97f4858f7f2cf2ca6b2 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index f42bd944fa..4fefe8da30 100644 --- a/stackrc +++ b/stackrc @@ -712,7 +712,7 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} -PUBLIC_INTERFACE="" +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""} # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} From 0ffdfbdbd72ae447eb4b5e3d0f255c5498a07a36 Mon Sep 17 00:00:00 2001 From: "Lubosz \"diltram\" Kosnik" Date: Tue, 2 Aug 2016 16:35:22 -0500 Subject: [PATCH 0098/1936] Run n-cpu using LXD_GROUP when LXD virt driver Enabling nova-lxd require to run n-cpu using lxd group Change-Id: I0553dafcc797fcc1342501a558c7455261cf3daf --- lib/nova | 2 ++ stackrc | 3 +++ 2 files changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 67a80b9b16..8fe4c29d86 100644 --- a/lib/nova +++ b/lib/nova @@ -825,6 +825,8 @@ function start_nova_compute { # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP + elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do diff --git a/stackrc b/stackrc index acb7d3f650..b5d1bc0b6c 100644 --- a/stackrc +++ b/stackrc @@ -586,6 +586,9 @@ case "$VIRT_DRIVER" in LIBVIRT_GROUP=libvirtd fi ;; + lxd) + LXD_GROUP=${LXD_GROUP:-"lxd"} + ;; fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; From 7b229359671f23b73e8df9f6ef6dbbe443cc96ae Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 9 Aug 2016 13:29:11 -0400 Subject: [PATCH 0099/1936] delete compute api extension config for tempest Compute API extensions are deprecated, we should never be setting this in Tempest in master. 
Change-Id: I6ad25fab48277abf8a000a275d3fea73a595cdf6 --- lib/tempest | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/lib/tempest b/lib/tempest index 10c1a71805..e46fc149d7 100644 --- a/lib/tempest +++ b/lib/tempest @@ -559,15 +559,7 @@ function configure_tempest { # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config tox -evenv -- tempest verify-config -uro $tmp_cfg_file - # Nova API extensions - local compute_api_extensions=${COMPUTE_API_EXTENSIONS:-"all"} - if [[ ! -z "$DISABLE_COMPUTE_API_EXTENSIONS" ]]; then - # Enabled extensions are either the ones explicitly specified or those available on the API endpoint - compute_api_extensions=${COMPUTE_API_EXTENSIONS:-$(iniget $tmp_cfg_file compute-feature-enabled api_extensions | tr -d " ")} - # Remove disabled extensions - compute_api_extensions=$(remove_disabled_extensions $compute_api_extensions $DISABLE_COMPUTE_API_EXTENSIONS) - fi - iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions + # Neutron API Extensions # disable metering if we didn't enable the service From 5068b291f427a55b40357cc0cb77875294963e39 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 9 Aug 2016 14:52:57 -0400 Subject: [PATCH 0100/1936] tempest: toggle allow_port_security_disabled=True This enables a new test in Tempest to run on a per-branch basis since by default it's disabled because it won't pass on liberty given the bug fix isn't in liberty and won't be backported there. Depends-On: I20b8d5d2a300c83a59bdb33374fc20447ce2ede3 Change-Id: I18fd5e0978795fec39a763e1e0f07d758905b9b8 Related-Bug: #1175464 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index d1e56a4c4d..9182fea387 100644 --- a/lib/tempest +++ b/lib/tempest @@ -352,6 +352,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion fi + # TODO(mriedem): Remove allow_port_security_disabled after liberty-eol. + iniset $TEMPEST_CONFIG compute-feature-enabled allow_port_security_disabled True iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} From 5c39154c39cb23c78f3f130719614624681bd1f3 Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Wed, 10 Aug 2016 15:58:27 +0100 Subject: [PATCH 0101/1936] Ensure testing configuration can run as late as possible Before the code in the extra.d plugins was removed from the devstack tree they could define the order they ran. When this code is decomposed into a plugin, there is still a need to do some form of ordering. This caused problems with the Ironic devstack plugin and Tempest because the code is run in this order: 1. The tempest configuration is run from extra.d, processing DEFAULT_INSTANCE_TYPE, and writing the flavor_ref to tempest.conf 2. The Ironic devstack plugin is run, creating the flavor needed for DEFAULT_INSTANCE_TYPE This leads to build failures as tempest can not find the required flavor, so it writes which ever flavor it can find at the time into flavor_ref. Ironic now has code it its devstack plugin duplicated from the tempest plugin to work around this problem until this is merged. 
This patch fixes this by using the test-config phase to move the tempest plugin as late as possible in the devstack process. Change-Id: I3d98692e69d94756e0034c83a247e05d85177f02 --- extras.d/80-tempest.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 6a3d121497..15ecfe39eb 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -11,13 +11,16 @@ if is_service_enabled tempest; then # Tempest config must come after layer 2 services are running : elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Tempest config must come after all other plugins are run + : + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # local.conf Tempest option overrides + : + elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Initializing Tempest" configure_tempest echo_summary "Installing Tempest Plugins" install_tempest_plugins - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # local.conf Tempest option overrides - : fi if [[ "$1" == "unstack" ]]; then From a2ec7fdb48d5df464fec3fae1f93ae3ea1071c54 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 10 Aug 2016 11:14:00 -0700 Subject: [PATCH 0102/1936] Configure project_network_cidr for tempest This value defaults to something not (necessarily) in our fixed range, which will cause spurious test behavior. We know the value for this, so just configure it properly. Change-Id: I0ee3b71f509377dc7174ce97575e60ee2095f893 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6bfa8890b6..7a1d25f195 100644 --- a/lib/tempest +++ b/lib/tempest @@ -236,6 +236,8 @@ function configure_tempest { fi fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} # the public network (for floating ip access) is only available From 14d86e841c964ba12a57a652ece4239ca9283c92 Mon Sep 17 00:00:00 2001 From: Gregory Haynes Date: Fri, 29 Jul 2016 03:45:37 +0000 Subject: [PATCH 0103/1936] Set oslo_messaging_notifications driver This config option is requied in order for nova notificaions to function, and enabling it doesn't cause any harm (there is another option for turning notifications on). Change-Id: I309af6cc43af485f795c368d304ebe71fceb1a03 --- lib/nova | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 67a80b9b16..c59a718b56 100644 --- a/lib/nova +++ b/lib/nova @@ -541,7 +541,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset $NOVA_CONF oslo_messaging_notifications driver "messaging" fi # All nova-compute workers need to know the vnc configuration options @@ -582,6 +581,9 @@ function create_nova_conf { iniset $NOVA_CONF spice enabled false fi + # Set the oslo messaging driver to the typical default. This does not + # enable notifications, but it will allow them to function when enabled. 
+ iniset $NOVA_CONF oslo_messaging_notifications driver "messaging" iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" From bd6614a8e0040690aca5121cdf08a7ee17fe7984 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 11 Aug 2016 09:05:16 -0400 Subject: [PATCH 0104/1936] Add development page with devstack It turns out we never really had a document on how to work with devstack in the devstack docs. At one point this was just cultural knowledge passed down, but with the size of our community, we can't rely on that any more. Change-Id: I28f896ea507ccbba5164ebfc5415d22207f52e98 --- doc/source/assets/images/screen_session_1.png | Bin 0 -> 386703 bytes doc/source/development.rst | 140 ++++++++++++++++++ doc/source/index.rst | 3 + doc/source/site-map.rst | 1 + 4 files changed, 144 insertions(+) create mode 100644 doc/source/assets/images/screen_session_1.png create mode 100644 doc/source/development.rst diff --git a/doc/source/assets/images/screen_session_1.png b/doc/source/assets/images/screen_session_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6ad6752bb10870da57b35c0fa3930419a4d0e445 GIT binary patch literal 386703 zcma&Nb980V*Cl#mCl%YaZQHg{F)Ox>ifvTvq+;8)Z9D1I*Zuq68~t97(SMzD_SyH| zJ=Wc8t-1D`D_lWN91a=_8UO&mNlJ()0RUiU004**B=Gl$2NLl1_XWgBNKyq75^`-v z{@>#dXHgAjWjixxS3^frfU<>)v$LtAalkYb06+kc6cJQ$TRqox^+FNF_G!|QzRCjR zJ)wDA$qFNj2mpccD=MNW3JlN@Q0W$iNvJBKfS$}+LDCi?B1sT;9KBvlK8YhGbeq~; z(ILip`@E35vfJgPxo&2qfsshW5eNo=LLvQM+#GfXorGOF?%HBPp3ZsA-8whEt7bW|vMDR( zPg}*8$@*(QMg|#XV@MC1ImA@{72NjEEi57@gr2Vccb;`|5m>w9s` z!=>GdGILc81;lP}lrcvIYz~~K>ACM=Jx8qWDPr#{Pcobyo|2mJf|MtdTXmhLs)|VU z-55DH&mxVLwKodeAN=l1W8>n~uBEHCnULn!}c=spa`g*;O2sL~UhEo!XoDg(}KIaA(aE4cr^&yQ|BV}lJCVfkzeTyS~YNH+Jpx`1>qW9Dq)a=(0^ckyHy;Hf^M zMxdR%y10t-nr@!-?pPZ!)zz_ZbS&!n*z{Kw2Ok{RDPuqVXy|frHhSw&Ml}VsC|+33 zeei7#mxq^?yR^TMC}0rMjlG0XZ!h)QRTCUkd#=^m+{6$Lf-6EPe`h!Z`j$(w8lyRrC5xk zYRqTjNJhuP$$39mD|Q!3iJ~@G$lLU1F;K{hGN-CD(%06Vmht;F%dOj9tT-huh1I(i zv#DWp5b@$69#vIJntlqKxq8)adUt=dq%Xad1l>kCw&f>7ct-?9Dxu3y3!b~in{jrs zv*cr`zjK!E%j^{?f271_Tp>w+#h8_KD_XDhpcpGpZu3Q4F!&wX*|RsL=Of$%YuOk3 zvSQbIkXem6YY})_X7N~Oc^#+J#T5KZY9Zfj$REU$(T*3ylz0bM?gyWXzT2;rHnxt5 z4h0^Z3GgkC)H(c^|Y#%?$Pv_3Cn?nN@n9vx!5fU%!7FaT_I zs+bM=5HOBVsY45v2fVvc=aiw<)P;vX&b!B= z`qKSF$fVZw$>-u9_=`jR#W2U>+;KD(4+ql+j={KTE!wxZ)1C`MHgx2BTL63zWf0=EL=Jd8Qs&vSbWL4d|oy(Q- zR9s7FVqNaRB=@U6gtsMHoAS>PjCng`zpS_6L{QHBD6K8frSQjw@D4c1M+;alPJ~GF z!4K`|!rK~9^~gSdfs^p`60Dl=Jv|nX#Mk8wDf@?M*;_r5eSAn_h(bKxzuaf!`gaP9 z2Lj1*Q^&PZo9-|guMEc(OqR2FwS~M^Z{aKtj5zNJY`$)>C?3grajt|2Z=9JQ`aXqL zElaXKyI-jh9S0B((mBz=hmU8W3pTCb6?VB9-HtPIl{A6BaSB;K- zJq8vBOj~z}PAoahd)C@$MH|JMOF9GWdrd>#f z22s@CDwb!^UJT{v<Q(sK6k011?}RnBhOWXL;mK4BzxRk+vw2V`As#1$!vnyQ zz!fy9nK$a#U2Xxj1I#oBTd|MPyLPE#j5PbOOjHbsLQJkZ;&R5MSD^{fHPOqL)w83p ze1_yDYzpv%iFO3n6iZ@m39e}XdwV0TU+xuk7~DpCGa`+M)V<O}NLt+}wun|*+hiv;=$g{898Z}TCGRsMNPP5Y(w^WQ1zwXkAL)w?047P&+$mC5((a zyGy4A_&s{O9(f7TjhCux-5CaEucgUBQiea@S+Kd=$A(mVj)k%Y720~{g$KHj+6#6j zPB{&6CN%xAN{f@pIyrJnXsY|2I-;y$CXL05lNQ{eu<44``AbOJ_Um;{w)}B>`crul z+3}FmZd6NKla5>U%-dnHMwFOz-7R}ZWnU!AI6|oY#}RLn`?FX>8oL=wN}o zX|GWeS32X6@@eeQ+_*UJ$9W)3Ut;-Nf24mWms@L98<|yOYUvxA;gqVig_-7e%9INK zt(pC4EBzEqwJ2Yk0_&N#UhnBDJgVifAhYGe^HG=E1FM5Ev*}5cYl@2=_ZB5Zm1!OK zW%8(k;TpP)zEWC(9DaL!Fdm0N4K zH3te{hwI`8sd}4{2cdjjp4pZMb<>D^noP~S zLaO<0-oDzWI@esE6C^4*MokxD#F*K9*H~{m;IB+8@!ta8K6+J!F#&t$XTY-7RWTY? 
zO&<>6gz-RUmxfA~m>k+uvuNZX?w^{b4ht~xyLwoCY;gB}xSo`m2CF*I!%3eQ#C4!F z!%}7cX$f$wll?Fdp@+YGDit_k>gmqc zk@}OIqtn42cDZ_fO*xBp58ao=HHQgq z^s^1)lUb<4Y~A8^HRg-4d5d*bjqBT-s~RzO+=(OO7QSR{j1b|vZ2jQ#($2`REWaX$_GFs`v89)ya32?KSuHV;O>9z0lyWd`vdQsemAC#i7@ z4iPnYBNl6Wj5u%NI=eUq?PsJ7-inb9&H>;hP2kY`j?S>~2wh%$ipQ|M$ zk8Y}O@QXSKurQ29-WINfzCKiuUZz@nuc=YA7o!=-4uYnTfbE4=Z(^r+{v3FaAF@0g zf5ZLa%}dgi<4G<_2NA1+@KF1O#WX~|wIzwfW!u}4G%~)=-h`T@WYLf1YVMuXqqOO@ z2O#N&&v{dsUv~YaSp|vDbC#HE1UyiRZl#HFsA+9Za(|iTHNo1L*XXjyJ;VpaccD6* zHQ+ybLn`QYkIPOB-R`1a7;Z7WE)+yxgdri-l^Y7fN$=sp;R3@Y#UUZl!6dvc7=khX ze36m9I7?j9Keu=95lW1QOziIN6kiei^_sMr1P{L_?;2U;sW*<5Lbbza8Xpv?&gO!2v|YtX z2|Ah#1#`pEK4r}!LyMH)mJ)nE=CjUx52my_w4`4}U3fp@Hg7mmnPoa>`z{&`TB*P6 z*p~_`<-3T^J|{E_==rgr#~k)<$TLYfnL*Xnj>de~RztZv7Rcoj{*N^YfJEVS6J5(8pXk!JgT_}LCemWm{?AepP zj@cL~*7g0Z$Fl4dBOTJM#z;lmU+vBGK}u_VHattNCg3Z76k9P^Od!VSOc1P(t`_J= z3Z2_^-xo>`t|XyOXy{~^rnP5I1EzHy(rJ~JvOen#b?XBkJefFNCK%lgVTOhvjm!pq z+27rGXs=wI*7?n+QYVWU-s5wxEqGcRr^+l&Pa57xL#dPWr(p-=Q8V6N%Jw8;ie@uI6;gX^^qU^#*!$O~`Icd7<;ISpBs- zbSOS77&#{AyDC+Tq+9hjrrSmtB@x(WV4SPKI<%L14vK$Qs#Sp*{#CX*!Nj#XD1~TP zO2bwWqgsU2+zGExz<;0!mlOk7UCIz;WZ6qfJ&|1swD^0nGRL4LS$n$pP2zAFcf6Dw z^ZR@;|gyeRX$U|&O38!y*%)(t{MZ#VRb!}ztt#l%j8z=QmZv0EH zs$C!r2;c9*usA(m1*>$CJR@kh{u*HmOxcP}&SKgRiGf5Fu5i)e2vhi)o}d^(PmpKNyQLvWjUz$ePS<77)KZe$>asbiK=IR-fPZ$ zqLW`Y_tzV;Lwy-8vPs8_nxzoO{x^B=>aX-xQlzW-G$*>Ipce=vsRgn=SfyzZWmLgc zL;^tIZg%DTz$5HoMPDwO^k`6OSP>d29h;{8`qhM9x$5Oh-b|-yzi=?;(CIXaF7zOH@St*dh6B`n09)8a?e<3fNE3sO_jWI+O$Fhnusm+0 z>(u3>$>0f?z(UO+BPL@`eZB*OnJ@I_LB=NP)hbFa!CO@~*-fvK+|1(!^DeKXe^e)u zcfWVX0 z)Sml4i&Rbu=W%Qr4$F5#A6bi6qmg?+)`Q%h$@|lt+lx~|qEPGeGKfW(X$oVIpKJMm z?Sr%2yEJ=vDZ-2_FI!3ZolVukFW3Nbdw;vv>c5M!;WXphTAgX#|8fDwbs*6eunG`c z$Eu94CRV|O_ZO_E>!N;M^3#*eT5X+Pd zI{#E(Io&ag{XXor@roQ42RYkf*#V<0i%B6FK(#E@ZVNycew-9t zuE%LkqbvK(PJseBF3UIkVaj5f+8&Rt zeXKW7e-vUBt}yv*LFtaLzUvXhPp#wa%W9DNBDGfT9tr0Wkboz~{}RcE8JC=z!>4Ca z=E@L6`(`rxV{V^^swTg2t{rpw(Vq_npn6X^osmcIDcj=Y|Dq|I-|!%$0=7X5(7Vlz zj@y{v%`>#iQ6P(3FT;ZZ3ou@^k?|)-VYyy$xy}V;De7TxSZ7GWZyu~=b*}oF0C6D0 z%NJQAg7M00=pY<sSpwo-9Vzy2 zE!%d)F@DjJSWXu2C$#wvk6~|KXhclqb|rSeEFkch#9>zau?Y1PhB(oRLI4(fP2(_H z8e_(r!=780U25ZS1A}yJ4sZ<}?J?`TFT7la&);YQ$imfk|iB*p|72d;P9`R zAK;VgWI=hsQ%#q$%mzYgSL0KT_|j6Kug%c|J*J~)!F8FvF%pe3FeS5vIPo!Au6r=e zOWj03mtK_9Uoi@Em)|VAJPG;34o?7^XW(mDeJV!7kfTkPrzUIj5WVN@wpj+twl@tP zIJOtdh&J>cQf`=JxqZSI3wC`uJC@ckU7R>aoegYgj$JlANO<|pU#=~*uKDrjSX@~m zJs0NEQJUbUe%mwigpLr6yKe}7D(zBlN{=vw?Z^vA`62oh-k=5X|fn?mLJXf zpG#Z%O6+o2m}@0&6v+;{1Wq30ILGai7AWTDG|Hg(qFH3zO}sa>e5a+@v0B3}oyuPi z161VmB<9EgU z@JE&uE~3k=$nwL` zs^Y&Mr84@GyZn4l^$T9k9PDb2kXi2Fti*HxRe+e#g~uAQ1^M;GL;9%&#>pUMfA zEvY+M^tadtZ!$I{^6+ai%NNH|V$SG@c#q!0_37)4)Dw@?qe=tf8)y92;jldb4(3tJ$mvJ?vBKp(7fRybU^5u`a{b+lyU zKtwCix+lp=Tb>f!j$E_F{Hn5_`5(CLBEkG!@HD;%*E3lbtccmk#tuB6+61=?;RdSF zAV=gxsD+>1^Zt&Of1M<;8I`0CYU_D|j+>|;kVB!M4(|YnDm*;U2Cc$i z^Q6!x^G#p!0%Ha8dE->o_}!bOj<-4&N7Zjxyw<|e`|zTSo$Jfle!m9BjA;nL2>j@Q z?s5Zo&3oSD6aZLCtnlLsy+vW}k>vDb+*mgE_Ijy8V;vEpgkOHUStw^yNa7JgnqvWd zTfgPu0+0wg_vT(s*V;*9{IoN)>@h}AvUc8&fmfeOpgFpXZG~tmOgmh5J4DlT{)dZp z8x^F1Ar3KGZYH3Ute*9Cvy~r_gJSy0ct{I3wj_JUID)SIV|j+K#(bdnt+96f0|Fj1 z$zWpz?WP(gNxWkEo8v11VBE!%^UiPGG<%=-zc1?_9{1`M%RcqV<>&pP#UTgt^GFCCLY(^TL zM4iCbq9^Oh>P*GicZ{9D3!@*tmgzWj#<$Pr(l?E&;<@%&x zh{^lk118szXtEqyK?nc?B^HFoY}C)(w0_S^pMC?2#pDRZ5M-2)yN|!_jU|wi_Yd7x zu+EZX^1&C-kzl67qKyaM3TAQ*IDq1(u{vwff{~~}pO?a=tte17>&d^_qx}<=YaCo( zM1|wm!j{!!% zTRa_Y3Fn#LyfZfY)+@#^tOj~{E-!KPCCFDC!XtRg}r8N)KjZ1Ph+^EU*0?+Gjn zHGU;8xGWM5L~Zg{fKFl(?9ct?qQq%7HlQ>DGd78Hk(R7gnT@B#XtlzUxEw-2KY_a4 
zqCF}i3t&xU=WiykCdowA0bZ3m(K~c;7?jde>eHz*y4mlyBg+k-HaWU~8^X0hDOzN^C4B(A|&?1t8({^o#_J7KH1PaipE92`f5F6%1 z4dPH`icUD6ku27MYH2Q5V|XM$b@ZNY zA^LTaw-M3?@pK(iTHZY6AuoIk)tc*7oqUQqSnC%%s>YrnCm8$MTENIs#vSnQHs4=$ zZCoS)M!RfyN;ZtLcqzwwe@tZO0G;;lYFWgVY6j6u)8UxM_)a$Y$BLzR)?hRL6{)uBpas`Xqgnm_wIjd|5xM5%=@sEJ71?jzk z>7O*6FdhZuQW;BHDFr^2UCpI+m{@Jpm@l!v_bcS&P(QZ8L6&P-s_LvLI3gNZa}hCc z)O}yzf*mD=6-Q(DRqBO7|m?|rfRtd-%o+kS|ctv#xlFI&#F&g)=m$?T>0Fc!E7?0w#P9IIGcYd zBEYlk%=7kNWBWCH!q61hc$B|!KKgXvcGqNkoKIg_&HVXl>#MR-zFoPzxnVyF$}(|# zhK8ecWMyF5sWWUavy9*mXoX?7-UKV8fC_Q67h4^tja5qeSxgYn4|xgTPV@?fR47|T zEK|H-b>@hPE#3&0vMJSO6Qr&Ey%|aeCcL}^Q8@kQ!1?J^l(L>{$E!Rw#>j^;Prnzk zwHSf#iBt|~H=46J*dxC#M|Ahf+;@Y!V87A+%`HZA?X@>ZE z!5^CH&O>EbH&-CbNzUDd{Q+C(PW}7utaCJ~mDlECb+t~L_{kfn4|et}IEjl|`fC>& zTmZG4YFI_LWQ@BbC?RbKjD4;r4|O;(T)t3Wct08bf~tj z%J)jf+Q&;K6RNt3Yx9F2G5(now%%;t2cT*QH=*`0YJusN#`-9t9swpc4h5vtKxi_B z_0-y{%lS%>jy^5#ZL=~^lhQF$5baVY{`*0;ZdDyf;O(oi;RJOS%|G+toM?z{22Ku; z#($Gto4xuQXFApxRw%Q8mkP`WsX1hRMQo6DDpi`qnpu*25EJ%9^DX&@9_fOump^km zD@X0#`N8;x>FJN{$cE@?8nevXVgh7GD04vmC&b!7-KOqJ0^w1(_~ZMJ=+K0^y!3)V zM_PZn1Z$ye*vX0Go`4_`D=Rd zc`6U+jgWwnMw@u6U|`a1_wXy}a(zZ~9GBC!f{sYAp*FNJT{_Mk&FUX}`p8-h{1RJD z?bX_7yr|bpTvS+Z+Zs+(jfFx^>W%096`+}!{nVUS7zq{@JWB(Y&Z*cFl4%{|;Up<5 z0xe=@9}k)nDKPIjxlmwbm)2Jy&E>0dfFiR6Gw#KWBpbu z)C^nbTjjbSuFz;gmk5fcI-ARIWG#O^3Lt6jO0Sbn!W%s!D;^UsJ(y^7&A5zp*-2nm zn;cm$AN`blz$FVZx09NHX5UXF4HpsNTMow$mU-6v{UK0#1m1o^!AOS!4W2cgCYp(C-XRz?_{C(iz?OnH zjZ9B-0>m%uGtBC{k&jyYbz9!^uF}%;gs5Y561lar+PCR0;k?~HZ{Fg_=%m0=Y3lvj zJ%@0{VB&Q@`XsTP;p2MpnJd)8-6?ksQ@S>vZ%e7l1qf%SkH_2L`49{*K04F!G6WEJk*^|8H~&uiU8oSN?G-R})ARbZtm z(hbb{8VsdYT}8UG@+YE_oLc+b;qht$67ox_;hW+hI^Y-<`B>e4bs+;J8;osaoblhi zU>eqq3i@TWxw-g0d6sPX{OM6oa)woPY1;K^`B;DB28;ot>QWrw(2^I$y94>*#qzY; zn1)`SxUv}1%n4$4JW{Hagj+`XlWA6d{ZzY19p@Ct8AfhuSWKkpb)dH9 z!-DEF?wjE}p=Mj}mDbw7j$3to*CR0e7ULU2la=@xYt&d3Q0ipxNb~hkA)CHlcqpQN z0J+(`%TF0#80m3p&ko-i5?WV1vHzLcVw{5uYSuI(iYYBaN)cvh`;tuvHP8LYh|DYm%(?&6`Y!O z#Z^cb+m3-I@)v^)R^k&WL09%kWg-}a{G^k{Ca6N3@-C37wAa3tHFPvVDxwpZM@K+5 zFyemCCAYbdU?qF{W1@VY{GbdcDJY~AOgZ$J5;8_ELKhzKtnP9ET%M;ksV@VEo|6Y7 zQu8ZsDsN2wa2)Qz9vAMAagUVUCmpbnCO>UxS(TR8o>=(@M6oygp3u&rbVB51yB1jB z+~9=Lnj5r%hz1j+&{xSkKk>s|1}mNe?&Bmzpi_B*-5?}xf6Itm_LRj9JNfBLx3Z~2 zG9^w#@2F*hGk!K(a>C;guZIAq-SA9W!z-GtPy{-`D=#ogLp~>H&IJJeV`uDet>#$j z&Mw$IkV!&Zz9n}Wg#wTga}t3uDfJ$8nd@OpqK7216I8LD;0hU58MAS^KjkmUWV>v9 zqyS+g{pxArzwBNmLU~B!_SNB=tV=JO^ub5L`Xrt9rc+YwYpIvqJspx&;P>0 z`zfNQEn)3056*?|X*<6yDzKOuTyi(ryElr^o6m$+vwN*2hp2TY3u|i*ChSdjJ&c8B zx;rdNEURnJt&}HOlAOTX1X(m+9mUS&y*z4TVa}8q^4v~MA`7jfSDNii*3{isx;Wd+GVEU$O+JOnD{Vp@Wa0RjCjpclVLxwWH6Qrm1 z0gNWetRpx$k8ZT}9x*E?v^s{!<4qD1scD~QL-3TU&2-{Q@Zw6W0p`Wrp-h##?WdJj zk3g3hwCHKal|1NZ>WHnxDQ(;$!GW~qvb`&xRGuQaEk)Dx1ncD}YY+y&gMdbX{P>R- zb=8I?{{)L&=pP0uFdbE`FH4)hDLNM>Rsov`Fj(S>ZQC!y^aofQuBKqVx#yD|Ymu+r z4YnuQ*w;NDCurTEoPhqe#6i{Oin2@%0v|s;bHk^+lMy;Sk;pt7JnwSsd>MO64*o%# zY(B*JHSD{UC+mryC+D^~sF2EUs;H*cHIqB}`7e9q!(?W)4%25LzF;|RzWu*Al_ z1@erKgSz%#>uG<;D;D|lZl|6iDKLuL9*eGiUJG62yleJ;SmhVfpT_MT{2G<12*b&Z z?<&G*GwuQc#2;76GVsV|Hkp* z)#vYm`6C|+sv!R7JvjA&1YLHOdu|krc&!Yv<}xt8QG?J%0-odV_~cQy)6>B0WK&Hg z^rqYi!mzD{p1*~tJNEV1GDGOESTvO?1P`Y(u7O7J`-mU6^|1M1rYsRW{kk(kLg{)L zn;FFfySmf0&-PXDY`N^LXP;81l;F^b{-K?1F?94*SDrlHO;@FXu%{Ev3_@h37V1XY zlgcR0!%pAFT=0Yh1*=oBhgruzQD8@@^7iqx`+_V*3vGa8VeB^J#ilC!RCxYzpyNLX z$M>T+Q(lwT3Io z4cwqD%lb~vh(W#qP9Xc)iiC-zP5aBzgtxZ`)G)HC6+AfLG| zNLW;IEx;m9V?0Oo&vkzltkp~c0$Q8JVq-cL{w5*rr)GzEJ=WTPzQ#|TPyH+8I$jA| zcR%6lNT~fsiq9j0&VrF4_xr@Gc0UAC9oHTSsKFji1`YL?$Vuq+nNHC!I6o6+=D`oI zY%xFSIsr8`<&;4rFgOh5K4^(jvA8+F5xoR1KkGJs{I_F!$%pn^@_3$G3d+~yl0>N& zB5do|NDdgaRxFnWGBeIl9CL@TS(OoPIt5z 
z9TXJvvWJIC=r~F+TS|d{6qY}s!5WvmPb3iWKYrJPS+dzNh z=?^WvZq(cf-FxnR!b|vb>n#a9zbw$x#Iuh@9;;LpW(5G#C{Bn-KFmu z+_(p}!Am=hU|kfHy#^Y!NEnJA_yXF9ea^dQ`c$SMHasjA6538PEZpkk#{h? z(~Fp?E8|G9pM<(^490SBU!(X`)-)X7f777tdUfckDpvS!o7+$txJ&fcrn3_;xmCWM z6@G4T0=GG+8IaugmkY2ncfjmQ;q(Zj%Z-Gx6%Ob+yY#ACF{J5?L0&9MLiwyz1@A3n zf2~uFcp?xc@j-Iv2>}v_*Hq>dC4$idMS`n35JUuYY5t{)y@n{Mg6T?;L6u>K?^gk{ zi^8char-(i!|ek2KBf$WM?j~|{|b7G0PiWZiJ?tpr~P*b7)=dU;qrSKokcm)3{+U% z+G%+I=>I~$65>jdjut$m;&BLJLAV&V^SonvIij!;Iyd}At4n0HfX4`FQ`jp+c=x`@ zFSJx6pC}#Zksc0*&3}bz0kbtTt>uN;(FqO?cMnI5752M8S+-W@-+?#}^Ucf=&fav9 z6Q2)mAfK0w8?b%9(`$r)7nE0+030!+Z%@q_#Oam@t z-&?U(8Q#9=7=0SR>F3x4g?Nek`i|k(f!OJ`#6x8LYo}dobKEd>>u-riBZ3-8s&Os3 z3vdz;-+;XerH}*2r-tubAZ4#5RiYY9kr6`6ryr<(@~DKewGI$7ffVJ^P{F)LR5K6p z5i{4p!}LOm%E&WSe46Fy<8JHOKBxliq{tN>WBh57$wPAOhUjL4)+enpP+n4 zp)tT-K^Xd3f9su1!+yp?NzS_GN_cj4EPiW;fVs&Y{wu`wGu>GKzRTf`pt04ohn*fY zMnska0T?Ho0Op^$M!z7r4?3L(To{I^;T!12dUP+9_x2zfd<&ZQZ@5Kpd1#av(1y~7 zzEype{MQo|kg#%x%2%&L0Ae5~z9P2fja-6RIfR+sHIT0SAE-pzxZjJJeLP>gA%^a; z+0Z9&ckYl;>{&AkM>Sk$Q9J!L~L{f3pM52 z2A4gXwlkE{OVx(g_4X$dtZt<}IZh9jDvYWdziQDN1Oqn)H0*|4%wHjI=Z&Ue$#+F2 z24-OsJORp?;dIcqr$}jJ7kg{DlKo6Q1Zt#yhbrN=8?@>33lEIO30?M%pAG<`TpSfv|yuJ88%MIp?p* z!P9O)d0V(`3D2DR+@Z%}8EU+OUu2{oqJA8Pt|zgE`=TjfNHK8O8KgzGwzO0be$#3Fw{qy2tI>TLl7scoj}_!#cOar7vObTcNm$F)Zy& zJ#Xve*YDHYe2k$Vl^=@vg8r__a>avI{;Kh#~>b8p&8QfG?daD;{YYcLriwld+Eo`P zUvH9@`gd@M1ajcuUrF~M4H&sxm**iM0jwIlg=!B2$d>%^vP!r9{`R=(YSXyV@p(mL zJOLQF^S^tbFu9DZ6}c-7pXXRh8NvV@u<~0oB9y+O_pTsvE2~>}>$dJhBISQA>R;5K z9qKtxlTb}HK9={u+=`k0stE_6N^H18octEY#iVXs#Ewh%KBzy2rW)znaQGKDhi>`u z0-9(uEs8lkIZYH~eP|P9C)YsxOxO~&Q|b0a4Z3%$pF|HJ{DYKRKRjnT|0kZkdH5wF zin+}LqT0gGHRms?rty`7%n;x=-}jwjtP>*|OK)vgs3h{OncBX-qH9lKI>~2&hw)Fo zfuryOmi{*n*NV%GP_)X_aC;7CsPzX;TObM(Lli6Bh(Cx0$+F+Wag z4!KCp9!p`oxJ`h9`MuGw@s?Z5Lv0l>0qfRFoZW<8p(Bc7Dhh?tcDLT*B!m?_H(%EA}LEAB)a z$;X*7B0#BsKnNp1IXh6{m#LgQg*mP|imcD&uq}X?N2>vqPnEdj`+wt!_Y;Izkr>k6 zD&uoeMS-YGn!_cC%A2q43`dIM>Fxx;=r8AA{?1n3ZKZCxVK>5TzCFzPFun7{q1Pzi zT_p%}LGpr0XsV>P7yZIxz2BsSNkIi5(1cT2t>9k6w=k&HjScS=&9n@aYjV2~@<)*f z{6NP?Z4#1>K(Xr`F)@{AnA5LnLoG%CAaum^M+MFH!vfg70FEjVH=mt`*UyFfh4Rie zRA~X~(d20$m#6gmuKi`%nw=6VX97+?tP~0Mhd`J z>W;|m(R!d*09Bv0=y+m^;h1#R=yC-RUjBr+CUF*%@-8NnEZjf^)vx^%>HH0&B}9OS zn>hi)jc)ffB(i*YX*qL=0I`A%HJh;Rm+9`vvq=e#nD?nQ;WBe{(}p1^IkOArg%o~ zU;EnnHMOa(SG>-g_a_`I19$YSpjdnw`Jy)*pTVqES(&;awtHKe{{d?M6*TNZ&qD0xzvm4NEjnfWP?7-CyHR_ityWd)}L69F~68avV&}UF|r9c zScGnqUhF$3dkf7JZf!6PgcS=RNbtw1<<+vpuG8g7G!(?p6EsVmt-!M+L|<|4)b*H7 zfmQIkV)%--X{VuQcSif))Eb=ll90(BH~n>UsHl?`RZ_5%tlN(3C(A`jVH)=574OE+ zHkQz4zvTtQvjBR_jUdeS7kfNr#&fqC5=U|WuyM1(aD!J+APyyW!iB0PqX4Mcz@4bA6SDi%pv1mQEc2;Q0x~PvpleR7Meuuc zqW6#v=ZL0b1}Zf@cHSbPF5S^s)CN1KopF-^D`Hx>25jJ(Q93{+_QxB3Xe40=tYV}< zU7Vh7p9l9loxQy~WjvH!B)rPhBDm@;*>qMj>@_s$k%QzH=-;U3>S0xYquVdg2P1v2#-PLP;(cVT<$g= zxWKjLx;JAoyNLZnYw!ecldESux8L}-r;To|)}ulZn1;Ic zFT%rq(!zb5m=09HWn+~oArJxlacNOzE`PYepT$97Gy2EI9nDd9GGaS-8giZ{wc_C4 z^vYl1E)972CP-u<{c1%lRe0CaB-L8kU7zz56hUwW!?!Eg|9AlKTo|hcwgKE8V~b@S zU$Ld$49ekS710zJHQb-mIgL7aUbKqt2zUm;Og6*VaKd?! 
zQ?OP%a7m%UsUZUyn5wljsEos4F({{6S3$l2TrirSDksl`AWcVb`vch{y#KCFCi;+$ zeym~ZF~kMnxSS$zLEFcG5tpNdmRAYXK$-_dtSe*2gnO+`)%Hu>I5MMB++*`0jQa#@ zZonrw!c~YdTMC{clT8Ik>VKobV?|Yn+}c1#^A1$(e=z1S+5UsR1}!-)esA3i8z~*E z`C?HeBT349t-Xs6bAVt@?$?1tXe9vU;JM=hsTxVrBn3hVKtb`mBd+>t{+a*m&tJTp zNL%to`UxjOx3v>M;7=;ER_HDU%{jEaE^-Os$E!TCxW2Nt_uZCE(l?OiqHsqdTSgf6 zM-W~5m+BaR10t|LIfff0woGa>if73W@f+c}-MO}dN}5AB9o;FOt2PLCnkH{G`&URr z-=i&MY_aej^b7T?edsfiTjB=$2y zhE9wdOf-zVN_Q?$kvsorLbrhzbx<0hX6kPeegix;3yq6 z@?i75KA(^rqcqzer>Upcd577GT#_vSMMhlNoQi)D5y0-roejX{>B~{5YL*opr=<=_g2UTqia~{`=>|>^58118N}_*|JrjB};P1TP5S}|0~Z`ayQ4u zuZapPCZA`jz5KZDkuqRIc6o{}9JK`A3+>gD`|L*ij-JTd&x2%nGleZSp@JF{BjYiI ztUJBfC|RmF9;otzgy1I4WCMND3z~KFUgJ}#3MB=E7%h>{XX=GK3KoQ$hD3bpu*NC+ z@2oA-?+-K5y3|V8nHPqp;lS?wD`w7SMux^37`Tgv2+d<)7&}KakoDwHV*=aSyoxMSG7P2M}IpdH-gm0wGzLse;7%ig2=bSRBm%vRco@Z?!93 zsS_N0HGCti)AMR(ryYB-ns}KZ&4=L+m|E-=AiNW_=a)lW_g8jw6j}7cc7OqN_+-Dp z*xcto_E^YkamnX|meM=f8(*bys{;P(^L`-r4xi zkX&U*-aIq~xe?1tg3&-+{1u!tOq4cN~{kskqLvMB+#q-R41-+Y3~o>CIB& zg)nGwp}ETor=Ox;ITI~^dj5uu@MCAwfa_s0-e)W+9FdbrSg%C+mvpsswFUTN>jfnU zwU?~3UQ8MX&6(V)%6Ymt{_<(uI!@79-hs`8dlg<-_<0RNI3F$3qj`8d-s!{78FDta zXE$LmOTO@+ET^)wfj1C~+<6?m)3ZPF-5Gy_@|@OuU3QOdH&3TN_&HY*!yZXya&vdBUH%9Ik6BFK8%VO4is;y-_&4=dY?lt^w!-Udu=2+b<;s*3El&1*gX1gzsoYMTPi;2NfU z#Fh4mYIDAUx#r-nCX?L_-dB%+khbGp^(=SuBLBp&;Bhg{vFGl=N0@nOnMRS*a%E5H zsb2;J<6;z*XHwTJDMGP2^e4QrtuM#A`1Lnl5CpU0# zVjC7&oDQ48<++139HDg_8{ACm6l&Ix!0~mAW~{yK%lu(dxS6Bp%(>`|G zO_N`*L5B=#<$J&VA%ZWdsc+Xd*LUkA`)P+qHz7<=@8boyScl|xQN7M$Sq3pbB2Q& z59WBIr~TebK{Qz0^;?hy@1*UugGbu#?q+!s!v+Ck9Z7;H%l4T?SRD)%6%3W0{oBq- ztie8VUvkaViFXG%)lc7U912Z?Ew2#I*2){qLiy!NO23tBd0R&(r^^`ADXbV(>d9Lf z8<$T$e#mhNzn2%|`pUhI=;vw2!7EInIN*x%{?ek!c>gCeqyuI1j$ zhVsTT+t2rAP$zEFKTq^cH-fihUo3dLDDgT}1)sZDvbDAFF!@W2KuJe;<{@1I zl$ulQ3PK#_BKLvJ16#uZQD7qZQJ&n z|9$TB;jOw~rfN_#d#0v$_wHV6btB<4`A1fGj>{Mlb(Dd^ezdznr>>hpp6TrynQZbBl8ZCAz(?IQIl?aw(zdJ)yv~ zcaUvK-~phi;qsVi?ut9jYBxO<+cmz9 zsQOnG=<%rR7^s($G z9Oin20v8nq70=&U(C_^)VU>i(u)jd|g1ET1F{&{c*4Beeu8yOc>`z2F@cY&jL-*Gu z7oq8}4sm3N(mQTwO6p2&sCgRrhLy~`eUBuDT??kF*9zIN^i7}GAvSa3hHF`)o6w$967(U~doD}6ZLCr2bJ{LllpRfB?}w-c>%0m^A= zc$5q57zMvej~GS+wFGk~O7Qlgl-xm>Zy(c|Gmnn znCe4CbUC4;y2eI~x%+zUw{UgOx|U+~KPGJ$$TmifJYJCDH@+siAVekPujd3@$7^gj z%|FKomXMO4KEI@K?o)^%m!v&-U9w0lx1TEw5-nMeapmXKTu|mmq3(Z6<-ZR!)S>~_ zyp}wKr+5!-`#cf;)-PFz%=}QA6s!E}_xD7FkyD*jB2V2Ef599ORcIoR5gNh8=CklntMDI4YU#zeo%O6GTChl@W{;N6R{V20<6a$2Uy~<|VyD1md=3|N+BG+g6mtlkw+Vlm2 z9RDQ_DTaE@4JI{VLM{96%kFmMEbp0)He7%OM#&;a;2*cP_v8`SYI?sdc=-2;sbZs~ zB>7JsPOb_41woCn8vk^<{lxVu&8??uRZQUjYYIA2;q{%#*QWv^hlDz(W_VzyPQ zs(ugBrn)zZ}IBePIrXav7WRH?_7R@X)9USR)b_k)o7rWqK4|!_`*; z{@%?pEJA8-nvIM28u(m>C~G->?HqOoWUPZ zu2yYy3eq5*zTW*uxYlJ z60GX-4SgY;bWXiW1;YW6zFPW(X(TPF%F=?!pfVD=$zMmcWc4B|K9544et{3ZW>~<> zhOxj$_Vd9+na`$gd-mu+8cO&3vD^1bvsug6(6}+g=H@LYf@Y_4IQ7+dJQt1;n_0Sk ztKP4}JqZ#0$}$Nq$D#CBPtb6f&R`F8-?bTD7W@MB+?B7u_th4NSUvQ=XyRA^l5A3wFi!sxM^%i~hW#2bT^{2ddf}_f!IB!k0sm$vOR`5`nz%oO7F%zW zwy=mYKZ&KJo<6K@65lAQ8a*?qQVno zH^YXN=t@LWpn#3jSO!M^FH{@CS3~bIip%vR=0T@r-vj0-YLJj4nM|lgdF4nV%CZPv+_UHfULc^(Twr+0qe``(*#x;zvE<;%^7Ud#G< zIT)stEyC0nZ$ZSpvV#GlKf071Uu!JWNgxV;r2zdB8Qk3<>$u~blDd0^78Vw*}f`|gW)cKkH@K)MM_GoSC;i%_S+)$(kF}Dlb-Ji z(3yRNeoh8PhO!b_-7w9gA?_ZPp7|*Q)oj`Kavu3rZ)Q0uIrUO%dSQnubvq^<)Wbkw zr`Iv4FAW;MhPg6D6?9L)Ve%y)m6`t+WU@nv;atO6ceTGV!x~I1=d2_?_1!_^Kq)sv z)mnpI@$lj^3F?qX*Gi97D(NsG|dA(Juox* z*bQ>Xj9AXAlEsWswI-rYn^Mv+l4jSUu#@Na$WxB=_thx!g~Y_<3-P~LfDtrlFy3*_5(kQwKk&LI9nIG>@GASWGW%1F zK_q~ahS$Q!;S=rW!WZy5AURT|#$p9(>JzwhW5OXsRQ>(@W_wMr4rkN-6O9rVT==PCAm)1T~2mf{p31boNqhF z0%n&J!!EZ_b;I&em;U>MBl7$Zf@kN$#vl}%`~rv_1Ru|aILV}~I_*J$!>x|DvD?hI 
zC%O#VZ;$!v2#cO@U9Yq0!xQQP+~wAGb=Z?y^3%+dTOu|3TicK_9lMDus^asy?TjGt zfkr`uqLXb;THg=GUaoi(4LMX+&uM=jt4HQ$X1eJu5&KT?5VzdU=x7LV3^v`@!$$bH zfuyI57op+sMT4t8oNWuqi9I;5yMR**LY5=Q>EGi04f|&lOJ8NnD(B~WNEDe2#5Id1 zQHW)s_L4Ho(x#bvj%kwy$s(2al5M7G%)|%gp`^?9^BgbKuMP%L<-C9E9H+RrOjtob z6d2YGcIxq)7KqTJ05c~QXwHk=SKITkp8 z?qHw9#-(B;FtHrnI1R>>@|r*y-iV@;=O6{yhjbXTQK2?kwNBFwxP~OMWrTZBjGXlU zQ`AFt49N(G5-R7sJb^7E(taPPpFD<*X>9Q!YA6iYZj4+_M?Zq6?~y?&p)H^d%U-n022(!GrrkpMcr13C_-3H4P;?|z&%8*!%7^0*>c0JwtZKkC{$Z*`q-uD zf>#!^&Qzg1n^&xh6&4lIqHIhy{*|z}vdbcKvM2+y6=b2nIsGkGRvAJSJ1mcZvY%YV zK!tuBLVX-dhx82Nchl~Fwt1-kWx-HsP_GyrZv}Jig|3qIcF`+e&7Ad7buy|dw;f#T zWq19VZTD75VP(ZhDB+}seX~VeW*~hEW4CS{Q>^JGDwDa7^3d5P%2J4~&hHt-=Pj`? z_TCv+HRj6_0%XAE!Ubm;lKWA;vkee^_7W=uhyWzqjs6{WyPSquDMM?{KmYpjU7kR$ z6}bMh9~}u=-+{LhjM*7HljE!Y$=BX8d3VC$0!N4K;w!da{|x?NO&sg3|1b(Q7z?E} zSK)ED#8f?Y&|?$3P_(-~&jWl{yw5D`&5%h^^kFfzdfbb=Uu)|ui}h#R>YF!&r{=u- zBeimoD$-JLCHv2483Og(*+T*uc8z=Y`b@!V!*$0ehQM5N8-lOjYQ zdE5ojXe4aQLHv4J<17oq242=uq(@6ojbp34oi0bTqIeH{ov^^uuR!AbPhxRTY0fAhoc$Z9nO)2G0Bo)Yy!b?Jc6 znKHXx=M3w_^A_W3$6o?GhJ15u#cul+J2p+RKbS1BXYz4PZVG@1+b-2^4NbLL;Q1zPdyK)TYO|c5z@!k%*w8hJF2BMVNL@sj%j4>(BhoRHN7}Xf*AziH zbY^$?2OFhGO?PY`6)9y?th!vb411QB(4i{Wu%=S;>ued7LP;O#M4UtzRhp+~fS8!n z51Co>0wtt=TGQt9;nyx$?sy7d?iieiQJkwab{xQIHU6M<$R~u0IUXlawZJ*O;VmL^ zdNML7L7+lLLvgorz!;L>+>U0--?)w2KX*9s$2{?3V5KZKZ9MKFjXpYttRp97+{U&T zpQ)?2UNfelF9!s8mv<~%@P^ZB%vj8H0{jdQA_G(`%1o_)ZVAM{rXyrAzDxyJezhMB z7`kzbEH7~Nz63gpKKv^$rVGRN5WW`^9N7M`gqxM}>-@_O1`UAdJ@5&Tt{Y2lnL4GV$o?;P5Xe+2;3qzGSeWTjrJ5wbmI)Kug`` z@t!YFhKRCTcM!xCpt?!T9Og^^J>ZAP9dxvIpsu&Vc~M-uIqmZkro>?7k!xkC^W9u6 z3$zmeroX;$*8tzCmdvP59DE$!om-tj4XmevV9PKGT3Z>`=mqu9%l8>*Corgjj?}tl@ z(h+r~D%V&wzqh@iyl5)saPHHuxiLygEfs49kqk?7&=dh6do}AcGkoA;DyQv{PkqSP zGFGRKv6U)Yg!er;=)}|Fm;qaTHX{@~)0#C&Jb-)^sZa;w;2S`Sa#2W|NVnkIR7s=C z>Q}zfC8+;~u;tO5nrp^ztk3j;8Vs!!?y80!IPu%Og1#DqR4g?OaQO%6%L{o;#I+MVWB@$@JmUn^r4IYDLE5}@VaFqlYmmT3D}=# z$<80@1nE)PN+$^eaQIRq=I2*QT2{ZxpP6>k#AEaZ;(vbJ;nY2~>-qACkM!6pn(^s* zmWW~F3b;Ki;$WcODXnZaY$*pI$ z9Kn4sA6=h^>Sq5&5r2F8cChLjSIZ9D37(4e_+7al!)2;TADqo(t$3_%qeGX$Ht!H| zIM6~1fCT&A(wTERxyJx=);Aq@v@~cgQu$p0mvQetfffSRW6o4#acxN=CRn!ZIwzE= zlAvn-8iI-R{d1tQke(67{$uve+eq(fYEKyIPqUa=ZIm8wUUM zut4E8?0m@10}q0|q4T}a^gbAA{h)Bzvi%cv{f5U0U_z1lMMh4V=)c(+DHMrRogqxi zbAZ8bss?{y^a?(J_a{WC=ij>(y-E36rI;*gY_qlEp1jfC&dRWrj=|{!FcED4ly)^4lG7vD)znU;<6&;>%}QLArxRBBo4PkOdqZJbDY#lHNPgTejz+r2WAxx3 z(>mM{I@G`3ZvND?D$Om(^jjw(UWRNm!O;{gd~GSU7~XYS<#)h9jh*_tGr_ion}NluCI zspIV}H&T5LM1rSZ1p2L~bE^#uT}Kg<0n$0^FQ2lL-P*>Y}Gp$ z9JOx#pEo5nHIo5~f;UCaTYMz-=>(QW{9ZdulYpA7$A=64I+E(wm%;|TG{ug`W~4eK zphVOen%fk&*kM{bkO=yTwQEmMt+nNpP^Ftiwt4!tyA%q3sVB|1*KRwz@askx z28E_yTV3_L+toE`8{T$b4rHff9?RBueQcH7N@)+Jr4_7XaI+1X$ugn5)@e=+PpPG> zNudz^?V0H~%W5|7$&Ofk1rnbfZ0~J&CIZ{2~1B2fQX7=Q(23}h&s zWLdr23M3~j6|R4czPT~K2n-SHAi`4NFjre zzE4R#0zd$&8AQ+|f-28;% z_Z;QvPRl!K58}e-O~GcB+c}oH7&bNT>2G)R%a-&X=I*}Z_=KEyd*?Nvi0f}8MPcCfbJNNUa^OCP`(}+!`8JuPW(wAEooBejgELX{|V>4J!E6N?3 zCLKiQxO7z&0dMPD{+@UYKWxvJm7ButcYY>q(1U!d&595F(Q;+MGyHl-*^S>uz9+@` zN$svD%OVq6xMkXvCHUpLrrdpw8mn58W>ch-6;JU(tcc#va$7e|J^ctDeI^01kq#Ku zm%L4&Hx!SqXxvGm{?}VmCLqzjFya0QOtcBUh3Vg@lr7{_lg~$iBPU($L-$r_C{jfS zmDMaHT~Pb};ov{uZq0!r2@WoS-Dj|J$eC*MPiaawOJ1+e*o}6ZR=Qf!jA^Y;+KlUH zt~P6as=l<>tJT`Nug+?Yg85lJ^t89dj)9Z|A44V3yFMVeWh%%Pz5{Mx$w+jY3Mk@j z=b@;RFaB61E#Mg+G6RP;WkrM+zCZ4T?^1qgOj$hk5my+Q(OgipDbLC_sHLA>!YthE z!QJJk-sLPxwQ=CN=PrNY>@`f$f{XWP+KBaXeH=xs4x6W09k zHd!KIa>es>0TUPZ`4)P4i7-cedl#w{b7YgzuA}DK8Dq3IK`mzw*2|>kES&aK%V@#7 z`X;`=Xnhb}?L4qtGBJ#YD})rBKWhvnN+7{q+&vg2;a`=nsp0I#sp 
zkpCqc7gD!?4jwyA`Nu%#aOaM5(M7e~s&3bl*L|5j1mAsbqKJ8VfVlHy}=m<0O1`&AV5tG1|Y0*Q}GSY;+x@ms@%keOOpfV z{;|%_(TUz***{u^?t+&P-nN+{zOMKBZ@9AzAv_KVz~m02m-ySw@}`2j8hjGFp4mg- z#$}twb7i?Rt?Ma#{Lnh|kJQTy_+$NH>zTT+*B{Sgpb#1P~F z{){j@L5RV7+o?P5uq=e4XVfNNEzFm1pfdL-M6js|LzZQj$FP1)aq%8L?C&*G7Q7|| zlv=2LwOJWJc>BmN8_-!zb7LW5o9%7|0RCl(!#ZhwCHI{6myGFxS?|lm`X4o&U^$J0 zN@Z8JqW(08We}KpC!4D^C~9AQZm7&Su1dIHPIy@_6oMJzAJHjmJ7)D`tC{3(GqaG# z^YT^md9*$GXH4cnxjGUlRb&Y>)XkS2#>_2>22}`x9C2vgmLbFnOpfR;fe2^M1q2d} ztW%_;W*QMXn`W`%Ysz+3r+nn%s*r2Of+Uq+gDy@Ue^;aX?#7l*?`Dt1_!^YjQ23tp z=q!a=AL2xp(FV^{|2w6-&9FqtgDU-%F$U+CBfC?nbODGwSFg3k8O&5P;vNp52;3wq z&7Eo@1dd7MkZRFTT~?OKc63EQ9Ev0?6uQF^4RCYCRz~r1d%%0CHKpoX4?J88NdOH4 z0uVQpVnW>DQvf|XRLy)iNUX>|&+L(UtovQk?eHm1muofQ_c`zL*tV|PX*kr-as6u2 z|G>E)D&kU}Hf^-rI`E&FCz~hXap{Bxm)bJ~X1IpN#dfaaU$)3&yR2#_jyV{AYY+fi zdzB$~%nJgsQRR4Vlcw6^?BKvdGg7Dn3jF4wA(#Y_^E9|14315QATLEgWGeHsyM%3z z>3b0ai$Tgr_YT3-qUJ|pIhm6O7^uZRB@amdZhIRF`_;cwwM{JL>o0jeiXoWcwI!uq zn>B(A8KTv&lNDm9$NkAfK{_=1E9?weCi*SS7$`3uBkq=>QD^T6-{W zSg~ZLwN%E{9}J&JnOE8>R@E!@VJ#AWGk$V zW8`Inuw6}$nF+zpQ<5cQ^bFJ}KY&y2HAlr4Lzwft-ABq?=%!K+(MGTU(h5!s8*m>yXDR*0ia<@YxYUx4E(#mgOq-5+%BB1 z1o3qX3zIOlmN(kEl>npPwO+n1(G}X*bX-bdYrnOjyu)A-Lp~qRBOAp7kvgyJi9mLE zNKLZ(UT2@1-&AN*ReNr=F5xP?DF$b%sAQ66fev;bN$_piH*~GT7&Lvn$CTAv zd>2PlOEh)WhV}Yj_7mEO&-+@=Onwpo%TF3h;;F3ILlvlnnlONEraJXU(N%LaOV4L7 zcghO*>JKD>TrYvsCeDesx&BIodF`x*JdvIpLtBdfQW5wFs?W@y3T^UStrA&$iMctw z>yrS)JrGvkY|__0n9{1nFYFC1Fn>E=d9H=}RrNpL+k&drrTeBukf$Ob(Utj+uzeEw z@4(<0H{MZvmx?};YNP2YPCV0JauI$ykmq6&^w;rnT1ylef!o#%9P0lXAF=Ym@T(VCw;vVSlt5aFPVle0;NMyr9|^x@$>5h z9zmwL3!6xaeaV&PIZ=opJ5_M`a#TJK);ikAe0gfvNjOt5DQmuIr6CKp1TU!=@mMS5 zZ7S6idWuolHP~uEV1)<)szW;K0VO46j;c51(7mFea^$6EuV8c)$ZqnFE2<4PvQG_ntULTg8LWwmLC@$bJ)d;W`VSi znW<*q*o5%(EvrG0(jfp)gT$&Ly2$T4JSAne8`3kHyf}AZZRw0UmNDs@tOXbx{|VpD zChs=vwD540rlcx@Uwj82K`PWPkIH8*zNjFM0S?>#hG-i)VJCG9<^}n4`!%VdEd&ac z`Y`Q`(p@1xdat*I_0J?`ixi@2Q&N+{AEHDi#-A-q_;iv@lL@c#YMGP|88j>619jS5 zUFg;N;@A%@RYK2m3X6H6x~jGHRz$YAkKelQwKjm_zMru^8q$M&0H-=pxY^&es9^A6 zGIgikeQ0G?sq&6(>nXeXt?UhLUGthZwZ*A*a+aNyG{_duaxw{tis|OS0dToeOr#IQ zqCN(eRFWtGXrXdULe+d)6C2$;R6@b4iWbX#*XYu>AiL^%=g(UO=uR{y+{24>F)6r~ z3ldn#ermxCUF4)+nfb637Pa4O4Pv*+6(xJ@WV)&I5OEGKZpSs#;`fHt5`&k+7^HTU4nyPZt6 z%@Tqi0y{jry{9gLo15QjSSHIhuj5gp_&db)L3} zI3)cTW}iHMyJsIBUV+&g91Tk#z9gn<53@px@F8kP;Bp`SYx`Gn$6!WJ`tv&s9`mv+ zofL^h8iSSZpSWW7ltJ*dZ&xA<)&y63bDuq1ey1B^;9l;17;rCsaeZX$=&}5%iKz^> zpt08M#Ewnnq8jYmyqw=~{GSm(l(P;NncS*BeU{J3iYz16QsKK)5#2^1ns2nQe*QtI zU<6dpXR|Uan??jk;4lRohum7TGzt{1^NRV^(;$_DL^3VH-$?>hP3?=H+K**VPdszAot3$8 z)6*&tw75>6=VH*czm9LK3w#d0$%wC{9mes_jI=#2vAxmbD->v+pRN|Inm_m;$ef=B z<@j~%oldK_$??4aw-xyNTB3KfVuiUq3E#$-yuu~q)%yWT5dzs-pXxpgDF2bcpjj_^`1 z)J7Y_Ma*Hfk#sfyE~n0`rd>CjBO;Ye&sZ9trJqAq%slG?8q zoI$pdxx96*mbHoU;6m0O^o|5~$GYbqvTaF4?fbXP=GVAEpRanu zNA}e3l^Y22*$o8f?@CYrGHUDHC*$(h8j2Y7M>NS%i0)hzfp7Ioylm!nyRn%3C-TRW zs*hh3^{16~_To4z4?3q0LE2fXmaeGkPhG^YsryT5G9PWT`%6KqOXReH@lMI_c!Y); z#uQMCPy6|o^`xBRt-G<@d7KKLnfD-8Tc)}&%jGqLeX>G+ft>A~tFi-_@L6GU_bskp z<#iELJ=qNIdWyYfJgWWhYm25RaS{vqK9PKg&8spteP$&h0OdU$JO)9Bb$xwW?|rR_ zqKD?>WkX$B?F@UN7Vg?q-Rp1uS!>)jSqVdM;-RrNt>#kfi8|i@R!J6b>YoJr)KYk~ z!})BuxGp+A`(+ynAwH82bb?|3MtmyNs`2OTHxsGjzguy1*>pcVcmKAd=HM}e_V)wR zZipHGMpJ15UWey$DP%n{vh=h|af{{=dwDMDrZ~v%vH$5J0mEy9KqIg@MM%pb4tXm1 z8B@6-*}hHoWZ3y5wxUTCGJC~R*4Gz{cZ}!On4vqtt$X4ZNBKEgMgIbSU3gKc%2FD$ zj@7o#r}|coB!4Cu`0u=kGei}=AI$x^knb0Yuhb~-;;dHEy8B^-+5U?YhCj2OMIPOKV4fnGBuTOUr2rIMcKs)Y}l>bQ|S29ymt*n%vIR&JG={+M=Vut>$hIi z$lKbiJsoG?EVSs$*nC*NmdmFG+36v&?jKtLVi!`6WYzV2UIQq$b#47$Ul`=|z1JIf z9(_nF*y(G&?yn-E#`Juimdv+$Qh?b?Tr0cJ>%+Fjms+N^lg(mE%u~(Q6A%QXJ05Cw 
zdDLqrT~3E!�LU{d+LRSTi)lW;d1@7_kfZY9S%TGp@h<&Xat3(#NrSl!IJR;Vfa zem0jE$XCH=@ERk(iAW3l)E|VS-~BXNA|O560N?9w!;cVy5~>5k>EY7u_yR)qe2^Z% zVo=Rl{yNQdH;lG1)z$>Md)ptb4dlOgGqyQ4o0~j0j1pkQ(eBFRutRG&Bj{hiuDbeN zq-152(VF8uV46$ukWC7U2@7xuq5Y1p-fucfJhZ82H2FK@#|N!axol44$F^Y4dMHBcZJGx#u+U^_c(mX+;%@1hHw zMx7@Z>^HvDxQdR_Ykx^%lF9JYmQz?4*h)2hu6Fj~r7gCH{FBvF0Cnb5Z5Mi^zC=ZW zmx`v>$781m9rU7HxoiqLJpxINt9`c5x)=}3;=*FRnu;w>E*>HQq&(g2TrmRpUekM} z;`BjW1?=mQLfNeK%e5yh`T{=SrvH>HgnO-RakIMX!1Fpg(shyEy=Q+E<*71GZ|;2IT!|5MKNOS!hMte)`FuyJ5-Vudu$G^))fE}0YlCRo7nk8)RL^}8i@VoOup6gLu zZ4*KYr zQ>u#CR6WcesbDQy7m1*HcfE36f z&;7PzESXzWNCZG)bYPN%zz!kAkKXn4t)4Q@(ZWZ@|Iz>=fQlH2wory zVL12s@j-4Ou9ZTy;?ckIlC9gdudu{xSF{R;tK(jB#d5lOIwqJ<7PGI0aQzKG9-VWOF zS0lZL^UoNCFg~7viDs9jgXHZB5)I&DFbo0!q=l^@V?@PU_%|Aa)E!ZMi;#8Mi zM@yv>|L48`@76_EU3dBae9ZszHU59+{^yAOm0+-7N8!&7`zk+Tr;ubSf*#|}epmyA zF>$|h0KvS3C@3bqJGzW+n5muuAtqstpI31VfBaX4o8z~eS0oW2=#40krpQ$&6~T41 zO?pcACKo8exkEUsZtlNg4_QUwAWyJDt{i@!b-uIYa{4ibj>KIFi5e!ohZgKT7tBAA zkg{v+BR!GeZ$wp$VA;@Z!?d1qCtFl2h#qq8r|r!t5MZ=yNkYY+e^mYRNbJEIZt zmwVaDZU~a;Ki0L7;QEn|cDsA=uAQPFH!spJ_gv9S^~X1(FZU!H8=rx(RG2<^?0jK^ zJ21&Xu@;8^Tvl#@%pkI?Gqj`6Lol{SV&kyCbn!f@!!3?mFN-vX;*hU;V>y`moqbO& zF8Ee%?YdO5bMxpcmU8k1mUIyrZ|zoYmtL#;-|b~jd>#93Fie!F~m{g5id zzG_Ba{eVOnkX*Bmf1|wo=f9|fLj%{{S6vTJrYFuE@67`+B>!L_p37I$^*9z^Z`Q8E z?u)aI^vCgDaI3CzZbzVi>x*>^A!&_Bw@`-pnVHYvHcH^K{aLtBj5W1?LH?GYbc9J1 zMwdc4B05xD9=$_n@QgzQXFw#*7edwZ3CH`^qGD_5>zdtcXASw5_Z9V$4Y_Y%c+=W2kXEt=!Fc1y%Z9VsacjjLPOw*?aLmo%iQ+>)j1#5U z3gtC|xg+eF;v(sYUuW_@J5gcvMk&?K=InJkbe zVe7|#N2n;LCEV;i^r&;a$EKF$&1X82Y~163P4bXVB6s)a`y-7@yW1L@Dpp%if@d48 z6TrgL9LjY@@c~_qJsq3>mvxtjXkkjO+OXPU&Agg(^u$W2L9{)F{GaNR_errnVergL zQNO=o;Uk_;DAC@*I;B#QF%Bop-`iBk{dVJX7O5uXT8#C`nI>k6CCkJ zCrS|1bYe3$ThEPTxcOwZk3Paxo`_!E61mXdd+fFM#@^GdQEf&#f+c?C1s+~=H4uOjo)9ys z?2Y6mvqb*jh+smQ%x7dGMnMlr-I~h_yN1J+ZS#LYFvy9Re6wIPlKzHg%SgVoqfvzi zR5og5xUj`s7UNDX%?A98thd zkbo&GET&>;fEC;SF+savUO4l z^VM?k`tj7%U^9KJ6?EJZ_Vv7S|E)**yJC%Y`v;JRP$Rh z?{EdR_QhTTI3x%}?^~v~drOCHi0IoE-P4tbC^X{&u5zqj87fCp*4k0azz$n5c%b=b z`NGT0E{H`u22C9sU(=ORB}#Ari^OOgUGwe?B{m-H`0~Wu3T7jL$&j?=g9$O&eZxyg zm=|3Bp3U(T2;BEGJ~^x&rKmHA!(AjPnJF9#hvR{OI4*noOBf2!4!d4XVd2L=+DC%e z^ph3!!SK+yj+a{sU92kh&PK~iZlPT@m|({(tr#1lrTj6!W=<2_+c@Gxz`u>oT)cOw zvc|J|NkAxnf8Mx-GXt^x0=}|RkA;~Fa73DVlFDyeRu~A^52HHKi+#imiSK+?nYYT`TNf=aBA(`(rcIj zuXa`^D!Gq=WVJkLFT7;c^)>a=MApkv+>mVntCz=P`Ms)*HwPv|_#?Q1yer5@Ah0yJ z+#Y|oAG)CF0^~*)CQ3&Tw}S=7)(7~A4S+CYf0u``3IbLRcQdL}V&W_&Lx%So-1Y1o ze{2CI)oo;Vk<-Fj)k!C~hm4pXnA~5p&JSZtCuri0fvM|e?;Lac1?rFoVSj#)g|1|o zbfX1vK%}=yqU!hT=-s2xc@0AwY!n`@eG+k#W)E!#E@VhTWBhS{CKD$Chi}lgwBS5< z7|?teaLJ9AgUAwRE!LtjQw4)BH3Uex-0rd6?Xg*FamVRwsQ@1w)_I*FxS4*WTWmqT zGBH&Q%(ffmQYkavcA*HE> zs~X;7Tp1g7gy0^-ppWta8r1&``Qw9sYZ3rKxZ4hKliGdr*FW&0!2P*_oXd-zCZ@6K zP#o~n%uwWH9)>*j55(<~`HF6twT{cTj~Q8U@@`~_LMZ@nJ20k-=?w~_FJ^P^&el$) zhvy06c4eJ^7J&wd0BlltIZ%t%=Y~&MLIWDg;z|x(t3XF zQ9{XdsU%+r552v$(xqnaVtPuggxBr2>x{;#yCY3CfL)*B%dQ(`wY3BFQA%L@&EQsNS(S z9Kx2)Ssghe$?!UloQ`O8cxC1lu`5pN*bBz$h9dSO#4@pAKTqLIk3hg~%0uG#$tZZ? 
zQY^CQl7!|Ny6@A2n(bUA|8vaI?CPjt*Jf+B%Jx6-b*)K>i8$&r~|mOO9)tE z!d*-IAB|I{jpZ#dP=G!Dp&X$*F56*UKC>B!O`hie1N@8=M`u7rYr_X<<)TQsjV!>$ zfC@rc-ICQwk%;q1N}+CxnHRXG+DkG`I;=&}=IobP0G;C<43;TEb{|8pMpS5=O98$Y zN=cc>mJ9#;lcP7B=vujR-p7!n{y2km04K(NJr$#PZ7fqnW{=I2qWX6nn~l3XW6MM- zX~)h}U)vbtJVr}o^hal5B6JY)3@qu8Ju{ZuT@$KZ43PlIEQ`&)La29krID5F$B7mj z@sEGxG$m_>`C1g6o zGNd$)QX?m$hINUfJ;EAv107xV z^9>Sif;S^+_!8e}0&Diy$PbbuV{uG=Sol4_QQIN0$f&pbh=n_gBDN`^#NfsTpzI&v z#%z!=BZ-fogf+;BWMvvAnnxZ$5ysWEmYbRyy*IJaTFYYLap@XAz@4UO1rSS>@lRU-+T%*)6qZ{Ypjn2WvfP>_>D%}S_1v$yfCeL$^| zw_h~?#CNvX!W#1$3ERJ%{4fMNoFRYy$VvnVEZ5t z2rDsjeL{^GXtn&&;`XLS?Ek?75=0nz!Tmm>jnP4$deQ#eS^=uPv5F!?(I0}Sz$1Vp zUg47~oj8?Ro?@db_NV}ZjJ)@#erGY$6tzj^#KMjj$un`$42CJiB`!SsS3sV5BR_B#F_0N{!`b&-$Lo$&~lY?=h9I2S*iouG$|UZRvl-j$75$6)?VgB?Ey8_@%UvC& z&J)~Yw!CFo{pV1qv4bN_QMz=dYizJHgg<{AW+()ZKTYoc zN7Xw9Nzz4I!_~HJ+wN)Gwr$(iv~3&Hwr$()p7ylux1W3CMtt9|%F4)!O6;BI>~+>& z%hZR=Jcm;k>~3LN%97VQ8!fA3eRXaOaagIUzg8~1OSj|n0bzLJxGY7hT@Qwo96r{z zY6L8Gh8A%EIXr&HC{1A+y~Lzt%^sL)eZwdPd=K9H9tzrBBao6fclqLIiH$O0N4mT~ zX#F-wNfQF#>ACRmLl{f2dA=GkOUgOY8{N@%v%*}!G zcQWEV?hd6?|$;8)}WW_S=O57&|PRerdHOFj_Mp2p{1sTu9(GC*0$8!(@ASe4ky z11;TqK>~zBgKMb=is?9G!IHHZg2{0L8Q22#=eprXPnc z>$A(i11b1#`GP^O9{e`SKWc_w0=4hcf)lpqafbnhc3g&DVjOyNzZ>6e7vIYaMB2|; z?2ouYAi4RXSexsJquu{wjiea!o-5dZ2531xsC;+WR@mw-t3NXvmG9j*z!Pv#8JIj| zqzCY;TUiM5KV$|N#JVeO)PHKnkhof3kG1{D9e^Zf7&Ia9dFRpNzk)9d;>D?l008Z; z8IB;V{t#D~qh?C0mOjfBd*!>ML4NKNJBpegv~f$5v%s!%iTM8rS$!PG!*91Z@6VGErnJMy4_VMB&hWB&|NL!ASNZvc zgv{oPs1K{TRPbdwwZ)AK=eCgc)%{yXjdyZAIlGo_eXz!m%2c8@Xm`Fv#vc*6`uw;Z zj}i*zw$+4G-Ls*stF{6V!K~Lb7YS@^T+b>JU#8LS-+zp+BuJOis<+JHRAbxFE*6*! z3B}&nB$(Y`3j-0J21-~4TXL#-TFvpW#HVXgN!EfOzG8Ws0f;pQXN10vhXcutCyq+I zi)=z0UZkUhj4)YRR~@Pq?DF<_9UoL%+js!#=e^L&_|1$TC~l9{8X@rtq>m*z+Qog5 zF*Z^DNJhW@xmjtd8Lk(|pwi~$u+LMdIO_5(xN)Tw#iAPqg8wf10#ZPRlv5D1AsW72 zy~c*IMn@VNQ<9idC?1dC zG~2wV+?5!eQW9Ue-P@{P;+ObDU?vKLbn6%|{IR?INwc9TL&<$i5Pkl9sX=l}${6TC z+ioQ~VOC(k55ZsV^F5ecIBpS<L66?q+jMcJA@g&KFErLi16E_!DN(a}TiNdy zZ3S-8J^`c#tHtp--x^crd4AOluS5YRM#x8A{K_ee=s0H`E(ywBccW;F0}~`M)9w9L|0h8i@w9kO*sU{?T8_X~({?V@T@N?*t#Ad-ZW()hu_!`5yCsK$AV z<)f7gO%gsj3-;S2ohl3gD6+0~0_C7VDS#;)0rXFEQ@&MwW)FfdlkJF1JhV?K0_-3TWOaHU37X)_4p9>cS-4@cD(1|Qe_-R#B)ZK#nCW2B#G zBsV+TVG6xwa{-BK=bZ-3Wurz)@f`KHo5Fu-^rz^_>ly2pD27>Fp*w|1CA-E?`Q;ky zI1jLq0Y+hR=869uGkiH)m;36cy)c*NxV5nyVu1P?1oBqZ=(**fm+oYT;Sqmk#p1a8 z-7Z+7Ys>s`OMgc)Z%&yiJMa#^^+678y8SwK2p=m#q|FHt4kQLuqeS1D!0rJj?c1M$ zB;*5(ZGXRio0dX-9A(@W#|wA355&x=wbAqWECO0YC=9XR`Uu8RNNXsGZ=0+P?sXkEXF@=#$roUi&2o-4VGV4gvG7 zY>e&&Yv92IG9rgonEfz20*naU;mW4?!`(+(r8>mRg= zF9)*p_w!36-vu!aSEIDM4qzR23!GPg0!UBig7_Fm2$>Ko_?zqrLZHRX?R8w`>#>kR zYE;qxj|)IrP>R~Z-n8P5q}L8w=C+^FO{qTTONyWAMsKpr7q4jc`!5KIrCWDqkK^o( zzTL3bATu+Z}Jqa4^EV=WG}zgUZOZ7kO* z6<(8D9?{GB%pzS!X=4d1Te(ra%H+nyQT7~4-M}#qOJaoqy3Za&GHPd9idokJ3C%Ot zLIX`)^PFCkt^L)L2KKo;?~0;#fvDX4@Aah9ZLIY_balmst#}cOh+PoPyo?O6;`}z8 zXeHfPDa7`CQuY>y z+t~4GbKM=f0ymTm0Pg){!q8;IirlHgs{NJJ02C~)jQq?a3_t*)aIomyWU*%& z+xucvaXU(~Z5)+Z+D4}xu_%yx5auUjpW(@}8R#f7$kC&CAf({~>oguP%GnOa)AW?i zF1@P98CQu_80J>q9O}&F*799)UMh;UU31VIn=ADHQE<~FTA4sarS3+he(ZObiSf8f zrpFaw{J4R|khd`y=R!{4rv34j(@avDELv`)+ztqlj}@$o7RSh@;E5PhEroRYBan**iRs$A;h`zsrYd7L(L7zy;) zuZS7mWZgS)K)n8@C1fS@%>Z=O!)UR{RvTh1LZ5FO$vX6Km{wm8((JXpq7xPRWzNXC z)*Mv#2np!Ro`p0lE<6@vpUCF4;_s1VLswf*Xy*aqB7e*-Nth_BB}%hs23G0*QT;5(3uXzX5VTTjiGofDd zHjeG&v#Zb;;Si8Mfy4EkooUIUvWiQw{Fx#qvOKdI3DwPK(0Q&W$ocB1tOoq>2_9$ zS@{z(Ea{UK6Fs6S!RqcH9GUd`W8wm9_j8G`J=&1&T_9bT+KR{Ow3Z0J5y0xa_SxZf z!1(l+WuA(X8I2I60JBTP2GLk{@ND>ydO=)mcwM084k1E8zl9%i#Mi7-| zTKN1k8|}NB1`s|H_6V5Ld))hl^rWu^?ZN|M@(p`_L@@X=Hz;vIF8wAU-Bw!yB_d6nzD} 
znov|*vRqXKBf>@q*@Y#j)-gBEIrG4+%^A??kyw<6|M9)zPh^HGEg=4b(CYDL37Bxl zPvAbd;z8TnTadu)S)~F-a+zH77fm(t!8_6_Le<880`gRs!o{)2|DRXhz(p$Qn%Ulg z0Q}?=+e6BVnPN+zsK{W>uq&6k+zz4s>nXF|gZQmRSPBmy5<4J)L^WOq6lRw3waUO_ zS&%CMLO&1>FtNnL#^6FF5i!BFN^rbn+Y@LaP*@5&E~cikanb>d=7~GJqE5xyw8{GXbpZw7EOL7y$Zau7 z(Bslx+URQEoG@@&jl^*nX`v>v>{E|kN$D^F*mn0rinddW3c+*@`u@%LN&d5J#_C}u zIE_Oh;#6AWi{;|96RxTT3y|3THoxW?EGqA~`H^vv_PP#5_9ASWE0GcxLJ<`h4vK_B zeH;waP*k)e4hoM{LLL55H{X%AAR;A-kCzqln!O@l&uHI`BP9(qF=NjX==03`kR*4$ zIy;%l;eOUaqB`Qneu5ufVt+Xh^Up^V4M_0Jt#H-R_DGE_!a-WY+5VMX*#lFw)rNel zof-rOmClWw)L1ay*L$|=F7;Y6*EyIFC~1@KL&ndc<^E4U*x1_V22_XPaG6cws5TE^ zwTMR9Uk^)TQ_QGjVyjZtDs7cGJD6J(>7qRtw17`@dAUj|H5>#smh~I!-oO*%qDJMR z9yytAJ)Fr{f&kQ3Jg6kK68c^>r{mA281$zU@yQZ}J!Nixd;J%C0Q-4|Rd7tz^e;7K zwd@4!HZ4gh)OUtNh{HgpvGa&6VNOMp=9sGLZRQOHHOKS2$|Nw%U4-fuqSB>~E~2d& zuhfZ;7L^Vz%1M_j4#Ip8odA;d<=n91iv&6r&SPF5E4yLkhh=WZ6L7L!tm;l;L?~;Qts|atP zw%8Lcj|E&1TGS^ZnMS)1a%d=Z>N584<3?}C%M+)L?c`@;tw#a0*#?7%pJg@k0t9ft z>Wqv4lGMRaHgLX*43F67o3pPOWW-6usR*!mJvMDN;-1baORa&eHP& zuCpZ?&*C2w)4LWY(tP}oSCc`u>w)WN0W{TtDaa;Mo0Z)8g1ph3vdu2WSV>Xg>2MS7 z3?uly4{6EI|4-0w7$I+-VCis)iei+mQ-;wYAI@^jVdLhSk>`tY(EDV@XF|^eTNu5< zDCV28`ZZ!QCSATmxBFyA;ujAShDjn7Rx;$&8F%oGmgwZ8Q!ISgg-xRv%P;Lk$|FVf zf%A@~;N~yI)M31&-{(8+*iG6bBTqCtlPTkp`Ys3^azuW~H-hYvhZ#395{Mvpn`gd6 zY1mwFusdW`LEV>}%@@`~4n@4ck?aj`;nMuAmL{gkJu<30t+T)#Xvvzkd{QU^i`BK# ztWvHy*|XFcH-uIn(AoQWxH!t{F`>*py-0&*hU?aAE}*8l=`I{?CbazXjE%^Wqw!4G z&rgH;{!34(eem|v{WBJ*+=g2Yr_S|n8ZqjW-9&tgo}v`t7Ub$2{H*VJJQz&y`J(T ztwYGq1i$k$z8YIGnEV^yU;P6fMZdi_KG28-Xu60j2px+Z&hP@C8k~>p$JUj;7;ZcKX{NlS^U< zMm7g#kWXXeSIenY$b}3~Xi8}|6S~+d(M(w0X%u0D(~3J{q60L-6Fn6iK;ts6fd{6@ zr6lt26~AbABw2m_h!dVHKYd^w#0GUmMDTAt12RJF&^ycRq$Xi(kou2b2T)STY~^szkL2Vn&0Zdz#|N2=^Js~zM9)kSEvFY$(!lN2;93eS zw>+nAAI8bwg;?8HDdMNvk{7ohcpBzrAb!`5b4@$A21o!Lwq)fcr9q-%QtWO=k6%_w zTVcbvq`TWoI6ba}K!}5YMPKaA=igXDk4; zMs4!n?6<#gn^vuK8}S`hQ87Q?x$?G;s3y$@#Qvx)AZsq%2;=JE1qm1|$6DDJ*i_WD zyfijMDyP+~*!P!<*L|>kc)$jE+m^|!q;S~D^G=%kX;c5G`I&ae26dVlbp@2GU;^!? 
zVy>bX5wB2CR*SPw@8NF9)Lv2$m%@ne7SfUYxJ%k>Iso>ZEv$@&!zePFgom~iC0z{w4f4p}#{RlTp5CG})5sEag#S;~=i4uOh%q@eQ zrR$twKnhY(M#Di9>rzazoNrWIvSu4RV@v|`n)_>Sk@ty!zuyyid+RN}>vi}5!c+xg zVmemp#MWHGp=ubxgmm3*HN{N#RN|1AH{y3LaSV%dx%$CFRr@|^`FjYp`kaya%QB|A zxh@i+zw#-gc_72z8t_p%zYT3HAOZ?sf)Lvk$5`3pagT zq~&hjJzEw%TBb?+gq#teMmf}R$^0(O8u2WDWKDlKbJD@7mMA2AEyqlb1`CX67bb4!iBgrs=+)&1%!NNHLTH{6O@G@4EF~I{M zjep9LnD|8d&@NZOOmj<5uwVxhw2rOGFd1NgmeS!l+m4()x$Y>l?c}3RiO_6JS>=fE z6BEyj@eC=C3%>+bA-us3`D?IT3OfkZ%NMa8b`;X@y5<)1e=^=%98kFNF6j3w`zEry z+RQgI2p-nNW-@^a;g?Wox)L)`Ja1UJ83FYu1z;9bHA94Qnmae}-~ocq#tO~+dh5|x z{^Qs0gXa5>q3QR8wpo8c9hKqB*9x=oe zkPZL9(Em_!3=1D{EIDlD1mC6DUl*|9@)CAX#QnzKJ#*L<2mwz0psvAY(5H&{oP>y9 z0<*j2^f`Rzfz#?JWJL-sboDDCRXT4-=xSY6Pg+1ve$ueC7oV2Qo<%R3?)3Py9HI>g zOe^s5q{&6O`H9FJb3yUJQfSks2egMFE`ARJBQZ4|hhLhW`c50( zBW>QRZH0?d@`cBPm?b&x+ur<_LEwT<_F61k@9NgISOb2A!Yx7Qb<{?9Np_jsatJh9 zix5t@eNX>a>ztha;q|9pe}Bs9A9Xeru(a5<54sjUAa$qFPB1fuk$$in<8Pe99mftO z$}jTK?|BPohXIDX{QzBX!)SCEzviC_9O9QK~?D zX_uJe?ssm$*|S4$b}^s%OK4_Ll*@=h zgjdLDKLDL~S06Xsc%OePO9jo{A20);VgU+?Q6#;_NBWe7?u9XSi~qJg*b|F%%E0V5 zxyp~TH@!;_;j3Ll*s7t?e$J!~JT-VM_)lbcr;QaPZJ}%HOKXTf-i0>|Y&e6`@*km4 zXf#=-{mOOi<>LQ2UZLQ>UH@|l|DTOWZV+6JV|KGVM{>LBh*U+m^`ZQF^CkIm)pCU* znNaBFBt>p4v{qA<3Wb7$mVn%|N30WNqy|>A9x}d9Im9SV7G1p~VA_NFxEIDCaO#eQn&SevY%i)`+Bt#kfL z^4*nT=*5|F-`O7v2Zddm2UQYk-rpl~Q6CGxcXP0{OG}rzAzCLWaO`#1Ccd%^Bqll7 z@Ia{jWl`L^NiefM$ce2eirkp3W@`^h%_iL7ms+@dzj{54i*p1W_CcgvDsCS)jb?JHb@B)oKdC!wyt9r?ublT%HzcRr5C z05K^_Qg^hzkLWkFvg?*F=p@yF%8fy2_)2AaJ6=}88*dX$g1DW%o{43wSJO{gWP_HH zr| zJv;+$$*yMph_HbOK^&K!MUnuaw9%7DpzwUM`11OnqD&TurnVK64sPom&r%nVO=+BYdgj^v=(IJ& zK<=>h6;T%qFyxkn|2V&%xeap&E_h$}*Pg|xSX^1G4)11ZXHsA0tD!}eP$^@_Y`Gt# z`-=W&#~7l&wtKvcw1JI$=0c)x`FP##5KP3gukxG0yz@m%KQTP`^IvZV4lJH|E3wp3 z3ch-#dDcQ0s_B6N$5~Fx?}Guju$y~ttzLGmvc?4p+j-R-^knQbkLTN03+2&9sC{r@ zkEI=U81mE02)3tc4pYmBV&0Y=q(8J1R{nAqeRTTOx(|rP>aX8#v0EpVY$QH*riq=Q zI*MM8IFkkPw7>a|s=ZB?6X!T8-#Qeptvh0j{@JZ~Tm2XG894j*(dxql7adRen`@`*KG)RdFjGn4CGt#4xJY}XHvV|Q zR*fzb9DiJ2M;4vEOc}33Zx^K`vzTMse+v9S2Y z(($lY!loIyLIx8*z#sA8ObQX({!%S&FQWu_i?gPO* zuP5`|$CnlbpJ6{%o7|z__tUS2IlUaq+s%Dno-I8s>e7&GS*C17$MV$w&{Fzq!Eqp1 zTc+M>cop%rr!z<^QSPksszOPXkjySImmEKR&4ux!R@o+H!98xjqJH_0T)fv+CLe%l zH}?BdUB2*L4NGlgE?mOqkvi4mtxDCqMiXv3|wFl5dBLY(agmo-HZfXx9 z@I8bT1MPS!O(Ziv{_!ZB^7ek$T&BJ`8{_3UGQvfBu%hM%M^S%+tbd`=UqZ_%OpQd6)yyd(^lla;^O-NSb<>Z7sBU8)s!u{NJt$ z@7OrKN>?#96ysE8nlB_fTbqz3fAN<|C2Tkrsat!yC^#?(2kEbfRs8_i0YPk>9d=u% z%NqsHEE`q>iCM07S)YiqLsqiZ`fT3Y`;;6modH=aH2onAtFfMc0-tSfd=+u(lkb<7 zTd^+sVl=DImI@=VMhf&NVZ!Vx-*y?drz}T(*Vhdo<32l=M_BebYBUYHc>Os-zPVy6 z+0-2b^>FCJaAI4tQ3sap5SG7+Vc>XdAo)N3`1+zwp5d?K+i_yf$yM6uoMjRdB{Z?) 
z;_~z1*{z+eYQX@aJ1-*d_rv(RfG4fXf4|GC56E|2Sm-OVH7WbUaAZSX6}B456~7MJ z9>V3Q(O-FNQ^P`envSIWU1v{AEUdC6aF|2%J5}}eJ>u{6rQNC}_e)Ktg4g#du`7Dx9u(m7YLf^3JNLF$sO-Es9kwfbLL587;{jw_KQD>bs3uU2Y>S z=aRd~w32OjA1`_MSZML`OKWvkW>0&Pa)J%yT$G5@Co&cH$EQ0Lr8^Gw?b z{~U8C^!MY@u>?S9cKv$Ln*C0YyRqxMoZ^DyqxN^DR`1jCexlk`J_noQ@0-X4yUV_~ z@Z|x2czQeij0qad!eN8k6+P@kqzoECKeXssyQxJ@)qL`?=9`uA{ghXnX8m;O5P&Cb(g$o897P1eR(lu;sPhHuo+3TK)9T(3kZatM=>B z5n`}K{!w7pawj=^FLzzMCT7C`E^LtW4tt3+;%vlxHIKpIp7`B#J>`Zw`vw}2JfB@8 z^nlK^tMbDwVNZv_8)@?f1XvV;wpCmZm?@5jaZ%sV6$%_q$%nXLm!v`~Y)&)FR*H=&361<8Nb?e>+Vr zFSwtNxV8s9JOJdGY0?hUWRET9f3AoHHl|jaXzbxTe)35Dr&80Q77bFC!D% zpRGvT)%*QEbprQm{Jyz?L|*>lu3JsNiDoz|){Q`U&>7#c<|qP?xd9dDzq{)Cn(28& ztzLpC2d&QX0+0ZMrMH@}yWY~$IP7F(UFe(Ca{osD50dA1U6(iZRX+}^z69qW52{-B zeW*=;y*=Hx#94ywyUC-CoeT9&amsyZ0G)nJY;o7n{8f*7@QI8Y^jy(i!|fk}_-YTG zGZRXpgktuG>TL*b!t=%K5M}_PGtBDc-ssN7{30C<8BA5KvylN)WQwZr*i;#0!vVLd zEX?)rJ)SX!-V0&pYr15W5U+F`&2%HJOM&0T%F7e1fo8xg+x2Gp+wlQ3j-+yn{S_wY z{&P1O8q|cvn=!)om`ZC+4VHStkoe)zm_jL|Y*RGU43i5n|UE>|aF3H?3^yk*-qr3<@qmNh}JGh{!Me zvK;D$yIDekB?tW;r>HtrTY1BtoU0Waj!z`m=zEra#U6f1@!DrrO6o^{8d*&NZg}f+ zx6$^Dh5)O=P$;7UJC80QG_hR|!w*x^}1_Qt6}H#bf4@}^IBFvm}w0=^X!vhoQ!=L2spo&{7qsxXB6=S zfYq+w&frsw=cCSf>(%(TJOxgzy6a^!k-L3&Gm){cr>(;@!Rz_pRoNFt3+cv30G;9p zMHCqDA->d_?}HXY-)Y2153@3KUGmDyt`0adUR=32F(9) z0hpn?DI#H#9m#l?(I(nF7nAuzw~S7+Sbo}oK(wLX`H-L|Z4xm|nUN(o$NT^s2G}Iz z5g5-(9!R}C%lI0THVY}JpqU-H3@${~1g5%4A&Z;TMrEw~8~lXdO_<(6sVRZ?CYdot z-ZT9>?jb@^Z1|Ii*W)_1f~3AVM@^TD9DoPgZvD)CB?nUPL;K^(U&OVCKCHv)lP9#8 zc5~i%E3(VvA35y)k{QMbwZG(B-`1Jz*0MxJQ4QEg_$d(vI_gxR%W!GCuPZD)R?BO zY`YEhOa9Z}&b`Ieo#A`Ac1gm2Fj&A2rFcG8>HeD&Xs^8o?4k`04muuC%0Ae?N>WHO zX|DbXSWhIo(;W7$*;~g7T>S3pV%M5$efDbMZ@+Vuvx#7C;oSBm01FWyr$8y)b zO-)qy()pQNfzWNX`ED=oHohX)_51SVd6q6C9E2i8$&@r~Fy4_FbZub)CjbMcIc@-} zQ!#{D6R#$pu4J}?l_QU*hgdZQWvyA$&<{2+roVm)qm?kolxSu$N-m0zO=%X^p!LsC zi;4`cL4rk|9lVOsP&&yQSYz-Xa|MTH+P9=sr2vwGeNsZ{k9Tt|5u~M@OK#0Wn+95L z5m5kWfil}H93fW1Kq-HPE{)ri#1&chQBKT~jJ7xZ!q5<1p=N-wdW)y{$g!aL= z(4sNoLXu-W%Cs1u&U$h!kbFpW%UXJu{!!z2$OA0tHa_?EuUC)jR|=W7Pz%W1 zD0!c!TmBq^XRf>bxCr#&WFWEGVhRiw55Lz|2H^aNULJnKk;ukjmDq*`?+bO(l4^z` zxDoS@gE$CMpE*xc9V{CivN7c(C_VZ;$QP*!pWKD$jwgh)m&>R>$4hLyZAiS-<`cD$ zzulF{M)4S*L`L&TW1t_UKFHgNa%M$sBY$Hy~?7T4johe*UJ!TxO`qO-bn$1AP% zC7ZqhA{^O@mt96`1bFTYUn_u&w3ywWon^9O|5`KykBmm1_#@T&N0=ce9qup2-nyVp zQgMf|Jmou5R6q)d_$5R3?c`KH!UbQW&a4KK`_M=Qb*o-IZ z=TwZ){M5D(IRIFJiC3THgJ@(`m{KO~KKsw}5A{i~##$azO>;(c0h~u#jI(R0?&jtZ z)b-MrKmE`2hKJ)vW#}t$-LLDQp2b$>PjEW=Hx`Hv_#S)tIg@Wp{x^RxPvSnSSIE4t ziIsI_RX}oFtlDbZ>E`w3Qi$p*XNrrkTdmN}gI|?B2n|&%f%d;O6uwhX4S@o5xdacO_2I1lV9o&Y zN%3{GE^Ec3ku5UVdXpsG*5~r53y4>pa`CTTP8`?iT6}${hX%&1y{omIl%dI$x~gM3 zt+ve};4wVt_{H_@ZMsz^7Miu^0gvF-t$ubw(m=2MBT7LdHA%x(gFD-BL{)T?l@bbo z1bLJ86U(ZL6YB(}XVPsMQ(zZ8`N!afb1OFItFG*>hkp5S3E_SSDDYX2h~c9us0@n) zldU@u&+GU#tmpe&8#!%Sd})pHKktqoP=y-LG}55p{R{E;w8^_i)VuZA*w!Xq`?nUl zZ7CV*8Xb9Weg=EwWk(*r*6R;4OnkLQl?wb9aoMNu5mQ1Zc8&)rlhExPpyUz%{o*|@ z5jbcRsrD}8&;B6)Wio#`mExiVTn14@F=J@6CNPk9w0MWQYh5b28TwBt0J0t1_Byw! 
zH7Y%s!d<@KNdfrj7}wA7Qv`mZW?M(OrAYA)hyPjnvt!Xc2*RtK^P zbVfVeTir3~yC zp#yzNr2!cm6^c0Wv(#u1-Paakc}@dz4Jolo;3FBS5g(_sue?gC(_a(%JeLO>_F9ke zhLP@eh?!Kp;AK1Tu%g0_-?0>pv6kcz{pq*{a*llq?WZ-snx=t8&1YwQGWnTWIl#1RJASxt}F zjd(B-C@|w9MISkR)RffOXEji2=ePPqH^NnaaJCI1Wk^6yPS=UbOj&&V37c*wv0cKE zO|abV5>|fypbm)Q+dX;GaIB+)0q#GGfBfVLjn}K$KoWBX82-t2TH2J3wDsM!HKx+O zukr!f!M2X3JejP2o-^R-NVvDqIPAOE?OkpVV6tlHJ888;@>%HoCrLsl zOo2FRo@%?sq7t*d`(H0*Cna`#lOnX%-$$QZF~FW6*G_i=%cBa3RFb0!qLU0a*F+zZ1Gcy^+tjcJQQK!7AKZ}z_3wH-+a<83S%(FOt zI|K66@>M1u7gd7=P)_STr0@4hzmG;23#F^-{sWjK{{xs}6f!84!v`7%dUTjwC@Xe` zy+W94^2OojBWriTyw3F#%rmY8o6&Swy5oU3lbm|*V2ybM;&2f?>0G!@nAF0IRL-I#795Lmy-nXfGl?AHz_dU0}y8nS}zotk{AT2r#%N5^r?h==kxgN)U< zG0WB^)$1TRdwRH$(E_}Y=Wve~3lr=EdUOkVp@=8OBhUkY67Jy&DR2WR7X}WO4g{Oj zhpT`Upl9kETqLZN$d%4})Va{HXY*{M!7AH2=!I;X_}uA+LJp>g`&L&qWpsEKjM)I> zdyX9FqFA9)dS?oe3x|`tx;@p1fUkAu@!s8i%|_y&9=%izKG(m@PQo9vAk<%*HEC)prisP}r-;_~&BIN2uPTX7hy8B37n1F7E(V4J zU$wb0dmXy32Va`J$N%0&Bi%7twyc{U$(s9LH3Q~Bg?D3sVX_T4o{#r=uy44hr|AJ10N`W}+-H*8A_*=J-r@dsD<~6E1e(RQ|b; zWLi*#W+50VWDG%ZLRg^6v>|dA0?GVZj!X*8oHoerL)M_AW%~GvTX#cAPhD(}KO5tx z)_Kt!qqhM}Fte3jIY#jA1GXim#}e3@Ph`S_h_&4mrzj0Sz>&A1te}j7coI70SgFRS zL|}5jQ)MPr*~4RWwqFY8O@h)`IDw@HmDFrV6y-VllGXE=id{`fbcoydiX`S0_f<&g z_V?FP;BfNCoT;U8Z>_*Fsr22F;C~a&^x*bQzjaSmWuia05d(BSH1cUBtzw|KEj-H@ zyj4FEJI)94pJhwn#);_IQEfd~axXETzBFoQ>@n~u>91W+*X6Uv1y?=H9834?^5kpnIK%9iE(*Cd zw^0Mvm1Nh3UA9aD2Qa;!$6PP1B|y_%D>o%Yk>q!N6I4W1BVZ`7tgp@x}PI*{ZI30n;$M|}G(ovq_s35>2 zia<&Zw9Bd_(Pn}{0%LYYLZ+@1yA(@o(cr-p&wsSvvQnWGuxs~P75+va5!VylwKL59 zXBZ3sJQ2A!17dz9(C&6>HJ~`<-`_bJC{O|moGqWa^zqK6fJ~S>M8?hJ@hta#d;kje ze2g+09CJG535umPzdpzB^oA3AjQ738&1x^Wi`(r+Gms@GVjwm>YbcIzN{t$3a;Ak4 z`0crah-q}!QoXDH8{nFBOX24P8*gN;2CP8!`4d=-E#p?4<+NUvf?!N^G*LxYPu|J- zf-?c)_KX1zR)8QNPsKA(mw50#-m&aXKx-bMS_Y zbl&$`9ughg5)hAMe6Ec)#p--F6G&iQXq97LnfKm!c~cZp#dHsqP?`Y)c3(2n#v7Ii z%)D7cA;wGofQt)ybOfWv5~HPz3!JVyE4OYDbPW#+9w>3ESL}7Q8oTuO#Z5t0p?o;u z_MbqtRq<0jQN8@*G?Cb$^imk6wA7$n6$bi1DuE*BLxBndQ0QYa1PMo;tq9QMsTYCG z91fjXwBTzV(*21CX_%V3fSYL+^_Jx|{nX>3Z~!z5Xu!QY(O7vD%I#8FTHbvHVgqH( z(gLQ|77E%py^?Z3SapF$SpupY$wPC zXfJ5nI!(R5EwWirnb;DQB9nPGXX;7{{gAJZ zw2qd?Ye@|O<3{G+g3)dH}lyzcr7c{C^IlqLrA z_~gs@%jE7*#Bz@Oe@V11^2u`!Ra@wc>>(h%CgyFb zZ|$1eUJ?ym{=!!RL|yb#iT2BdEo74<&lq%Vhj!t^s#BKgG}qYIHlL`U&jL^Lk!CVz z&-1fr4Fp93_boPU3%#zXfU+831*EK++0&^nlPn#VwOFNc=&yCr z!EEQ{uCyHBUi8by-S^$Xy*D1gSdAa$K-|xpZp=m68)LR!4QdZMF)=@GxI~hFx$%~`!HDnad6Q2KoAIEC>+raVB}_pZ)BIG@8r#$r zH=i1rCbxQWuW0K$0>xM+1X`YPd?bu9!Ksp~@yB+O6J!K58~~cx5T!&>x(GR-`RW;( zPGWX6Sb%V-yd&wgtsZMc)cwZc|JNH@ePp-ROo$#_;2-XVq-NM4&InrL z@D$HlD_JUOrgQx{1&6F9(Bv)&Itns#qAg17g0avFT(wiX$kQsF1bqP&hTuj+wbrsy zESgh^l4vN*p;)l)WgZf#`3^?k&8@wATYg;|WgvhovpvEV9&2&a41>FK;R|L`psYCaW2 zWJ7)y>-bKOT!O{heAll3EZz4y9u2FM?EKl=<6gXSq787sLh%4fC}hswKh*@$^*SK0 z8#2W+qw43KfVfXJrxteA+FX$1~s+dgV@dw!3c5kP-GorXuv=eh~ ze%9>4jBI010AVtu>0Z!hEgqu?Po`r)ym7IUPDH}>=(fB5@@x`~G+^==gJI@lLx-HA z_MvToNBSaCi5fVhk~j8;CnkeMluJ!O`b+J~0B79WS2AzZ?7Qx>AC6 zQgbbiw$|#GT%G&_Q&)%oObi=|J+X%IAS(1v{*d+knm6K;xb8_IU}7qs+=iuCP#(Wq za+--MHp(M91_EihY-ma-^g+8fCRSo2S=}q+JokYlIm_9;Q`DpvzMQ_> zU*$d>L3DmU=swvS{ z-_`D-!B^f^(QPgWb=FB8C(Zo#4aPCN-F^Ui?bh=Q0!*b`bYe@8?u&P?{~GaI-x-Z( zp>kBuGw-hoUjKh7vvO*+KCX(Xo$7ixKOap!P3(7t2$z-YqJKAe2neA@5waCJk|XcU zgWeU}kn?(~S}|mG>57+iLTz_rp~_QVEQqGVE7NgJeKEi7ROb5V(=GXu=g}k37R%AS zOfhVkA};S?>2nGTVHRyr2hk&LPiT(ArhdPkI>?SPWeQG*6jNHwGu1pztkD(SW(@~q z*zBs09UZDX5YmA&$wVUR{_?$G*F-s%UMvzdMhZu{#S!a^YG;!lJ60YduIpcHOB_V4 z32E;e6$_qFObuIVL&xq_eDKf2dtqn7Eed@l^jhOy9MT^$GPM+w!C)9})CF&V&u98x zNz9rcgZ7H2`6W<+W5{sBbRsC~<(h=}!G&|zuyR-&p8yFHIuguy^15cBV5-v&Z_3LH 
zB){Tc>G4^R;wIC2(v}RfbRd$sJ6-2#^e+PXVw2K7=RY)Bh~bkGoNxTWGgz8OB1(FH ztvyv8>2W@VIM&h(Sj**bpbV;_Vvi@BJhc}QY+|u#vFVYqWt2oBx)2@d?MbBKM1^4s z?BG@2<4Lt<8+Yb`J})s!OhnSb+5ZB(Hv{wsO-K4@h_rcKVeQ?z(!uE>o^1z<{52fVXVO$;4cd9N`pC?~9A@ z)X33=nStxPWA~7)1C94n@U8tW1$9_-Qaznm&$j1F`EuhE{X}nv0nQj!Uw=CZ1~4@3 zdW+2IZ&~9y!WGmDRw!IpoM}h6r6b^d03&^KE5(rbYmaEN1kYb;y2pO;boSKkF6X z>+rC_!&)`WldklY}_76+@lf}@i(&35`j~WDaJII&V44y?XlR? ztzLRcW)L!%tD`Qzlo>_forAQC0dsNXR?YVDn2zSv3$730qWS6PP zda$J?NwX~LZ^E^qDG;+J|8ZSqLNZpgqDEF;rA5ZoU^qr|~7dtq=!hmQ?9{eRv;W z+ds_0m+`d!>Ml2me&R;6+@dSJYdMwd&Vg(9$Xws_K><*7-HeqI_Rq^4PwQ}NMSP_4 zZeuc@cL|R3KqX_Ppk26iZwFQuU2{KPrH5eVo{aZ3%}9B^RtOoLul+orwzXESjA^fX z!HX60F8HcV^ID1`nBn-i@yHwooAMewimA}GrkA+oZEbV~gEk4R#KWtkWsxi{NwsYe zXo=}2^b*;bBg>KyKL&LK^2|?lIugyCUQH4E&5G~r!S9CHbV_F9-VW0BM@j%fgy|I` z;Tb$(gS}|(o)2}1aP4o%B?)vMU_SO^#u{KA<{@ zK*SniMOx8 z>b+i%8^A}MH!P%X+SqSU)5If}15JCsS}jxm8WFfR%tn(NPdPE2Qb3*!=c*UHo>1d% z&!;9mme;h3`seUvUW(D+6uYm1{I!b%@%45nOeBzHcGNlRvq#hWvMGPWpA;`wIL0aR zy#zwn)R_YlahG!|0?5?aayPxLEW(Gf?TO#+<19j9aECSL=G&HtUvI2Oa!*G?S+iRx zUT?4v#q+I#mbUT3OE;sBM}XQu7OR&!?qoLdKL5vft{HGxT{f7?UegH%SevMWk%QdMTicdBV^zb z_UP)i#ncur*k@g*3$gCbM*qR*#zzMss+>2`WjXrO3YfG>Q8mMTp5rHB4%KoS0?Oiu zvEa<0Tm1U!%PmBOBl@$T@gygnpA#VM{`yZb>r^qL^!(}zNUJ3k;kF>T=C^aZCEwd{A@I$!@LAvO z10dR%45)ncGEDQz@k?K7K)RhboKEoDe$;~#BM=S4c~s%nS4@6XE9mkp0dd(6J|S$C{43Da4X zffzW`TMbV?tMzDbUTITh!-R5}5$s*Fvm@d7kM)f&%XnvJ8RN>n0ko378W*jqug`|c z8}FgZENH6TuIf;&|JY_y@u>6OCFU$M-3F~AzQBh*R?;Qex4Q^tpwM@{S<3M6exTgh{;wrYLJ+B4B zJ?r<`lHm2tvmuZ-J_v>tiBC#3;swl*@1D`(Br29|{9hJere%*O3cQ~?4~U~}r?Bw2 zmcul0eFPPmOjq9pgk6AXZ;_Dt3sk(Qsdm$ux;s5|k;e4|UQ?ma0D96JVnE%Os;^E< z^?_>s_1xxy0dnH1?=Gc|y#lSiNv9~3H@NW2_1mel6CfJ?b&e^~_E=?nr(RY8HVxC_bg>P@eyn^LwO$Cvk~LYQ{8aq) zP(Z@Fym|QK376wkbDJV_ytj%49etnMnt+-Lm z$c7Ntp);zawDpp15@&EXZgLm?&0c55l zAIlfJzvrW>BkFIg(>@OUCgoGV;~$uiR!cLK;o9=&D=81(Syc(27W*C{lnIw2@V8Osb%civ)fy=MtSJ3 zWqIMoCW>_d^|2hz-mHZwD{2ro{dc?j_^VX&o(&l zdej?gi_dm$yZPOM?IH~?yx4; zEEpUN0RcT!g->ZQtp9AD>rd-NcYlD9h$ct0%1_}LF#QQLY|d7c7S-)EJ%sWm6%dVx zV*o}Ba*z_WxJUG4t37+kv>htn_}fplSF=Eu6DcgHHaI8OErWUppG=%B7L`wLoTl); zgvp!>x`8d)m4kIY z%yifx5LV0AUnw->mWGnXu{X}7>6I>MN%yKnuV=%ZgrcjQFf&pD-^M$DzQBo;T4x%# zBdXC$C`Nf)JCdw*t1+i}H^Fv;M7$R7r0F_j@2rT$N7oB{h@>O$E8-2E@G_g;+@>aV zaXY2!%6#Rg4CU4HEY?CsA5zek>6Z`o*5Uovp;k>R!~)0vTn)2zHID?==YdJvIgyE%#$!OLcb>7SbL;jr6pm-*7JC}KuYr^deqc@6)sT=j z1;|LX1IEYJ_BuS}eOX}12cg>?Lx8Az9n@7%x9NM%)!<) zmGA50Nad00D;Sam?G(9tqn+@V8VVrmt$)GyGffQmLqos;qsW&O*W&A!Z|<5Rp;wc6 z@mQcvwEiGLAiI7{|G$#>r=(yTa!1Ji9-kdgLd}fb?l09d|94*DV*eyfc{C7Bj@|RN z8;KEkUV=?H~otEzxT&pe2Hl{2={{pl)C={CD zKdb=0t;2E^4f?p3Hwvh6-Rq#bXWd=N)LSxIH9&6xda{(DHA(WPp8%8$J_v?hb<ND0&?{curCk5N^LO9S3GRw7l1ZvPzG}reAmxU z^}?4jB93NUgWpZLj2W29z1VrsprCJLCuBmx>OY6Nb2dt^)Im#n`f;f8=1xMX)h^B@ zF7V@ul}^mGU+FF*t#pkpm8pGk)BibD6-)dRm4ADt8@w??)YQj|)tQGakyf$JcUlM^2qKt5+G2K_Knj?7VtVbqkJjp>d zz^W>d?a;v}LZo>@S>tkf`F$p~^D`6{yiSEuAOnU_;AoJbMgZN`a!iiH;E{dR+mAnc zPMqxm|A))!YDWJ4t8dTcxkjHItuH|cVEZ3%OHf_P)bxxepDS(dJPblxy;N$k)p#lWZ z!AxKPuglJ`d*y&2a^6&L0kmBm44}DK|NcQDWpt=t*tS*AM7DbeY?VFI6C~iW*4bdl z01_~aIkO$|(?HUKL-yVjOa$JeR~2AI<@o}lS*?F2fD#7bY`vo}4|}ZJhOO0wIBBpJK=@>#I1cLrjDtz7jLtKfns}tj|L(jKmotMJtsan4wEzk>}}Nl z^+vDd)t!XGY5`>nJJ}&7+f}aA&-?V%k4-5g3~gF^BIQFO_TEb0nk9n<=PBowrH+fC zR(2=rK%1GrDZrltd2b%=R4pr&$6L!~l;x=2r^+uLXRznfdXVbY?T8?%Q|c*AiJ1u+Wz|F2PtSiH@s}?# zP#1*!U?L1a_y0j9X#sQ5pbN`0inX|Do&bjDrpGhX_L$SLTg9;JB(Uq$ISrxsU$?Yc zk}KW>_QSTU#n?kv;=_9ivwA7f!P?%=IkI)2B06cBJ!ctLErh)lt6}+yht@)$S!Q6>13232e7Y3x^B^hWH z0v&yu>A98HSPgSWMrA|oUU-@jok4Wx9H^IQe>Rl1I~tWaV1TQnZtp`720zxV-i81~ zLzQ>rQQ$v&QP!&OKwmRkPNJf6PPf~MG^btY1bKm50@q@qg2+OAK&ZTks3_3+`YFZ! 
zD2PJ*vH48GvD&a%vjSLtiT^d@dCGFz#uq0b9J?@N&6~bHz=2Df?0DStIPPY(K~ti= zo7|?_yEV2Ijn!zC&5#-5Mrb$QOHzY|QqTKxB(=UvB7DZ8&6q9JIuxUbzT%c^q{A|K zG+l>|M(NGfvLL`czNgWW(696l_tQKq>zg!=wG#NE zR=!9eM_Vjius@8h0_mzIQ(b2Nn6$_zaGzM@AiquM+EMIZ} z=~+SJU?>di0i3M7F^@$5N-x^zXoX%)S(}5cM1plsS?V$!vTn;M^o$Y2W8h- z0iqqKb8$Vq&FWpO7=d~ZZY!Zi#|&@CEd!ro#SOa75t%phbi-rE`QY5?@>`xNG^Z)3F6_*zgn6N;^BVelAC$){3uri*f*ZuCJ2d$ z=syb?;<<3sEycIK7Ri^ZjKZKI3V7VIUdGbcZe7Ul@Muo_F+bPJQdcF14Frs|x&YwV zG`X&742L?c$m;TzIQE|U6{x01)gSGE$_Yo|I<^Ag9B*6hka#eRGrv(MBa;->geoC> zBYwp=hIg-t3p5oSCxq9kh#3GIO?753I81X-s9Y2RA}P0{WUTErC!3ID9TJOY7w7hJ%C+N;)C;pHJEl|qDHvrAfw&i!S*uOx_dMxPp)^lZVb7SB)}%0p z{1$hbvEJWjgtJMLV#}$`C1ApTcGLwQ913E`v>(dOOZJ44EvHhO3(cip+5&9I1SJ@2 z^3`WRnPBFj(_4tG%kfxXT`7?QVGuH!6)P`~}tNcR*CYUdirhPxXKc+L1Qp z*B2C-xi~$PciQs9espOC2@OHIFZ@ojQawuNE!tW9Oh^22BfJc#HeO-AF2huK_$uM- zKutFR(o7OE*7C0X;9}CXo`;2R$H<~=KxL#(^k{xtAftJO*P^m*2K>?iNPHd;(8Rx! zG&SXDw(gN?y`}J|F0lKF@4-;g5cYsH#_v*EUeETyvd_RahC_M?f`EP5(5{uQa1TZd z<~*tGgl2eL-AIyIN7nIEtQ5cjG)LYMc{Y1dGP*s09_1}Kt0Qd(h$}GGos`jWEaYAG zM}cLi^NhM&h#xVmWvpyr?W)m|6xhh2B%(YzEk6n@H^Av8JqrqY1cHFxeVa#s(j%UR z6hWESQfZr@mI4SGWJ|Is#D`tkTdzh{b!8X~QI{59!kWg*K}o%+rZ({lh0>hAKOWU@ z?SCk&AfiVb8E(};a|CX z`kZQKX~h1I5q(ZxBrE9<5&L`&9dlBM@dND~nf|*k0D(lhx|E23)nNBj2D0>^vsUh0 z=t~0RT2^>47wll@2*~vbrWWVT=Iqx?{+^u*v0@xg{cCTt@i``0Gr`x39=HCTK5a>2 zTkPfaTi7B1E|SWSsr-6cRv@nX%>&5lw;%W6l{ux#BftP|8ojp%JpK2C_x>g#uBwm{ z&piy;#b)F`bc;5skR1`~9ls^%qwl3I*tLex9{Do3S+oE^H{A1`CkuXe*K9RM4mCHp z$&-tccXLV<^?lz49ZF_jCiipon5oU~6sT*#$8pu6ED-x{Y8%X$NUyKKvTrcPfmZS{ z$3qCREk$+o^A3|GbQ)qx!nc8v0`H`J-Er;&N|LeSSympGx?n}jiXY+ zBPf^h3TvP!E%$c}Twh>ua(DSy0Uvx3)kVc1Y&N**z0CD65zae!!xAvwpO+26tiX@HEKP3X0*BrVwA?E;4ji^(|dty)BvH zF{IXkIWi|IgQ6-Ot9*1tyP(>~V5cW1(C#d={jrNI7o`Q=PM}dR*8ip2SnZFrnC`njO zc>Lm-y#ZQSz=FtUiZ~){hG=&gXPQqwda*l>Adj|>1$SaxUurJys5m)p<0Iht8-MfC zfQ3FY@kDRCdFfJ7;kOP9*$$}kGd>|L3bIvrn>GtbPTzK<$=ajd1)n|~vI1!Pt%mFN zavD#LLb*d>DIgK4Ihw={Tu1-1gWDF9%3@!NA>Y-Bo&tQ_ga8ydN9C52k}N*SR|I!u zHcV3;OO>_!uL@bbcN!DssAGEgpqTzao~1Ua{j%d9S`T9{jtUK++P;zoTRJFGI1GA- zajwcTPq-eDAR*lo$x$A-Y0zVeoG_RQYnCWl$GOuxDWe zTg@Q2tn06O3P^N@-U>uajl8MTKqTMs{-!~Oqk=DfA=VEYI8Au|{Rl{{ST~T6E1a3J zTrKccMq3*Ifbfz&7lOU6Ri0q)ENcW=Bk~l+3x??2^jfLkM-123tYOy{!%PIku~Ol? 
zY|J0*YcrFDQ1i&q=iraug#lMX~jW+1wPVsCe@UOsP0 zpKI%p-<3o@oX~-PU(n8#TOrqj~;$uOK_MAXS8alim*qq%b!H|#n3Pacv@CSPpXjp99m;G6E*NeeW0OsIccV#HqTTMCPp{%6pRCt>bHWJBL)KoE*Aj);k#ly#Lvj|8r)Ezs3uBB z4H-yrO6T+@pc{#(rO_;c+gk}LW{<;N!e?uxat$}bkACn|dS zk@!|UI0471X~$v554;fg_W^uWuiW%3>SRMIL`^jerNT+?9)^xrF;l678aG46TeaF~ zSj>QBE?XMRArPJ@dLon!nvXaNZac|X*%sW)X+??rQte3Een)-G1$~fBT@WeY*@QjS zZdom8eNQs|&rBS@IjX^VUUwiLA%UsbXv>vQqQ~s2%9*JI&e&$G$iDYAH+`eIjRIA;UaWww$32mmmrG+*deeFn~cd=n1YFuXH#%!i5aihpE@`WPl9 z2b-^G+3Dw!)3>9rqOiH3l}s}n1q4l^Gn5J2s=8Venf9{h;>m%fFdRVD{}ot&k?H~T z$14W2;AYZv6ewR*9%gUwwa|nB@ZbgKMfJg8hFyHY+%qtm7!~s)R|t*44Hk)a{5`an zg^H$l=;{Wxmhx?L0|Em^osIve(0b-Wny_Iwpk^ z*q01F1es4zGfULgo7h`HR};SxBofp`G=G30hXy8gN^19@FN< z;C%HrVYP8gRZk@$*{|(4__50!q?iRI-f(ikSjLAiiam>Cl_ept`z`A-vLG6Yi@Lpi zrGKr2Tz6I6t_cGgf;aQ7d=agly=Y+CivRQOPfayY+n`Gj1U*&HICaf@fv)iZ4540( zl?vmbG%9B)ePYs!w~NRvk%);&(+ zL?RK3y#WwBkF-0ny;2G!CV6k~dj-}AcoY@pb=aB*S)JD%E3qlW09xWf@ju7H(j*bS zFg;Vr1Fj58sOTMH>xV8#Uyeyv6ykX$ zMl7o0corr11jwBjNb9LH{}Vp8Z(2akuprKONZPJ!_n5`?);@Um1Z+9DhDvPV9_6wmVWAPM-xFOf*j zC4UC~KkK{y{mK7t;5y&ypGQg~mY#TXz;7(s5RdkAmT?*gWrL*ayxy_z+r49W&>bRg zLbm=ci4RC>WvL0}>yL@VO38_~Z-!Ln#PPPwCRz-`Ii_5z z)zlLR=^tV4GvbM`SX7L)*Y70gf;NM4x}tfJlNNc|gob*tE#{k^aF}#or%p?lU{d~F z$llJryNciQ{E~wpjQnV$sX~7@gmM2(v-A@_8UcqNCqIv=W}3+UJF7TLUa#zZ_$_rD z?b%m${jSI5q(Ln1_AMR)13bK!gm=>tAQv@=t$GVh%MEkvBy0M})QuZ_h;Lw1eZqbg z*p{|kh!UhQ~cr!$lIqD(iR#|SsF<8C#d}w+s z&A5~!!-^mfzW4kGV>S1TouSd<+J-i5^j}(3`S};!wAF~$$9+$V0u*dIv<+yO6$0df z5?Ium`}3n_G=lzUXXj-Cp?AHyF*JX6Pz?_tJ4n zu~~cGOI5!+Hte#u<#Wxr2*wQ$_6CU4U6sgM)SWHxrBoY- zI(i(6>&kL642gFYrGC}v+iJ+0YT=rBeKqyVfhwUv!s%kmutG$X-57kByyYd&3etl; zy1iwQ`Sivja|iPfSIsdMm{|z?lanIu4OA<+^crV%7_?H^?~jZ7n+lvM4AJ$HZS$A) zZk9Pn+^4@viLiW(RU>cTA2#c*N~IG2ZqIhf9D*sRH4*bEG(^kInaZ=P)fC{Qi~lbR z5E@%5Vm14BeJtuL?4(paNF>MdZCBTgGz=x2@S2s!lSZ)!N~(2nuOv-+t4W3k_vqnm2XBf2(LH3zHYVj(b_Jh$l_66rMOc`*fh6%2jrqLIR@ehM#T}C_S*2Xpxa(?aQ*r=IA zW`)yGA#~%BtLUGCFv%*;I5*H`YR1s$C4sfr1jg_T-9AEmLvBY2Gm`=bInV4JxJN#g zdas<-APwdS)`NF{L2TmS$&b{On>?L7cL{29$_WAZmQIo`$(6a39BofCZ6egW*p36V z$S>X+z0M8wRzXTEv0yUWEH^*sbavF4ZzqaIL_5Ad^J5IQinKC%zAwj<_S^Q#q50bz z*K7ZoICqP|d2(9JDR~X#H5_@>)kH~+JURR^6JoC+m@y`CC*aCx_+_wPI(WHl;&o%Q z3Qi!Uc5E+{Y}fy+UJc8Cp7AfZ2x%_S3m{z9I+_sZT+rn~cX>2_#hXT}LxjpGzw5m) z1r?>IC9n2qbPO8btHmv$E6Uzfj16nB$@N!s`3{W!G_u&Uq&82B(u*43Bk=fwa+#$u zgE&bR9h}KwY-4D9*Ln{b|D6;u`#_5bV0oFQtc8zXher?1<4T2475)P+raIr{f~)@|m%qK$PPYI46HtU?V?h zB>dJ>k-^y5nmaNV6@}N%R6R0~&EkYV51}glf`q%g`&zBj>2^%>Y(f7_Lh0yf~xog}W z3o^o-MiG_%FHJ_R;bp6TN4`(upxE6r(ncts+Bcr3A;#km=lI#E7CO+PEL3r|BkEj& zRl$NX#n1d-y@JW8cn~Te9H?_Zxw-{FmUwAfelk95RN+0^T1DiCpCNrvkFGUz2N)}u zV#DtiabI*^Yg5&bN4ybxjIIvI_7=$F58|mUc$ZQuY1%Fuvlw?A#$d!B5Y9(LwXh9dpkts%r3_kAwnFFAms?-l^r=L0<|H5pO@{VdTrKip;!o*i~|Z9kK6_txTOD<8ELEVa;W zVuT;e z+x8%|v9#VIOAA-IH z_6R7U|5~TCm_s&WYg6kie=H}hx>^k)B&fk`_vg|$mKPwN$aoK`5RmaQt)TwipP30N zhjV2%6n-gWZf7{2@pn80+q~!;M+Y_P+Y(jGQ|X~msKj_zXQ@{^ho0fDFfQGpMlM%U zubo&P^--nK;mC5%^$jtDpAI+-&;K|c9pEv!7;JOeyy`1+GFF+-n>ZZM@u-~UaUZRy z;b<24RFe(+4+ISabm=ukf96hbFP?#f7oyF>rkJ+Nl@10F%!C?)FVAEzk? 
zNXy$)cM9%obdL#j-q@4oNqY*p3e?vgxyxBPJhv9j*xJF59r@n!=Y-jvaPi@C@}i+( zpuXboR{I*=?akfC(Z0K*E<4k=kXVOnb)gp@Q+OqAG>=8RjPG5td*w5gcw|bMU60d< zflfw3plpYw^Tun4T1P!4ZVm4yDjdghL_Ba8HdofDforh0gb&IkCBWsb2?NDmRYdb> z45zsfk0_kU0e;&;9Dln8biA zgp_(Zn972cxNx??YXL49zEPX{1)1k1JR8_yO{!1r#&5EaP4%kqwa@``s3x0a9q&ku zEJpYfthg8W<3HXFR@Iz&)DG%S*qr~gl|F|D6n)+Ag$v8+A@~If4uh( zFVB$Q5veQ&yC{@C`Q;@zGrLyUhL9soxzrGFx4Ag)+ij_lnO|^gQ&?|mGr$am0ib;X zyxt~lrQ$k07UxbOOFEt(lzuF}WMqINtmmp>?Dax%r;gVaTH1lWC7cjVKerfkKb=71 zwrz?1uTF-&b(POspSC$C0Q6h;z5kk=1uXqQ7gnNErR$zbQ!U{D6+|n6+v>O^U3r7s zB|dV8dZ*dc%^C*cs`&w$td{f|&f=?`p5*C~ z+*4`$9+LH1+w4151Ak(dP0sH!BPtHZk~^Cpvv-tzX)7l{cb7bUPZEYTYn}7R8uuD~ z3epy?GXEwM=K7%9401Q3-Y3q3G=YU2EH1+MrT?uB)~GVEaDLIDbJ4M2-xH_mWE*D1p&acEsJ=I;_V_6C!^UIxLFF3* z?^|_tbWJX$O+|yto~fz+wg1PHB7vv+BS`V951#>u*D`|9#&^AW>gfdofC_~qBCMi% zETvRDk(O|Og>UikwRX#oK=&gWi>GTp0e|iyJE`rJgzI`EmUzPolj92C2LyyG_kSN) z{C69c;*3?8*PXyQ-QKbIedJT+bDkv(Jy%<;omikQ-^d;rksne@a-CCpzwzJj+R?iu zWJTxHGdqYg8+hkzFjq-NPulwtgu-{ulal83ot3@{?79H}*6z&e?Fr27-thc5y1Yiv zxw38k8Rm1QJ$G?D14(iKvIW)o)bTQ-nu=jphZ8G|=CrUex0Zi>cCWYOh)rf(G(gd~ z{vN|ZoXnLHpFl42_RL+#h6C5+M_=(&ZD{fGo`*hl=P5xV^s~ zd+L5_ax+;8tjtUNer`;sn@}iy##<;zcoaw7g4@Lx@fH1j%>rw#<3%P@7QzD{7P?~f z6=!+H;BkZ~mJXhUVdJg_j!Q3l8vldr>yP{F&EQMQQv;$6a~AXOfW9U^=w`#e4vu6H z@EA*Kotc3Zt|JLBaF)`E^ayl;!_om6wVrmvCIJ2MY!QAs#mq*Ab{3eIY6)g0U2~Yg zt|j6)qK5MvAF8IuvyEsQIRxAngVi`On|I#R7{l}2Wnhu9zM#h-*VdN#vyt>x7*{IW zxR>u)a7+c!^L-Vl%HmaH5Yxv&&{MHKQ()=A-u1ay=^~frmiMVA=Hb@M^L$G|+xTjK zWw18{#y?%)qdQax+3s;K%+X)4^XN*h=A&loq1my?ldNKJJl)evjLGp&r3y#_A?`2D z^_bN)D-{&CJUI5c@8~KUHa_NK-mpV*Qdg_=>36zrdNwIduv6*AnLNp(@O->r1P&U6 zt#b$Jo*5@Tx6suWMVKQM>$y|kuo3-U`Mf*%<-PY1c5iakB&(+B+qdwuF{v-p1o8iHwY5|#d~A6Y zEgH$jfqsmNItYn%FbjowcJ2;qX7#)I$`Y^S(84W8OGFRs$!0CY5U8jFP_gnw0#vVm zBn*U3w7|LRadC(~D>g#8n02F-k(W7&du?t$Ppf|}rZTy-G_@$swo|=;m8qYS3KA9} zms4C}F_Co7WNn;+d|4xFV@b`aHLqo7`^4F~X*dhuhorVjy4UAlu7TuJy`MLAu9ml-}sALj4zgbLhU;RLJ+{_Ve@0 zCT%_zzXBZ{DGC$2@o}qDJ)mwG*AdjN=@((d&GuB9l`Ohfy4VWQ`p8Q8Pf6mfi_VlL zEQIfxL@vC3F{Zf=Jbk4ycmVp%WVRwHLwS6)!UXKYq%t8l@BXC>-D*~&HfDMhQgN7$ zceRQL!NVh}qK|kDsVbPaPu+cw&v0xiBa&6`kYBSQU;7hj;3u#}+p&43hYG-4T3b4- z3`DxRvp;*p9xSH_tnp&LM*bo^IL;Am{iyX`JeFHNXd^-I%JOBt<{7WuOB7%R&!Z;T z5FN2HPbHSp!s?WhC(4DSB4Y}B)Ptn4k0^)JL(mq#X&QZkD))IN`H<7&U86! 
z{K6%<`RrRm{Hg8r4QV=nly7K>p!kmO3K<&j4U~%NA46zBw-hvJB2GnNicfl#+1K!W ziFoZsHhXbZ&1>wjJ8rcV)%Es}0bwY9HK9S1CI>2W^&}RO{SONOoHCM60YL4vg3t3b z55MfY&jy9!c+|nWuPNuX^0t1E>%D-hyc0y(aHN;7H^wL2Ek3lRw=7V<@429pNGJ&rHe$-kV+`tqp;v&F!iMen%G70y& zY_8i@CN)ubsc{L}!Y0E*uk7p$Ir`tIJjvhaljuIQ(oQr(BE^92n>Zc_($TJ3HoUX@ z^A`$vt^rR7O)UR0@aOj+IAj5l_H0rl#bT3H(v1a>wyFuC{cFTv0M+Kip*}|Dan`xSgg03d z>A(YLkhNwCsMh_e@C!k1!0t6l1&)PRA0wk>!vaN&?)7LZZHj((v*Wr#mGYy#bL8S1 zk;CX{9|z~|9{6ghsw7lrGtF%`N?mvBk^SFNM^5H&Lfru8fEcJlo=^6E_=Oswz^9fi z>a6KI0PY&Xrb`k1Z5vu$vphh`m=?Nw(T|MSihD&X!Ie_kovB>g7(#T_1wvBSPUvAG z>B>kQh9ad|v3gQF8j=ieB05LCTw(ZUL3XCGvUv+#isO7q6HsMuWFR;CQWB=`CA$*3-YrFnu(p!0Kym~{F|$Zc z`wM;Rr>llb&?#q#r(x7$$J5=ByiwT*Qc;xzLiOW<1y4+z&x&p~eQd+;9Lu#>2B1dx z=EI~F&^7Mv%BKdQr}~l;t87hnA%a71blP-qIHLyG@_Vyjt=G%W@gn^g)AC}FICXJ5 zOekrzw8@!L?-!xSS4Ll3T8)mhVH;bUga%E<6@3c&RHzRs{F zN53?1R=|?jN^0V4K6m!s5f`%cUumdyF*@M-#=ZVXvLY$Vl;r%0(;n}u{lpR*Ko9vR z^H(InDP-9wD-qiEH4IBCdO^(xwZsx*P5l@P0DBnGl<+pLpz)U>tm$>v$ug|9D<~jZ z*w)As%|$YY`;I7dGO>DUVI+1kN2v}Tyyj84hQZHdp7L&3F*nvRHT$Vef7vR->Nr2; z;wS#8?R`6%sh`fmwolWOk%WAcuj<7hw!j0sYMKz|6Lm+&ROg_BOPaU4rNon5cUqTv zGx`E&Wnzc{jTKG6AIpp%nKNE>0on>4hiyo#BIUB9B{70N8pIIh_iCI-uMR9neV%)a z{y{Nintd%9MKkf_SRV@XI>6}adQo(nPiu=@4&Cmc-RvR&iPi`*ob!w&6D}DkBeXD6 z6AMTAzs*!N$CkW{BOziZgj0QG0B5~-ztsAT?@CuGsk249j0Y8Fz*)vce82Ott(49J4n zwI|bc(CwLNS8Zq5*u)oz-!YRp4E_^4COk}o9llkQlu~#kuUnssy4iekIWHSJO2{nK z;V>vwQ~4rrvk@?!ZNcwlYP9(B@y1l4nP+l4eHtnmCF)jVB&?Xl{q_NG*NX4ImVESO zJQL{eetD}U1O%I}^VupwBe#AGL~t&)og4dE zPWn|=neJ`SHyDKd7O&T}1<#%(Jr0}p={@~kNaAY0I)D9(Uy$6m4SJldscDkkZ2vH$ zy|d-G3jL5jr#bh%sS>yYCspJuC`mE8o(~+3?yE+4EFOm*GQF3O2=_N! zfm`N0D9_L#R!k14Vk8l`!(m~y>jN%^eT0RZge_^_H38$}iY1Hku{UV?Zp>6sSCM*l1$Tj z_^rq(O<9BEhp>V&if1P5Aq{wg&O9zI=ePFL?6rCnV@ky-@TfxQ9FfBLlmke%5d^vr zf)1g|jWMM0PEv)*W-N_4SYnubjq)f8F+6VC_kyoHB(db9i~-fd!;i)h<8q*ZkgKJQvys{KY2l# z+F-Ya6H1E7cvn}_5L8wP-bD2WsfGLXU5h*$HdB!hAu>V%Uysw3#p(&QNulyYaPB+p zz%j(2k+a2^oN{pJH7b@I(k42R_O6_HW= zG3J6fvVEBYHNHbi0g?OgetM=!r~aC0dn#0zdn_9wr zApfBX<7klCYwqfaswAN6YDmgiCpTLc2nnE|aoQ;{p6mb(-IzUo^NB@hE9T&Pw0Vc9 z;O)`~1LwR5JQifGzJ1g9W2Xih7-Q!}=5|#J-fgkDEBuV^*+1IY9V)C|{^Jk8@Lb1) zHO0vG#jWdw*F-h+Pjg9}SAA5sIiK@Q&o-kz8SGi|{mwS|Ny79B7j?gQ5OKx2%H@Y~ z?f1Y+C@k0tA$YZYEpvT|3=J|d$(8c6(l#RddS(6&CWT_dU5)FzW=fiS`IZ)upr5Om zyeM#4{HtqInok#u$3lnRM8OMYMU^6-={e`qUm;R1?)lNc^-z>XhPS=Pw|6(fALOIC zC%s}KaHWjJYzyzgHCs93yZ&u#q)_PM;F5q+@PherQdC;Y!OCa9I{Dt(Gs($f&U~%a zhbnTash})hE1^|H)PS zL3Nc4(z6;d;yPZuW8;0G6yNK3@CUUp`evC8u{*O)H zv-ANrBpSZw7X*w8FYAgXr5~U{^Mg}j0`}Ic?9>bY>p4B{-@E%MQ^aXE{nX!94X?1Q zHLdLpGpfQmA2|3MN)1o0<(_2VJ4nIGKS%y9(eoevzPbJe!u-@3!@?ip1(_?bJRb8P8j z+O3`UA9QDqQVDv{WcpP!HR`njkedx7J+B_ zVDX#eH`cgyyxZK?JjiXD>eXj2l?l~EGax75uQ%q4yfPKnlHUwf-}{0mkF3pVE@p@s zdF$OUZ`RjHjj(>W$-K2eZfmOueq&>GQN@M*laC25i929=Ejt-68MTgzQz&s(+QND} z!Gv^O5OOS;@M*5|22&2rjL*I9AJ1^XpaD<482*-Mtb@)tlzJy$GLvY+m#t%VNEFt z8s7Mhj}PtD90kh0TwK;)c(<~=@XtTWzLv*Sh}Xm0r7V@?Itdc{^%*cydqWw4B<`W~ z?9A&zbG~jpTx-_c{q`zL;c&F{y_Oisn?-OlOV5YFGG=#+{31Qw-u&|jIiE9?tqLJzO(Rr9oJDE3rg{{IpH@7sxYART zh44MZHq^DCBoL|)KBX?@ud~Z#k&K2ZvkUd6yYAr4dG+q9B6MImLq-w`AS(hjkW(EF z1c36)|I8w6q+apa75B3ETL*jwLyhgRdw^y9CQiMD63WZA%3w_E`@$2jmARCP=1HQP z0lALqH~i4?MtMkOd8NXe4oK}^O*P76{wW@EzAr((&4K{oiZsff>5QKzZ|Lo%IJO`{ zzV+egxWCx?;0hZa_&vj7&64hf|IU$E!FalE7;;6Y4@4-Gp^y=C$oopcah&Y74WsgY z#lVg-(Brry8C=t(wy=*l{x^;O4ZYl6lGH54|}IF`zg>N(Z7epVH|I6m8#|PblHK9 zv4(zyBO=}tXj;!f=l|yoT28}ZWzg5G5Fw9Vxw*_u?w@dThqAbdOvKvRB_i+4uRv#kN#l9H)4q2`^4TVs+{@s8gm6gmAE&&j@ z-b~=5=H~Su7Odj7(Sc2I|EQm{=3)PJOGM@Tb#`Wtwy5W6)AO-gthR^gT 
zeabGmG7?*WlHN_~E?k7C&GH_kRcqA*vj(^2Udc&;y-Uw%8OjUk3S<>emP!)N15m?)kH1-EZrQ_{Jrp8{LrZV6*I>Zd5w8{-#W?CDPPhFZRGZkKLRlz;WJLn zM)A7)cc9hD-sM{}{-~gGyx>dys-V`!6wvm^0z%omVhQxWyCEn!<@MxL{TCU2Fr~Mwwlh(U&mPOYA^$v{-lB;F>+WCU8>@EP zZUtb^tTI|&gs1E1idwoTu3<)0Ki%sef)w2GtiXrp7Wc1owz(XkMyvAc8}-*cAic4; z-I?a=V^hx^!7c;@+#K>y4ZsX16GjD5Gm7P9JMF1I5eBnnUfAJ>OAK2iWQsV*DV z*F#;ZZs+jTHfJ<3o<6h_qg37rUMBUo$V3H~2qstqJYKiV_BwER3fG^+e@4bJS>#s2 z2FkbHgwPdPexpO4{X2m{HG9q*tnamzvHFc5r~ggrY&!P9my~e5XCM3$+e6xqpm9vo zRmh;zeOHgF6C2iPy?U$Ac#mS=od+f?dw<6~MkXlRL~6i(V&2XXZ_tVSdDpZPj^=s6 zwCqt(r}VRg4;Up5CFqa5l?4f>>ejc z_!c@vZQ=ntF60tqDEdp-F@35+BZxbDicN7191@e?7+ zb0W-7H%mxZWp#p-I3^I-mT)#8dfJcW8iks_yRyk9n;z$R7VVffzj!O~4lWnoReN3{BGw!LiXL@9B;T5-Uoy~yleDk#p2 z=94j}HjZ&PGDoTXiymGZV3#85$*F|2>nx)@8G3wq3)`;3%?f%PIFDVF*50twnFNHJ z{F168(&~s!k|Y0@y^w+v{^>LL6BjBtJSI1y0q9R_*~ULN0ymSF8Gf^}BUB4#-vI3` zIm~$NkPs1|^MbHcPEXAqDv$u?mv^XN)k{vKq=6t%%Df&I+y5G}p^99YvCOi5QrH`= zK-ls^$N4739jKGelU?l3c$B&j?Ss}JEPWO*U>I~WSaRS(4e*RZl~Nq4J<6tSwIQID z6Js4Le+(|K|B~PJv@8=5Wr5NAagPO0~WE zH5N}tsiXo{79(~(I}5dtI-Zm&oa2lu`>UOxEP^+>S(Wtw(!)BCe{IQ``dSORIec=l z#sYZWVT5k=UAG{Tqr*CFRJ-4lndD;4^+X;>|ye_M~@8O6Ju zAIqTLqmk>wtCXx4eg9>0US*Y}>sClXe~q)`7F`B*4pZH_WUUm#D741KggO} zSPaTXnx6@avX}cFwLq3Qqyel~UP!illZ3ktVlZ{iZt0KpEs6c}N=vxDxeV*(AO!=_ z+#a&B;IV*j+gbT^muPuqtl4;Aw2d2m8ar7FcLxEhOrO#o8>h7RYOb!t&yDrdq=(1J z2!HbfLaGWt5^6f>hr5Xm1DohU%7d!_ zr-J?bFLqoyghqueH|$Ws{&dB;Jvs9V9Gn28t%t95cV1=7#ow#J6pS7&Xnx_&P!;)r z?q9AdB7Z7xA8ACG(N`dENOcc4dE^Pco+>HR9UZc~Xol+O%Ohkd-g!b&XA!?bUiw_9 z+ub#n5zU_AmAi1>p}{B~oxL3N1+c)97d!2m3T)Ni#B@s2FBfJAXbmxf1v3l*f}yr- zX~f%oi%j_oh;(a(kzvUHFU+kHN15<{vysY4Jm2pzL)ZHvyx2{l8%M6rM}~gvK>FEi zUNNU&uP=QL6JVNFMlzRfCMO7)mT5aVD=jcUqKSLrX&`U+=cHGE-o z{51YE4+v1C)Bv_1mZ{!ga-e`>W%z_j(|On%cG}{o*neqqvmeySv7!GZ=1Tt0?v|gW z1Fi5ZjqD$M=?b-Wg#-$2CE{|m%XM=~Exv=JjKwU7w6W8w{(XW2*qICQ?~{ICntW?p zuwG0w-iYF%o8!yz(qS_vFt;w9txOI8**Z#OsxKr&UeYhUj@q7=SdZEOuG|m8Ke@lG z6D|6TTd^dIvE)+KUFn55eq1e{jIwrS8rv@KK{M1Ndcb*WIO+pJSMUGU(8~h0ap;M1LYvC8AF-$=yFVPd=Le?0f6JLB7Wux(6X?2t zJ_DUicD2OJtsgY~MIQgx)Y^8_6ZZ~);PKVqm5QW=ItO=Mx_PGWYJ@SK?P@`n1X1kt zEe1C>4k$6^8Wk^=c7;8evAg%mQq5S1Azj0Th01~Z(7)IBL0Yi$3SiAMAQ?r@C-}#U zny0g@9>`C^3$e?&3}GoK_yz^gWyj0M(mTCr!4f*}PBZ>@7#1q!#fLr8k_Gs8HzJ3ME9>xE#e zD1Z2&3NP@AVZT34I|{!z0u6xcw(?Ks_QuB15n5dSzaT*MD16cXlRX5_j=nC&X13}w z)j^3fp&`A!-0VQG*JCOthuY9MvL%7(O4fiNct(JgVUyDxlztnN*^on{a}_%u5*lHP zGBCpw2CyYQiO=AU0=^Psn#@e&UIwr8=fpfFVPF4o%*RrkR=0s6UJSb`%OSd#l>7JI zobIV8n{PqqBVUg`zkjw7du2o`x1WU&;QN<;ZQ5dr+P~VXX?$$DiwoHIyIg_JvoxV0 zbJ!XB<`CMY6$((N7(rf=tAaI}NCUVu+9Zs1*pD`MJFRFOh#VhiF4Kb57iVlKAvd?C zk;F0wDuY0<2>p0xqXI%VSr%F@np3ouzCsftVxLN{+uReI!{vXk-*zKpgehlGbD6oy zlldhk#H@Mv`~o3Jg7h!-;Ar#Ogvtz9Pz6#J2Es!whL8^0Yu3xDEG;wIPjMWUDe^2s z)t7;AuG0o`HpNRj5&jysSkRg3pAnl@pgE5|S4t`U`X1J*T zO-co?{rg0_$-72%2gqJkLkiD#%X}h0Wrka0l)y_BY?}}&%C7c%v{dHBJYeisZ5FA) zB;&09EPe!ldtzG;!O!ss(d*_fS6Wi<60naJI!E1aLK1Xu+2IYIF&x&_ag>xHA*}^W zdsOF1p#r#FtyaGu_t$1ne+qB#&e{g-HJ7)(*6cDX^72gH0bt-vzL>a zpbibFV7uO^Y7s(1v9EecGWEG$qD9OJzqi(IB$5!g_NEVkA6Df6BA>4Upt%oi>V`r= z6}4UP+EELiZAL`?X@s~70Lb|k6(9}`bu_t+%W4acSmONROW+FV9@YP}3_2yDOzQ#& z9`OjDNc|5BkTt*v%I#|ltZg=K!4U*T=Ie4ezk6Q6Wa&oaf`bA>*VDF;(O(EHrQQxvBCw$Uhc!ueh1y`=qag zeP6p_Z_s*Js9ZKH%+x4EtbcwZNZpoM#_gPQVj=+mC}05=w>Gbi^gl)==0f5HL$(Ig zpE(o$hv=J};D`XixPBJ{evSeb5z(e09OR1{88D{Gp+*CQUCYv)-wC_V1ue0kDxJSY zaw@lC;4B60Uzo^~u)e{$*fc|XH*g=U=o#8F#gqI%NrFh8ZHMgi!>+ z+=p;?We=5s4-L+LakNecolT5du5Fr-D7aAfM*S1f_S6vUj9wM+(`Tu>o&s|uVyJvi zK4q<$Sf7&BOQMDf0Vu`T7S6!SWyUDyzfHCo)!Sqe_o+{AE`*HIYduSh2sskLn}7o2 z9V^4uu1$!3(O%@6wVu$@_3TdwR-n^VZp7lTe*2A!Hld2c9hdD2T_}e{Ck3Nq5^J&L 
z);FepaAvVMt-Zt{3-%iCQi$A~ID5e%I%2!Y9io#?pet5*nZ&rc|7ab7U@S7no@DM= zLB@Bfko-L7fh@hy*@AzIPf9yp$vFal2OoS;;v7J|Q#hb+9iyo{11FyNa41W{^ZpTs zWC0UY@u|Tcrd#s2Cs3DFr5_$=h1<}m-8%s8tBe~+GQU98G!AKQOZ$My22F zUa>Xq87j!)&sK9rxS^qBmnZ~DJLZr8OHM_WhKnzKit4s^k96%TXmMMgI!(lmCVG9e zqCFN+N3c18-e_olu$RVq6g`SHp2qEUW$jU4AtM5$$S_%@xN(SNTaA|I(h5mwVE=Cr z2_D@pb9pU50{Un=UWI4j#)OgBmPCP1Hh_Z$^n)Pg^ zEKVVfMz<&ymUs_jIHC1TFd3P|7xAJ8vb<0txRCgekoe7)rq;_aio*Ep?2QR3Iqr{K zVl!$!*CWA_je0eJp+$9fmH;)Z-81d;9lb!9_upaj*osbiihVFgn+vCi{NB)u7uns9 zddED-5dC5cr|Uz{G#8#YTfQ7EgzE@so>0myi!;8O&YHbbRXXA5)t7!MAL6S$cH8W{r{59_zOqbz<W1PK{M$GLozI+uocwE4dBd~=N> zo%x;@*b^7$w6_{VbL7c5H%?W?J5jA^v#jAqh$ze=F$JI|0z;=0x>@@IdzN}e2gUc~d z>d|ttaqO4DlQmv50AWIyW`7_!LnZCuTFWHjag@~4&piTY)*^2>7CcB`t6kt(X#fF4+GFi4x#;3k} zL1pt8-QUL`locf34OBPxY?xQVy~RLH+x0T32zG2aSuQJ*z56>xtg0$5(q5~Cx^7c* zaU)p6ndhck9IcvV$ElRqCYMCsjKGy$nOXd~0WVBVF>n`FSP0JBZX_$MNYS$rwq5;i zQa=X9AB1V6A5YENxqgN2DUba4bsBLei0y#HL?8J&Y19aY)0c#(e75NWNvtsZ?YwO| z5ibGnG1loVycn)uOU_O>cH9#v+1)mT)?#I7h2Y; z(=j;q99qrU%wgnt!^@}2Q=?947in@$+jFax@G?PiVc`AI^W|MhE`I;8LEcS)hZU66 z5-2nEecqPL4n3#!F-=kDcDFXUwfL&MTo{6MxBA2biuS+?JiwTgMILK1dFGYTIC&N* zB}8frOqjc%9YwB8y1*Ox0_}bl=C5_J(po1}nD|XY` z$epZ4XuqWJR9bxdR9?0Lom09cFm66c^aITyV?l`E zRsiM?q@=own~ExgV?IDpxq-q3^lx_Z1%=Q?iE5j9#7A-)t>nWe;6U97{k4divzB8K z1{QO*nBg*gD!0q|>F~)kPD-#zjqzU7`A^N|mgI)bwT2zQw8NJMogJX@xkU?`(2-;6 zutyuM)r{;{e)HA60{6Q)_2xe-`Q}ino)XnPdz4ypFV_-I7(ME#txC$6v&6U*w8hLo zS>;Ir<{6<0XvX<+chQYx%3b}qn;5F96LsSx4Z?M$i!JH zp;MFNV$ZOYqvH{FRd|A3TKLa9uEL2hWiw25FE!4^lO3NEZc?&42{+{=$+~k&()09y z65O@q^{{X@Payje@NS;i5ia}^Vc@e~mFaqL-H{@5srHe@o_R)B!d9nMvalnM^fC2Y zdiXZqj;fIRaseUJU#apqEIsO8b^AwxGwVpwOkV->OdI z?U2!B)Od94$x!z3dwM69i4+YFIuK@XV|Kk6ifYf3aodIA6uJ9>P;+6T`4IloAyqo# zX@=&mzDPseB3&Q+Lys|I>tBK{TVjru4dab~O4VFngA@kR_#+%ghRAC z_HiY9_O?BI>vNe)fb5VutI?I;Yt1ASLXJOwgha-=&F*B_1nhc33}UX(J)a_X-Lhb7 z>xV}}X=boozt$pv2#9mroyxw^gpJm&*o!+6WBJGq4SzZ4n>!s7rq9=UCu}iF225;o z(w^twxm+mZ-2d$8rM@2eo><`r2y^sCp-VRF>hAXftyssLqy0QfGfZ)9v70)x*I%8Lwj0sA5Ks zlv1FDP6R!z+k{>?UE)yK9rlV~A1Pn^%C?-?JaLNdf-i79P%wN&JSk%?4Oz)|g;K3q zo(Z_leKz8K+- zN=&anX!r%zRS%_45C4=SA|s)yzW@9>@KS4M0d#s zB6!!Zd7XF1WAEt8*U;c-|4lwhOHyoO2puw8F)XC_c_N*2-;$e6Z^-5joHZiYLHpg9 zXVyAvL8>h0-$fw&(XI#D7=!B?%Y&2p3xqI)WAo&M{}P>LDTIn93kd$J)ml~VDQy<> z-pDCMZ?&p4-$n@b^@)Kx)+F(GqMDzN$>7@r#8P)ln6|SC(~l8;a$~N8ge?&fnG`f( z;JXUeH$}Cgz;Xv|u*t%z;1~j!f&BDJ6Gh#b%-0~h>(J3gX8mB8x}lD|ute{QoUZNr zuM-`EBesXR5k8aQvO;cY%C@BfHSv2a-+O!1%^jbI@qX>&BSHASnv)I~G#zj^CKA*H z?8dT9Xe|Ermj^r-wZ!({6~27A=Ic@ z@t*=~Vq8kv^&sGNDC>wbFy8{Kz!g!&;4W zkr$1Es5R*HWJ#*x6c|~EH|;Bs8%a;YTKBs(_vUcKY!p?CLiO)syYdp($VvF#!yrSi zJ4sB3G>pn@#g;$lZ88Tniu0DG3~$;Q`6g}eLnM^@ha+qIG&HvVMIXdG6X{24X4Gxe zU%!sZT|9*Z53IMcd#mf^@7I=hODC$jDJKu)BmIb1O43UD8_z?&sub|T8%L>;`zMI| zJeab)`R(t_Y{#@e3st@Lqhv1JCpUo!>9z3A4>@iOyKoLIHye3N_8n9BjzmK(pw_B_ zOL;d{jW8)eb1aL_*cZs(H`o@nGxsQ#0e#5Nz}+b;#=QE%#DnBA!I<75oCJ+c!t?i(KGKBP!jPisdZ0uWe^nnpsbfX`JO>pmJ>t-Lg*AY?1!Bd-68M%4z#%S{ye;*3-enjCg#D>GPDM) zsVZW3K7#|>d|A$$S$dpK$T;XcNuM?bh`JECZb2e2>yynq!KM%71}y#x&932!ru8pz z1Dit+QW%%Un>U$`n~u#zh+q9&Q?GE!aGE6aW@1cp-N)z9*-ghY%Q&=HHhn^aI4pib z3Zp;@|D=zvuQr5uf5DKqmi1ac5`_c_(`%>Z80n-M4caa8L4gt;;S{fbUx8g+^Oe zbz2VetSR$6bbcgX{YWyoXIYXvi8|Z4A5|% z+{+q(zhVAUi9sJAK4X!QNA~%&uiMfTOel5^kG5YeR}^6+?P#_0z*l=%tf2gE=$?ik z4TmI7Pp**M&y7@sP*`g$BER}&CyRax1_e^dD|(pJTv#YGG`<`Tls-s}`meu#sEBYvp z+FL$$k3Z*0!udhffPLM|VIcPv<}z>m+U&V5Bq;Iu^h#!?_)QM-{nl6rRo4?_>D>Ki z<6CnBMMS1pW{BCcda{7J>MBWQSNKhSHWyh;J^H^j(*SepKQCV}s2k}!PE$2L$tnF9G?1j44E*aYT8K zXc;l4`KKE+t`KUb9SzMC5;U$`iG1pr<&0Mr7ZP_a|DrDQ$rO`a4lGv(?3*y1!oe%1b-PD4w|8-{lAY 
zR>(*2fM5Y?YlA%-lQLnt4)ggjduOE#{Lk(FDR-!@2(8MBMLa>b-Efo=c4A0uI0*cZt^ElE4B9zf(|8Ga{KNWNl zAw?sZu*w^;Q#HKe`MXXy-k?L`!I)-@%0~YUrT~ll&u|!Af@ywjB*u;8JO3c)YwmUo z@T%`<7wyAkpi3BM7Wz&XoP->_)u+4V+czXH!2uC2x%A*xJh0*=cs{+s0fkE^xudeM zmF-KrK#$f2xEUtQ_f9MFHnuMU_+3GybT}U`?XwP)Ibs|0C_N~*hkh zVTGBSV84*gb^C%l$=f$!RB?Il#XqoIL7e=AT@WHn4W9n>4~T^7UpTZ42fjI%BI-q0 znN_><*&fIwn}NZFXZb5dY<5BLia`U`_Cu5D9ZoOLWSnU%4=XZ)<*)W$$KsJEs|2la z$v{S9rs=W?&a2SOTfLyKmqCmqyPMaV;UU4a)}tvaC&u*$dS%kefFB??a@GT`|t%^^XKYL&zqM|Prt3Pq_=o?M?4 zAo;@b*g+jx(T$Cr9anXLH^$Ys)O1#`Vylb3i#aG;(a zEZW0x%It94HyVBsCjtRHQ!nIi^d*`rVdPuTSM@d)T>0E8bLMMsFpI6g72QR6&is2ARJU>Xx`fYL!s_`=z!u3TC(OI<-f-*Cp-`1Gb)jdxQ=~~0#x8Ntqv1_zJ zE0#~9;2TD7Tkx~XL0|>&wo>Kc%eazr-!plxp-ybaBvCBW!w=u&g?lnVE{f>EjNl3- zyD|6()l^iyt5b4y0(Y9&+m0!CnEOy^3HxY$Xp&@CrmF1Axo*!rT zt|1dCEQGSn83e{p(|Imi)1G=J3FmG{?6!LWv|lC@sX&pBpvfR|5u0?NLuB$TT3ur` zk4~vX$J*r$_aRyJ70zZLNH=^^YIM0^EMKq>gWb4VIS(%Cp8U#2;WNZKCDa+dsE3Q4 zzkXpa4;G&bi4x2QC(bOv1UHt8eIFq5_Yg^GOp~tkYAvio?{^yP3^OOs?%OtlZv&yW znCg?aEMbELPPr*LY^>DGpYyB2&soW8bRvh`=s1-mg~jvygsrG zb_84Bs&F{tvpGt1D4V)WQV(1>5zGa`s9r!e0T?%CgiUQ&B>fY~9bZp|;Ta=&$My;& zXBTtlj$+Vy=WufbaU*%BwL(c~!so3+www+0gbigW)CZ!NFi6 zNxu>LD14_@SY_L9rk04(3|`>^%P!>0uEd(?HQU&c5d=F$9L%yawDf56BH*PR9P3^8 zZ%Rcb=RNH*PzVw54NouZ=iu6XGTtn)7S6al0jRf*J*Y>{q#L!61 z3_$~eMN&RJF*>Ltf_V@d4W1}-pw+t2ve8BgLiIDMY5m;7IOHPAYV&O`yYf=}qm3qZ zU*2`dVz1C!CmtBKn5u~+&dd>7%k=-u3A8WAf`c0(e`cYevaW zNkmEpWwv(}Y*yNr;L-9=O~bO7$-lnM%UK=RD6YVG3trv`+aS|$m%xQ;md6+8_7y)| zN?W}?Q+B}8z15Mtd8oPO`txm%Z>wd}vPe$r98F2Bg5=b?xx*O!5Bbyf;IT&FF;h6g z32O6`0m|&PihAE>GU-W>wS3_S5@2~KUx-X6`U8H2c(n_ z#Tt3UsZWag6C?cwb&~$mbM`n6eUf0U-=Qx#?NvzVQJXQ}rV&#F2*4VXsxS7Gv#<%& z$vC|ak3*w7BU+KjPOzvLyLZ)Dj0hwmdoyY^+7Z(gG=rBbu7rU*RI7E(tVo~nakK4x5reHwR~=HIubLHkyAwr` zAV#{!M-Hcnn431gO}29yJtV5D%6?CP0Bd0Fr85@dd^+|7Qyh{>I9j7NF!az_4po4N z@Fp+s`P`97>cU0Fq0AAx)J+tk5I9aDKU(J!Y`VyKrW6KxY{r4c#HiMgRuT_#x$`C1 zEge};Ca*C|fk^iac)ULNYcLwas{XHfe@a;+jB&;bJ>{PU%h4^zs5VFmkcj3a7iNm} z7v6Na_Q`D$?dN7O9e2^Ue2E=v_Hn&FAJ|29gq<%{x1isPO7L z>0exA4rgyEtpC+&I*>ox8T}z^49r(cW6P;N_^hH}>mQmS=va*mr?wiyfw7IDQO1R+?!-z#sR^dN`XDB9zWRTI&`Lf2Js_3z`I~ZjFwRdG(nJU$O(LKkyHD9NduJSUm)X$Kc9XCKc1EmK3 zCMv!to|He+U+D56;aXd0Z(OxIS=TBar#1!;x6UOdWfXqU?*o(RYoN{)l!7#rIODu; zJvI@}O{eI`QE~%Vr2f`Il~Ma=E2AO|wb+^{q^@if8STww_CK}&-OXtKC_KXXQa}@F zToi*7Vu)5A}v>WbU30^KTf;L zH^8`@D{0-_Z;2ayU#-C|9T+@1uxFH7(eakBytikdEB2aHFDb;Y+)Pm*eYqw@X&;*Q z!<$_>GN7Oz@!SI?&afvXA~$0vg4mr`C0+l+9%EJt%(5)v{T^U8bY-SNvuD?f)x}lR z(Sh>kce@%#Q)Zh5{#yA_BGy`Ti3pX?v2uaR9O|j#N=h>QF;J>(t zV41(LEN-Tp7KDD_aLXT=Ie3L{gzXitthj1~D4x*6<{mO3*bAZv&kE*x4NDBo^_ZvF zaUnW(@y*J8tv~}rt~XCt#)!3|{oyI>Lv#>3SaS48B>ElEUtUhyeT>jYz?Pfx2X#2w zZ-2S=y@JZj?_JHt$}b}BzENr>yUlWu-_;o&y8%$O?c2s?yvV5o-5lMs${ z`i+Umy@OrBJJ9q`X5Lg?fw?2}A0t=oz(I03Grg@dT_pqTL1OG^jr6oEgYo}^LFH>d zi~nHIe3XSgH3cgMfz~`yAk2s3noXQieAdl%efPQ zIjY#Rf8i$qE2SCLi*Tzn+_eHD*(JZ`JWp)fOxfC|vaah60l6Yg=Ic z{59Y}iWkHKU|LP}W#-(O$-&XDs{afdsQvX$q*3?~u}VirFf{9(&Qm|OZ-?stMVATY zovtim1X|QrpS=Q@-zV+Vw?oQEenjwsO`dT$(_d}95#n@Ky1H%^q0XV%8gi#SFFs&? 
zwf}VGmEz~%l7JTQM`&}66z3a;jBknk2dBtLait`A@4W4?Tqr$_ZT0qi_6Qb{&vVj?cTjL zH*<0*kEUqo+@DW)=cQPnN_&p(FV_C9Fg13$3Zbw)Q|1@{7v8sLDgJb*NQB?2;f;Oi z2!&y;Mmh_!OsMcyVE^swzJC9cGJ^7mYx(ydE}*sf_?Gm`LUuyMA`-X9sAiA=`qog4 zfHt8rM!BQbkMEoA&Q9)HURmGJ+F1rq%_THM#oR+3;r<=pE1)}`5a-G>I{)fj^OBks zz#Q}{-46wRzc}cIU~Jtop%i>s>EZ*Hf-B_eYEpx@f9peJDN%H|gzcJ)loE&-S(;NJ z*M@1INS)g!)t22?G-L_puX8_<{g<}761#zPwtv+rLxf~roG4}FI$#mth^ z0t|0xgVUv*BLaV1Od{;(E2{r|)I{pTr$Xa65p;D5dB|9-E}^F9^C zBIl|kq35L0@v*TW4_-5o^V2F>l99cLm2nF{-a^^gmXZ91> zciCtO$T_qvW*=bMtb(Hy z3JZu+H6*WP!kkX(Ie8z(mvibd6M$snZx1CJsEtCFSLq#5Q1<9TY8$z2ZjSzZ*O zhxLdtPpdm$LMLCM+u1|~@L1uA-hNJ2uSO;{V6{v?5|6|E7QdELfZ+jSZ8`ko{$d3A z1_vOQ=IL}XP5RE6kmnarF((0aqco5Iv=?+oTvu0%C!{=k%6W6JlQV2Nkj%t!I}`;- z<+jhyUW)%yov1STb;IWvv(B@S4F{HIkQ1KCB4zzKsBN@#s;p8aOP8k{Lwy}YPVPm| zE~{4m@mEJX{Bif-O`X`d*va1%a%kA6VgM#VUa+H=NV5Hd(rmg|hq`PgFR4>!sYzXu z*p2cz#-=NpvemBe5C?D^P{xOKTUL;*RbXj}IfJoFkcHoR?J9-@kV|rT>Ef!KZ!EB! z3E=RUGH$xpa5>x^M^0VBtF>$XxU~uo#C{VzW!)@}EYLP=jH)eLFSC789- zN;=_&3DsGjr%!O{sL>Blrq*vjODKUvO{)je7I%f+zS)J8qXn~=cmOo?wB2e6d%!IOBKOv&T& zL>W_!Pb^>s0oHclYhw|v>$=jpX}9}}I_u}B;1B$&A>1EM*$hT+`$q)LwU1xpCp-y8 zHCC62KqfEgXbiBvUMI}9Y>9O_Met^r^NL0o4U9<=tXNN~vf8p`1_o9fs$0Xyn;}+^ ztN34ESMl z7V(=4;5l$eWXx6vm+@jGCb4!7A3)yQ@7=iM$ILm~i(%GTbWo&Mhg51|>k%C-Iy^xP zfF>-mNuVNhYWdn9XZCKi1##yKcMftisrBsMx8qG-n23|NE13%&evDDSJ0E*90pz6G zz0TcTmXMG}A|r>5!&y{yCKGuwWH#_iF3+#eOWzg%pFj@Zqh1quHYxJkI6BkvY}$EU z3-E&v`Bef$V$PcEX*No2;fxsYV!4V?>B(?>RDIJod;atEGITG)JceD+R7%VwavN-lLzrEbv=gANVKO8=ZG6l1Kh4H?6 zh%hDuFi>C5E|;9?xRnfIhE*pN7~pPHCc<5xGmkE!;G~d#8%T~$Li=SnHlo*t;e}|% z3vZck&3#iY<8{?*dN+{8ywqJ1gopbI(qncyYh(+nwxYaHthTx7g&45)nbjb*_?f$I z!{UV*u^&{GuIu$TW{WmFm#jIwjAfqzptR9Y!&o_ELn5n|@Zo6t)>y#e0+hD$m{xDw z$6jds49u%1KAx}1XN|ibtUX}av?UKG-)~EfozYL` zVnh(tQjB|y;trrK{HjZG`{lV)p`OM&Y}Jn2x7dq0u>!uyA56T!igo{s8e4#FrCy(K zkbO|XJ*ieVmlE9NOue7=E@n#)?L;(XVsq>&yt){IeqX`IV;N?H@COHL;~$_scS?kR ztA1Hd*bmd{(_Xu|%1LU$&yMS}ZT$iDf@FoRv7ScZ*f*La#uNH0u_m}i?(d(_l79S=3#3-S7 zIO7AHm(%WSxO%Kicl7L<6`Mcz^-Cb1&cGwlbVfCd~RfMf)33`oV zcV>8M&M_qR|H`V54ro8t4NacsHBs{*g458d7s-%b?_leNhldDdOd7t&V-bRd4ZE zd1FWW03|0m5cxmsvuuq9NG3_Iuxk^HYb+i>0Y!0e5{jP_XBRr&j=2+k&R;Bc^v>1> zDrfmPGh1lrQb+pKAE9fEF`t+ZI1Ln-0A|#4vA@;hy;^!ziYm>=?1qsd`(%P{TunT9 zRfvi8wN3Z%Qm`xg=p>XPcJwCj%-u*uH(p`8e~yyIyR0IO2q9tmmyl8>CpZTjPy! 
zml>9l=UH7R)={{4KhW~#GqmaN9S25@BImtT*P zu`Lt5DAUTvwUh3~OO0jyMn*htG*3(Kr!!@(R`y@a2eucm*EncyJWd?NUF>(kYi%*^ z=XTz4&MM0TLKr&gTS^Qz3~j!I={~g)rn_H^nxg7ARQ(eXe^* z(>6HblmDsC3zir!e(|lv+?f#iCu1Z7=bo!#nwoH5{`y{hc$)NW${65tXe)Ina2 zg)A-(jyABv^8Bew_1Z+ei*LldybggeK7XY#RkCPK&HoTBd=Ia$@^km5v*89uGfKclHkzWy@T$rhlU#aRpW*TudxMM_T&@N@T04h`oR)*67p zdCN(r*Ml>=gVJlV`Xse%3Klo@3o!%Hr^mKX&Dl9UZNz$#kogy6D1lpsk zNuxL;umWDZIwQ*OnX{7(&z8n}^`TdA*LwaRL8x-xST8J(Hsj1~+c z$5R$Id~Nl|Qd(gwSvG>$Y4#3lefvN9eRAx}PLT4&3N1(Z+>ONG_+Qs3bx(847d}jc z#6bGqWa1ZM*GTnv>~#5ty2$u>Oz-motDFrl^p*0Om)g<2LySo-atOuSz({p{%w&mz zKpom0lHzOj=#?QJ8VG2FE6LpfeNgck;XmnYj>}~O1!K&EGuoLlqNj0UaD{nrpH-N; zs%>Rt<;xGj@q{@?_2;N1oF+q*MziTn=E^dUWl0U0YJFxY+y_eUynnyc$*Z_Kh_743s46*ey!VkgkgAryxAFg^JdD1kJ z(a#3DxFd!T(bgz$ob->AI%2zhAyS8F{)oN~tF94^>no06CzA~7&yE#*ksX(#P!LqP z=B{qz`g3584pZj7q)~hB%0-Dr=NOn#!?|d;_*Zl!%=XTwCD(j!uSVuZKd`kECffBW z%Wj=uvS`A$8a}cW>En1v-wsQ#cJpYaQO3$!)Cej({rwAeyTjd+oK;ly=h_;>0N$3# zd~VF>@W&D~liLDU#VS4&KJF^aXzEn$;kN&A(Nvs9SqSq{?7f`V-3c4h&U{v9R(c|a z*UsjH|LX5s7>i}gl5BGhqgD5uHio4FJNmRV{fRXf3ZHf&xY9u-Pc-iUAvUp zDD-HoI8Z{NvYr-GcAdICn5&1KopyfU#zd?ddi85SvGd}5@s`1`UJer<3%vck0~XoB z-Hqa-0&lYE#Tk{Tnn)q&P$71rGGo1{Bh06F#3r)y-iT_AG@ zY#_g4ZWmOq`+N7&81>}WQ6mRKU3BZmn8$qa24gba`|_~YijN-uRzFhK>NU>e81)%p zDQ||LINgx0RWsP#ex|}W9R-`al{(S5;5Zscr@`v&>5phw@G{Yx(P`Iu`*XyrfeHoP znXccpR^_{=`;Al)ptdD^6(y&^7K%7Z{b`8~!+8pUmWF5y4nr=2E=KyzrKb$nWB9cog04}@3nsZwf!l#ja7`rZqs$xDKi(~sGPU;Y1FaG?|>t2K>soO@Hyw$H{AaDCF&oFsdqS5kg zFs`qeZ_$@(I$G4`_$`Wp;;F*SEb=G<+QP!y;yr|4mNJ=?Wi*MnE<9L-+FTFu84>f~ zzFu$RcU`tH0yJKJ49v%~Ew}=aff=ycQ^0wH&M|tjdNB6kEoomMx*a8&Y9Vs(Kx+qe z>^Y_EF^wgg5gT|TuPqs!ah5)}XPw>euoG5W3BmhTa&(Pu@wnVw$xQyNaB2BtZu=p_ zV#m@nxXC`El}o4DW@Fu9fg%AZm>Tu)^fvmA-GkJ8d>%?VVPUXAq5X}92z~suuZ;(Y zDYtjJh+sN3DGjE_Yzd@D2aESJ5PSpf`Z|*iL!oKSqsy4_exGZ{c`2OaoDlYvCwkWS z4IIZ!-xJc~t!Pql@d1oMf*X}oa9DCK+bMG@t?%r;t#`g$8z0-X7Yetog$lK_lGDl6 zOQnuRd7>jz%*)@aZ?%r6(XzY=;#s}jT7znt_Ecpf`pBx9lB#dJ_Oy3`6}-%RH`c3w z5-cHP8PlQbhHD zTKHc12+j;Fni@K0uv6MX>r!9&sxz%WxG&ZFHB(haOYT#)zh3p9(9#kO!6jc zt{=0+Z>RGbOGnYZ4?9_V+q+xVDNC_!cVX)>rd1&Cy7F*~*SoM87p#8b(|y+R9Q49Hq?z_?z_o}Afs{xZ@Y9R3XPWB+5ZY~X zd;TMKWJW3u$B`Lbk^dQ7M5^796%PE3*DWp1>@>@*xt{goc{s$A)~BMwXIFzCukA|0 z38tM?<>qpBTnKKdLx{WmbokIwr3M`6gM4)|XpMh+y8CCg3WjHX_x_Xr`Tpx+VM%5G z1G9p*{=okQfHK+c@LcfmBVp58O6%%cG$|t<(WBEHR8zxI_5;k7NB@Y>#Uu_FU(h14 z&ZMuaAgN{%Jv*C1Wu_?GoLyx@$e9}paUjx8_3>v2keyF8*ug>Y|HHJhNeDjLs$;Nf~jGEr{_5|oh-{)W}iHDBp*g;H~- z(d0-CbH($9F6iyW$&49iE2e61g?xD0#fI^CW&UDn7+#w6p>}BoTO4t(rVWN`dvmE; zoA9j>;&3EZbsiI0h#)yK5#Wxwe$m}pe>8)V+L}aMnkDjIr65RmIuRa=_IfwaSVvbJ zmP$5j$HP@;No-|8cZ{vY;Xn4ErIRMK(MyE01^12=H!Hh^yM(MktNk^n{Es%U^vs>6 zh3hxI5pJyZ-X!S_l@#hxX+=nBGfU~zCM`rT#jsa;a-2*k@lg?X=y=RM_X2Cr@R3Sk zC}fu-m|PBSAE%=!DB*pYk6=4{iBk8mp4>pz>$e(*3 z%))J6Pqw44r>iP_CbshUZ)EC}CoTktQq*(Fc63_h@V7F6p1n)X7GANdvii*YoVIJL zaUV}iFKiko4?WSa6~Dg%Oan}Ggq-m+s9!INVDSBv%3{{Go`{KMu(+dspEZyy`3kESy6y2*yx|?PowSgmpKLD#m3KwH>m?j9|9N{iOH4aQ`VCnXW4?@ zR5Fwt+L~YA@Q$W!Gk8>AgWIBfc)P@P_z5veK$&>BAzjeza=#81DnDL~-&V5NyDFYW zm`v3}fZD8~4Z5!v>UXvo9qvN%_N5CBeY1uW=FFaty#=esNB-M4)+9-LYpYc(*3Xh* zNav2)zc%$c0D0Z--(`*>|70>~ioe4fXX;(g?ZxQ7xj#1Yjvx=Vba2~DeGd*H7UGMh z;17JpD}JIpw93)K@U?jZP0*sakk!fUS!Lj*pjYPq42PaKVYqpI*LHPSoe&y)^%U1G ztL}|?A||XdP9N>4a+F<-9`+%4zY9}^xhQGN^m)tUL7XdEU&bsbNyO8%sW56ua&Q3` zfO#-q&R1;|20+)_4+}It7EIxXg_q*d(R^Y;i}_Hq7N0q6y7Gg1#gE7{{4^H~4B|mr zO!y~O8?x?DBqC3PkyM2rlS0ZtVz132t=i>y2ATEo?TdZy@ML_CI-~KrP_6Ax!v4IA znq+u&z+5;>qPNFI>~G9DFCeb7UxF|<=jBxEzT3h1WN zW2!q@(*J6laYM(0mp6)KK-m^bZ)kg+PZ#@@pc5O!D|#PB-Jdu~j){hc2CE?)E6Ea^+Hux1~jyW&L{Ls3tYACI5ckX=NFV&=DiMC2uI#p6_y^BG;A0x 
zf3%_9uv(n3j#u~B*>KIsR8ldN#@MuX%erjHO<|^6?H=9D=+U&8H{)3vSJa0d!F>|A z88fW9`~D}AoZf~%++7ZRPFSq^?+A8$s(@WUa-?i>||2p lpBevW#qa`_ +session. + +.. code-block:: bash + + os3:~> screen -list + There is a screen on: + 28994.stack (08/10/2016 09:01:33 PM) (Detached) + 1 Socket in /var/run/screen/S-sdague. + +You can attach to this screen session using ``screen -r`` which gives +you a view of the services in action. + +.. image:: assets/images/screen_session_1.png + :width: 100% + +Basic Screen Commands +--------------------- + +The following minimal commands will be useful to using screen: + +* ``ctrl-a n`` - go to next window. Next is assumed to be right of + current window. +* ``ctrl-a p`` - go to previous window. Previous is assumed to be left + of current window. +* ``ctrl-a [`` - entry copy/scrollback mode. This allows you to + navigate back through the logs with the up arrow. +* ``ctrl-a d`` - detach from screen. Gets you back to a normal + terminal, while leaving everything running. + +For more about using screen, see the excellent `screen manual +`_. + +Patching a Service +================== + +If you want to make a quick change to a running service the easiest +way to do this is: + +* attach to screen +* navigate to the window in question +* ``ctrl-c`` to kill the service +* make appropriate changes to the code +* ``up arrow`` in the screen window to display the command used to run + that service +* ``enter`` to restart the service + +This works for services, except those running under Apache (currently +just ``keystone`` by default). + +.. warning:: + + All changes you are making are in checked out git trees that + DevStack thinks it has full control over. Uncommitted work, or + work committed to the master branch, may be overwritten during + subsequent DevStack runs. + +Testing a Patch Series +====================== + +When testing a larger set of patches, or patches that will impact more +than one service within a project, it is often less confusing to use +custom git locations, and make all your changes in a dedicated git +tree. + +In your ``local.conf`` you can add ``**_REPO``, ``**_BRANCH`` for most projects +to use a custom git tree instead of the default upstream ones. + +For instance: + +.. code-block:: bash + + [[local|localrc]] + NOVA_REPO=/home/sdague/nova + NOVA_BRANCH=fold_disk_config + +Will use a custom git tree and branch when doing any devstack +operations, such as ``stack.sh``. + +When testing complicated changes committing to these trees, then doing +``./unstack.sh && ./stack.sh`` is often a valuable way to +iterate. This does take longer per iteration than direct patching, as +the whole devstack needs to rebuild. + +You can use this same approach to test patches that are up for review +in gerrit by using the ref name that gerrit assigns to each change. + +.. code-block:: bash + + [[local|localrc]] + NOVA_BRANCH=refs/changes/10/353710/1 + + +Testing Changes to Apache Based Services +======================================== + +When testing changes to Apache based services, such as ``keystone``, +you can either use the Testing a Patch Series approach above, or make +changes in the code tree and issue an apache restart. + + +Testing Changes to Libraries +============================ + +When testing changes to libraries consumed by OpenStack services (such +as oslo or any of the python-fooclient libraries) things are a little +more complicated. By default we only test with released versions of +these libraries that are on pypi. 
+
+You must first override this with the setting ``LIBS_FROM_GIT``. This
+will set up your DevStack with the git version of that library instead
+of the released version.
+
+After that point you can also specify ``**_REPO``, ``**_BRANCH`` to use
+your changes instead of just upstream master.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   LIBS_FROM_GIT=oslo.policy
+   OSLOPOLICY_REPO=/home/sdague/oslo.policy
+   OSLOPOLICY_BRANCH=better_exception
+
+Because libraries are used by many services, library changes really
+need to go through a full ``./unstack.sh && ./stack.sh`` to see your
+changes in action.
+
+To figure out the repo / branch names for every library that's
+supported, you'll need to read the devstack source.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c1302eb930..d89637e796 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -113,6 +113,9 @@ You can ``source openrc`` in your shell, and then use the
 You can ``cd /opt/stack/tempest`` and run tempest tests that have been
 configured to work with your devstack.
 
+You can :doc:`make code changes to OpenStack and validate them
+`.
+
 Going further
 -------------
 
diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst
index 480d6aaf5e..74b944b5e1 100644
--- a/doc/source/site-map.rst
+++ b/doc/source/site-map.rst
@@ -17,5 +17,6 @@
    plugins
    plugin-registry
    faq
+   development
    hacking
    guides

From 3d5f03abe515009aea34599c0e5b0d541afcac0a Mon Sep 17 00:00:00 2001
From: Andrey Pavlov
Date: Wed, 10 Aug 2016 12:46:50 +0300
Subject: [PATCH 0105/1936] fix nova's fake virt driver plugin

nova's config was changed - the virt driver parameter no longer needs
the full path to the virt driver.

Change-Id: I2a32b4dee3f27efc78bd1e546a96cfbc1225b8b5
---
 lib/nova_plugins/hypervisor-fake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index 2434dce884..6ac219961e 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -36,7 +36,7 @@ function cleanup_nova_hypervisor {
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver"
+    iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver"
     # Disable arbitrary limits
     iniset $NOVA_CONF DEFAULT quota_instances -1
     iniset $NOVA_CONF DEFAULT quota_cores -1

From bc883df1c23fab66f681f2c4fe9c0d3affadc671 Mon Sep 17 00:00:00 2001
From: Sean Dague
Date: Fri, 12 Aug 2016 07:21:59 -0400
Subject: [PATCH 0106/1936] add networking write up for devstack

This explains the current state of networking in devstack, and a
couple of scenarios that people might want to try out for local
testing.

Change-Id: I2be35f4345bf9306c981ef6f0186b48da7d06772
---
 doc/source/index.rst      |  3 +-
 doc/source/networking.rst | 97 +++++++++++++++++++++++++++++++++++++++
 doc/source/site-map.rst   |  1 +
 3 files changed, 100 insertions(+), 1 deletion(-)
 create mode 100644 doc/source/networking.rst

diff --git a/doc/source/index.rst b/doc/source/index.rst
index d89637e796..435011bb96 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -120,7 +120,8 @@ Going further
 -------------
 
 Learn more about our :doc:`configuration system ` to
-customize devstack for your needs.
+customize devstack for your needs. Including making adjustments to the
+default :doc:`networking `.
 
 Read :doc:`guides ` for specific setups people have (note: guides
 are point in time contributions, and may not always be kept
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
new file mode 100644
index 0000000000..1d56c3367e
--- /dev/null
+++ b/doc/source/networking.rst
@@ -0,0 +1,97 @@
+=====================
+ DevStack Networking
+=====================
+
+An important part of the DevStack experience is networking that works
+by default for created guests. This might not be optimal for your
+particular testing environment, so this document tries its best to
+explain what's going on.
+
+Defaults
+========
+
+If you don't specify any configuration you will get the following:
+
+* neutron (including l3 with openvswitch)
+* private project networks for each openstack project
+* a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
+* the demo project configured with fixed ips on 10.0.0.0/24
+* a ``br-ex`` interface controlled by neutron for all its networking
+  (this is not connected to any physical interfaces).
+* DNS resolution for guests based on the resolv.conf for your host
+* an ip masq rule that allows created guests to route out
+
+This creates an environment which is isolated to the single
+host. Guests can get to the external network for package
+updates. Tempest tests will work in this environment.
+
+.. note::
+
+   By default all OpenStack environments have security group rules
+   which block all inbound packets to guests. If you want to be able
+   to ssh / ping your created guests you should run the following.
+
+   .. code-block:: bash
+
+      openstack security group rule create --proto icmp --dst-port 0 default
+      openstack security group rule create --proto tcp --dst-port 22 default
+
+Locally Accessible Guests
+=========================
+
+If you want to make your guests accessible to other machines on your
+network, we have to connect ``br-ex`` to a physical interface.
+
+Dedicated Guest Interface
+-------------------------
+
+If you have 2 or more interfaces on your devstack server, you can
+allocate an interface to neutron to fully manage. This **should not**
+be the same interface you use to ssh into the devstack server itself.
+
+This is done by setting the ``PUBLIC_INTERFACE`` attribute.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth1
+
+That will put all layer 2 traffic from your guests onto the main
+network. When running in this mode the ip masq rule is **not** added
+in your devstack, you are responsible for making routing work on your
+local network.
+
+Shared Guest Interface
+----------------------
+
+.. warning::
+
+   This is not a recommended configuration. Because of interactions
+   between ovs and bridging, if you reboot your box with active
+   networking you may lose network connectivity to your system.
+
+If you need your guests accessible on the network, but only have 1
+interface (using something like a NUC), you can share your one
+network. But in order for this to work you need to manually set a lot
+of addresses, and have them all exactly correct.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth0
+   HOST_IP=10.42.0.52
+   FLOATING_RANGE=10.42.0.52/24
+   PUBLIC_NETWORK_GATEWAY=10.42.0.1
+   Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
+
+In order for this scenario to work the floating ip network must match
+the default networking on your server. This breaks HOST_IP detection,
+as we exclude the floating range by default, so you have to specify
+that manually.
+
+The ``PUBLIC_NETWORK_GATEWAY`` is the gateway that the server would
+normally use to get off the network. ``Q_FLOATING_ALLOCATION_POOL`` controls
+the range of floating ips that will be handed out. As we are sharing
+your existing network, you'll want to give it a slice that your local
+dhcp server is not allocating. Otherwise you could easily have
+conflicting ip addresses, and cause havoc with your local network.
diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst
index 74b944b5e1..801fc66b80 100644
--- a/doc/source/site-map.rst
+++ b/doc/source/site-map.rst
@@ -14,6 +14,7 @@
    overview
    configuration
+   networking
    plugins
    plugin-registry
    faq

From b1a4f34bfc6adb9ab4577520601c44e8ccff277d Mon Sep 17 00:00:00 2001
From: Andrew Laski
Date: Fri, 27 May 2016 15:23:54 -0400
Subject: [PATCH 0107/1936] Setup cellsv2 for Nova

Run "nova-manage cell_v2 simple_cell_setup --transport_url ..." after
Nova is started. This will add all compute hosts into a new cell, and
set up a db for cell0.

Change-Id: I50a955b97d0e18426406c15397bdfbc9e807d908
Depends-On: I559f9c87e89926414b368cac9442dec4eadcb89b
---
 lib/nova | 5 +++++
 stack.sh | 1 +
 2 files changed, 6 insertions(+)

diff --git a/lib/nova b/lib/nova
index af5d1222a0..4c079f46b9 100644
--- a/lib/nova
+++ b/lib/nova
@@ -937,6 +937,11 @@ function create_flavors {
     fi
 }
 
+# create_cell(): Group the available hosts into a cell
+function create_cell {
+    nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url)
+}
+
 # Restore xtrace
 $_XTRACE_LIB_NOVA
 
diff --git a/stack.sh b/stack.sh
index 68e3d936b8..948475a270 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1263,6 +1263,7 @@ if is_service_enabled nova; then
     echo_summary "Starting Nova"
     start_nova
     create_flavors
+    create_cell
 fi
 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"

From 06f3639a70dc5884107a4045bef5a9de1fb725a5 Mon Sep 17 00:00:00 2001
From: Dean Troyer
Date: Fri, 12 Aug 2016 09:35:42 -0500
Subject: [PATCH 0108/1936] Add os-client-config and osc-lib from source

There is currently a hole in our testing that lets os-client-config,
which sits at the bottom of the dependency chain for some key pieces
like neutronclient and python-openstackclient, introduce gate
breakages.

Step one in fixing this is allowing os-client-config to be optionally
installed from source so that jobs can be put into its gate to
exercise its master vs devstack installs.

Additionally, osc-lib is a new and lovely library that's going to need
the same things.

We're putting both in install_oslo, even though they're not oslo
libraries, because that'll make grenade work properly.
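
With that in place, a gate job (or a local setup) can opt in to the
from-source installs the usual way; a minimal sketch, assuming the
library names below match the new stackrc entries:

    [[local|localrc]]
    LIBS_FROM_GIT=os-client-config,osc-lib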
Co-Authored-By: Monty Taylor Change-Id: I747480b6063a62e82ca2b030f274d3e87bf28b3b --- lib/oslo | 4 ++++ stackrc | 9 +++++++++ tests/test_libs_from_pypi.sh | 4 ++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/lib/oslo b/lib/oslo index 1773da2975..e34e48ad03 100644 --- a/lib/oslo +++ b/lib/oslo @@ -26,6 +26,8 @@ GITDIR["automaton"]=$DEST/automaton GITDIR["cliff"]=$DEST/cliff GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist +GITDIR["os-client-config"]=$DEST/os-client-config +GITDIR["osc-lib"]=$DEST/osc-lib GITDIR["oslo.cache"]=$DEST/oslo.cache GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency GITDIR["oslo.config"]=$DEST/oslo.config @@ -71,6 +73,8 @@ function install_oslo { _do_install_oslo_lib "cliff" _do_install_oslo_lib "debtcollector" _do_install_oslo_lib "futurist" + _do_install_oslo_lib "osc-lib" + _do_install_oslo_lib "os-client-config" _do_install_oslo_lib "oslo.cache" _do_install_oslo_lib "oslo.concurrency" _do_install_oslo_lib "oslo.config" diff --git a/stackrc b/stackrc index 4fefe8da30..bfb897b338 100644 --- a/stackrc +++ b/stackrc @@ -508,10 +508,19 @@ GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} +# os-client-config to manage clouds.yaml and friends +GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git} +GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-master} +GITDIR["os-client-config"]=$DEST/os-client-config + # os-vif library to communicate between Neutron to Nova GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git} GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master} +# osc-lib OpenStackClient common lib +GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} +GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master} + # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index bb58088ef3..fb55023886 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -36,8 +36,8 @@ ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization django_openstack_auth" -ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" -ALL_LIBS+=" oslo.utils python-swiftclient" +ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap" +ALL_LIBS+=" oslo.i18n oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler" From 14b12a74f6a258b6e97dbf85b0ccfd74028b83b0 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 12 Aug 2016 19:07:12 -0700 Subject: [PATCH 0109/1936] Remove stale config l3|dhcp_agent_manager options There is no longer a trace of these options anywhere in the Neutron codebase. These can be safely removed. 
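
One way to double check this locally is a quick grep of a neutron
checkout; a sketch only, with the checkout path assumed:

    cd /opt/stack/neutron
    git grep -E 'dhcp_agent_manager|l3_agent_manager'   # expect no matches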
Change-Id: Ibf00e158248e2a20248917c8cfc0011d30da6a82 --- lib/neutron_plugins/linuxbridge_agent | 3 +-- lib/neutron_plugins/openvswitch_agent | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 0a066354ca..67d0405c23 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -44,14 +44,13 @@ function neutron_plugin_configure_debug_command { function neutron_plugin_configure_dhcp_agent { local conf_file=$1 - iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport + : } function neutron_plugin_configure_l3_agent { local conf_file=$1 sudo brctl addbr $PUBLIC_BRIDGE iniset $conf_file DEFAULT external_network_bridge - iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } function neutron_plugin_configure_plugin_agent { diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 69e38f4df1..02eebe1bfd 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -29,13 +29,12 @@ function neutron_plugin_configure_debug_command { function neutron_plugin_configure_dhcp_agent { local conf_file=$1 - iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport + : } function neutron_plugin_configure_l3_agent { local conf_file=$1 _neutron_ovs_base_configure_l3_agent - iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } function neutron_plugin_configure_plugin_agent { From b08b673def8f5c80b7a91ba2127edbb13c39c26a Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Sun, 14 Aug 2016 13:26:13 +0300 Subject: [PATCH 0110/1936] Allow properly overriding DEST Change the order of variable declarations in stackrc so that setting custom DEST in local.conf is also affecting DATA_DIR, SERVICE_DIR and SUBUNIT_OUTPUT. Change-Id: I00847bb6733febf105855ae6fc577a7c904ec4b4 Closes-Bug: #1285720 --- stackrc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stackrc b/stackrc index 4fefe8da30..9513e5a714 100644 --- a/stackrc +++ b/stackrc @@ -23,15 +23,6 @@ source $RC_DIR/functions # Destination path for installation DEST=/opt/stack -# Destination for working data -DATA_DIR=${DEST}/data - -# Destination for status files -SERVICE_DIR=${DEST}/status - -# Path for subunit output file -SUBUNIT_OUTPUT=${DEST}/devstack.subunit - # Determine stack user if [[ $EUID -eq 0 ]]; then STACK_USER=stack @@ -137,6 +128,15 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# Destination for working data +DATA_DIR=${DATA_DIR:-$DEST/data} + +# Destination for status files +SERVICE_DIR=${SERVICE_DIR:-$DEST/status} + +# Path for subunit output file +SUBUNIT_OUTPUT=${SUBUNIT_OUTPUT:-$DEST/devstack.subunit} + # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, # where logs are to be presented as browsable text files; hence color From 65be33f68e613cb054d8bf3057535976b6df4918 Mon Sep 17 00:00:00 2001 From: Isaac Beckman Date: Sun, 14 Aug 2016 15:27:40 +0300 Subject: [PATCH 0111/1936] Remove lib/cinder_backends/xiv Since support for sections was added to devstack local.conf parsing we don't need this, and actually prefer just using the sections in local.conf. 
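
For anyone who still needs this backend, roughly the same settings can
be pushed into the generated config from local.conf instead; a sketch
with placeholder values (only the driver path comes from the removed
file):

    [[post-config|$CINDER_CONF]]
    [xiv]
    volume_driver = cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver
    san_ip = 10.0.0.10
    san_login = admin
    san_clustername = cluster1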
Change-Id: I5908fdf7ad127997bb1f4a6bbb16d0d8cf073ddd --- lib/cinder_backends/xiv | 86 ----------------------------------------- 1 file changed, 86 deletions(-) delete mode 100644 lib/cinder_backends/xiv diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv deleted file mode 100644 index e8b5da05d5..0000000000 --- a/lib/cinder_backends/xiv +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# -# Copyright 2014 IBM Corp. -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Alon Marx -# - -# lib/cinder_plugins/xiv -# Configure the xiv_ds8k driver for xiv testing - -# Enable xiv_ds8k driver for xiv with: -# -# CINDER_ENABLED_BACKENDS+=,xiv: -# XIV_DRIVER_VERSION= -# SAN_IP= -# SAN_LOGIN= -# SAN_PASSWORD= -# SAN_CLUSTERNAME= -# CONNECTION_TYPE= iscsi|fc -# XIV_CHAP= disabled|enabled - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_backend_xiv - Configure Cinder for xiv backends - -# Save trace setting -_XTRACE_CINDER_XIV=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_backend_xiv - Set config files, create data dirs, etc -function configure_cinder_backend_xiv { - - local be_name=$1 - - python -c 'from xiv_ds8k_openstack.xiv_nova_proxy import XIVNovaProxy' - if [ $? -ne 0 ]; then - die $LINENO "XIV_DS8K driver is missing. Please install first" - fi - - # For reference: - # ``XIV_DS8K_BACKEND='IBM-XIV_'${SAN_IP}'_'${SAN_CLUSTERNAME}'_'${CONNECTION_TYPE}`` - iniset $CINDER_CONF DEFAULT xiv_ds8k_driver_version $XIV_DRIVER_VERSION - - iniset $CINDER_CONF $be_name san_ip $SAN_IP - iniset $CINDER_CONF $be_name san_login $SAN_LOGIN - iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD - iniset $CINDER_CONF $be_name san_clustername $SAN_CLUSTERNAME - iniset $CINDER_CONF $be_name xiv_ds8k_connection_type $CONNECTION_TYPE - iniset $CINDER_CONF $be_name volume_backend_name $be_name - iniset $CINDER_CONF $be_name volume_driver 'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver' - iniset $CINDER_CONF $be_name xiv_ds8k_proxy 'xiv_ds8k_openstack.xiv_nova_proxy.XIVNovaProxy' - iniset $CINDER_CONF $be_name xiv_chap $XIV_CHAP -} - -# Restore xtrace -$_XTRACE_CINDER_XIV - -# Local variables: -# mode: shell-script -# End: From 9301e8875b8c50e486a3751b3d894d4ccdc94f84 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Mon, 15 Aug 2016 14:58:00 -0400 Subject: [PATCH 0112/1936] Revert "Setup cellsv2 for Nova" This reverts commit b1a4f34bfc6adb9ab4577520601c44e8ccff277d, which breaks Ironic's jobs. 
For example: http://logs.openstack.org/90/355390/1/check/gate-tempest-dsvm-ironic-ipa-wholedisk-agent_ssh-tinyipa/b9e264a/logs/devstacklog.txt.gz#_2016-08-15_17_31_13_358 Change-Id: Iafc579f73fe4a97056944872e2f2c8cd1a62c21c --- lib/nova | 5 ----- stack.sh | 1 - 2 files changed, 6 deletions(-) diff --git a/lib/nova b/lib/nova index 8ac82008b4..1369c409fc 100644 --- a/lib/nova +++ b/lib/nova @@ -936,11 +936,6 @@ function create_flavors { fi } -# create_cell(): Group the available hosts into a cell -function create_cell { - nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) -} - # Restore xtrace $_XTRACE_LIB_NOVA diff --git a/stack.sh b/stack.sh index b1903e7095..823b63ba24 100755 --- a/stack.sh +++ b/stack.sh @@ -1257,7 +1257,6 @@ if is_service_enabled nova; then echo_summary "Starting Nova" start_nova create_flavors - create_cell fi if is_service_enabled cinder; then echo_summary "Starting Cinder" From 62f6eb68fdca2ecd30fbf0a1092b6b36dad368b0 Mon Sep 17 00:00:00 2001 From: lvdongbing Date: Wed, 17 Aug 2016 03:44:42 -0400 Subject: [PATCH 0113/1936] Change quota_injected_file_path_bytes to quota_injected_file_path_length 'quota_injected_file_path_bytes' has been renamed to 'quota_injected_file_path_length' long time ago, this patch fixes this issue in devstack. Change-Id: I5d3c52c5ded5321435d2d395b682c4c0725279a7 --- lib/nova_plugins/hypervisor-fake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 6ac219961e..f9b95c1816 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -45,7 +45,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 iniset $NOVA_CONF DEFAULT quota_metadata_items -1 iniset $NOVA_CONF DEFAULT quota_injected_files -1 - iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 + iniset $NOVA_CONF DEFAULT quota_injected_file_path_length -1 iniset $NOVA_CONF DEFAULT quota_security_groups -1 iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 iniset $NOVA_CONF DEFAULT quota_key_pairs -1 From 023ac218059934a0746fa41c1473bd93ba9cc04a Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 17 Aug 2016 08:11:12 +0000 Subject: [PATCH 0114/1936] Updated from generate-devstack-plugins-list Change-Id: I9379f01103bcb1ae7417c8bd14746aee9fb722ea --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index bdb8d8bc9c..bb8527041e 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -73,6 +73,7 @@ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr- magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ manila `git://git.openstack.org/openstack/manila `__ +masakari `git://git.openstack.org/openstack/masakari `__ mistral `git://git.openstack.org/openstack/mistral `__ monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ From d7a3f5c4cc55e61bdcb5eb57bf823cee2e059828 Mon Sep 17 00:00:00 2001 From: Matt Van Dijk Date: Tue, 16 Aug 2016 15:46:58 +0000 Subject: [PATCH 0115/1936] Make the Neutron l3 plugin use the subnetpools The plugin creates subnetpools but does not use them when creating the default subnets. It uses CIDR values that overlap with the default pools. Change this to use the subnetpools. 
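
Concretely, the default subnets are now requested from the shared pool
rather than created from an overlapping CIDR; the call pattern looks
roughly like this (a sketch mirroring the helpers changed below):

    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" \
        subnet-create --ip_version 4 --subnetpool $SUBNETPOOL_V4_ID \
        --name $PRIVATE_SUBNET_NAME $NET_ID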
Change-Id: I6171c13507e420f146801d323cb1011be36c1e8c Closes-bug: 1613717 --- lib/neutron_plugins/services/l3 | 51 ++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 61b8402818..5e52306206 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -149,6 +149,15 @@ function create_neutron_initial_network { neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK fi + if is_networking_extension_supported "auto-allocated-topology"; then + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) + fi + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) + fi + fi + if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" @@ -156,14 +165,20 @@ function create_neutron_initial_network { die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} $NET_ID $fixed_range_v4 | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi if [[ "$IP_VERSION" =~ .*6 ]]; then die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" - SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2) + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE + fi + SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} $NET_ID $fixed_range_v6 | grep 'id' | get_field 2) die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID 
for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -200,14 +215,8 @@ function create_neutron_initial_network { fi EXTERNAL_NETWORK_FLAGS="--router:external" - if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then + if is_networking_extension_supported "auto-allocated-topology"; then EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default" - if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) - fi - if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) - fi fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then @@ -232,11 +241,15 @@ function create_neutron_initial_network { # Create private IPv4 subnet function _neutron_create_private_subnet_v4 { local project_id=$1 + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi local subnet_params="--tenant-id $project_id " subnet_params+="--ip_version 4 " subnet_params+="--gateway $NETWORK_GATEWAY " subnet_params+="--name $PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE" + subnet_params+="${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} " + subnet_params+="$NET_ID $fixed_range_v4" local subnet_id subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" @@ -249,11 +262,15 @@ function _neutron_create_private_subnet_v6 { die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$FIXED_RANGE_V6 + fi local subnet_params="--tenant-id $project_id " subnet_params+="--ip_version 6 " subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" + subnet_params+="${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} " + subnet_params+="$NET_ID $fixed_range_v6 $ipv6_modes" local ipv6_subnet_id ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" @@ -321,7 +338,11 @@ function _neutron_configure_router_v4 { fi ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP + local replace_range=${SUBNETPOOL_PREFIX_V4} + if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then + replace_range=${FIXED_RANGE} + fi + sudo ip route replace $replace_range via $ROUTER_GW_IP fi _neutron_set_router_id fi @@ -360,7 +381,11 @@ function 
_neutron_configure_router_v6 { # Configure interface for public bridge sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface - sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface + local replace_range=${SUBNETPOOL_PREFIX_V6} + if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then + replace_range=${FIXED_RANGE_V6} + fi + sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface fi _neutron_set_router_id fi From 556139e710392c4f2586504b17490d8b63c4b21d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 18 Aug 2016 15:09:44 +0200 Subject: [PATCH 0116/1936] fix ceph config file path variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CEPH_CONF does not exist anymore, resulting both cinder-volume and cinder-backup being configured with an empty rbd_ceph_conf option. Using CEPH_CONF_FILE to fix this. Change-Id: I1aa590aba900a4a94698917e45a0ea5c6f497f18 Signed-off-by: Sébastien Han --- lib/cinder_backends/ceph | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 9bff5bef4f..ba86ccf8f4 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -45,7 +45,7 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver" - iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID" @@ -66,7 +66,7 @@ function configure_cinder_backend_ceph { sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph" - iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 From 0a318ab8a6ede38a330b5bff10479772ab4776f0 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 18 Aug 2016 13:50:18 -0500 Subject: [PATCH 0117/1936] Switch keystone to dogpile.cache.memcached backend memcache_pool is there to keep a limited number of thread-associated connections open rather than a connection for every thread. If you don't have a huge number of threads it doesn't offer anything. Keystone is an example of a service where memcache_pool doesn't improve things -- eventlet isn't supported anymore and more threads is not useful due to GIL. As such, keystone cache backend is changed to dogpile.cache.memcached. See https://review.openstack.org/357407 for the oslo.cache help text change. 
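For reference, the [cache] section devstack ends up writing should look roughly like this (values taken from the surrounding configure_keystone defaults):

    [cache]
    enabled = True
    backend = dogpile.cache.memcached
    memcache_servers = localhost:11211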
Change-Id: I4452a8c4968073cdea4c0f384453a5a28519fa08 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 6198e43b58..6d8a43c0c2 100644 --- a/lib/keystone +++ b/lib/keystone @@ -245,7 +245,7 @@ function configure_keystone { # Enable caching iniset $KEYSTONE_CONF cache enabled "True" - iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool" + iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached" iniset $KEYSTONE_CONF cache memcache_servers localhost:11211 # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617 From 7682ea88a6ab8693b215646f16748dbbc2476cc4 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 18 Aug 2016 16:19:36 -0400 Subject: [PATCH 0118/1936] Change default for FORCE_CONFIG_DRIVE We really should be using the metadata server more in our normal testing, this changes the default to use it. Change-Id: I8ef14e6110da1160163c0106e32032d27226f929 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index e187220198..235b533d77 100644 --- a/lib/nova +++ b/lib/nova @@ -83,7 +83,7 @@ METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive -FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"} +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. From 85ad108adf84718d5d7747e892ba236a21fc2dac Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Mon, 22 Aug 2016 17:00:50 +0000 Subject: [PATCH 0119/1936] Fix typo in tools/info.sh comment A trivial fix to a typo in tools/info.sh discovered while looking at it to see what it was. Change-Id: I9fb8906c375f99e43cdd0f41f7e4510c901fd3a1 --- tools/info.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/info.sh b/tools/info.sh index c056fa73f8..282667f9d0 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -8,7 +8,7 @@ # Output types are git,localrc,os,pip,pkg: # # git||[] -# localtc|= +# localrc|= # os|= # pip|| # pkg|| From 31813e92199386f19dd7e993a9e951d293503a01 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 22 Aug 2016 15:39:22 -0400 Subject: [PATCH 0120/1936] Let neutron use default gateway IP for subnets By default, FIXED_RANGE and NETWORK_GATEWAY (and the IPv6 equivalents) are in the same subnet. But if FIXED_RANGE is over-ridden in local.conf we could create a subnet with an invalid gateway address. Since neutron will pick the lowest host IP as the gateway by default, do not specify them unless the user has specifically set them. Do this for both the private and public subnets, as well as the public IPv4 subnet. 
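In shell terms the change amounts to guarding the gateway argument, e.g. for the private IPv4 subnet (sketch mirroring the diff below):

    # Only pass --gateway when the user set one in local.conf; otherwise
    # neutron assigns the lowest host IP in the subnet as the gateway.
    if [[ -n "$NETWORK_GATEWAY" ]]; then
        subnet_params+="--gateway $NETWORK_GATEWAY "
    fi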
Change-Id: Ifc71400a3af1f131bb8a9722188e13de5bd3c806 --- lib/neutron_plugins/services/l3 | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a4e72483b4..c6f14d35f6 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -63,14 +63,14 @@ IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac} IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64} -IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1} +IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-} IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1} # Gateway and subnet defaults, in case they are not customized in localrc -NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} +NETWORK_GATEWAY=${NETWORK_GATEWAY:-} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-} PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} @@ -234,7 +234,9 @@ function _neutron_create_private_subnet_v4 { local project_id=$1 local subnet_params="--tenant-id $project_id " subnet_params+="--ip_version 4 " - subnet_params+="--gateway $NETWORK_GATEWAY " + if [[ -n "$NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $NETWORK_GATEWAY " + fi subnet_params+="--name $PRIVATE_SUBNET_NAME " subnet_params+="$NET_ID $FIXED_RANGE" local subnet_id @@ -251,7 +253,9 @@ function _neutron_create_private_subnet_v6 { local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" local subnet_params="--tenant-id $project_id " subnet_params+="--ip_version 6 " - subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + fi subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" local ipv6_subnet_id @@ -262,9 +266,11 @@ function _neutron_create_private_subnet_v6 { # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local subnet_params+="--ip_version 4 " + local subnet_params="--ip_version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " - subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + fi subnet_params+="--name $PUBLIC_SUBNET_NAME " subnet_params+="$EXT_NET_ID $FLOATING_RANGE " subnet_params+="-- --enable_dhcp=False" From 01acdabb1d65594a5fd74a6c72dcd5f5b0c1b0c6 Mon Sep 17 00:00:00 2001 From: igor Date: Fri, 29 Jul 2016 13:11:53 +0200 Subject: [PATCH 0121/1936] Fixes language: "following allowing" -> "allowing" Stops propagation of initially erroneous construct. 
Change-Id: I2197de57cbac98e87fb39fa0dca4c691fe5e856f --- HACKING.rst | 2 +- exercises/aggregates.sh | 2 +- exercises/boot_from_volume.sh | 2 +- exercises/client-args.sh | 2 +- exercises/client-env.sh | 2 +- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 2 +- exercises/sec_groups.sh | 2 +- exercises/swift.sh | 2 +- exercises/volumes.sh | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index d763c75b8b..b76cb6c4a7 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -219,7 +219,7 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. set -o errexit # Print the commands being run so that we can see the command that triggers - # an error. It is also useful for following allowing as the install occurs. + # an error. It is also useful for following as the install occurs. set -o xtrace * Settings and configuration are stored in ``exerciserc``, which must be diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 808ef76e2f..8cbca54fb2 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -20,7 +20,7 @@ echo "**************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 84ac08f017..7478bdf138 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -16,7 +16,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 2c8fe81390..b380968da8 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 6ab4d08715..fff04df9f2 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 485208ba30..5abc7137b3 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. 
set -o xtrace diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 81150061ca..dc6bbbb5c7 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -20,7 +20,7 @@ echo "*********************************************************************" set -o errtrace # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace # Environment diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 5f8b0a4d5d..2f78e393eb 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/swift.sh b/exercises/swift.sh index 4a41e0f1ed..8aa376b8a7 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 0de1226fee..e7c3560894 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -13,7 +13,7 @@ echo "*********************************************************************" set -o errexit # Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. +# an error. It is also useful for following as the install occurs. set -o xtrace From 7eb672d1a95794644db8c34411f44734927bd87f Mon Sep 17 00:00:00 2001 From: Jan Stodt Date: Wed, 24 Aug 2016 15:29:06 +0200 Subject: [PATCH 0122/1936] Update doc to reflect neutron became devstack default Removing the explicit enablment of Neutron services, as with [1] they are configured as defaults in stackrc. [1] https://review.openstack.org/#/c/350750/ Change-Id: Ic8910cd28fe37842f7d824e68bd2ea705e7e52de --- doc/source/guides/neutron.rst | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index c5b1634f62..b26fd1e8ee 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -76,12 +76,6 @@ serving as a hypervisor for guest instances. RABBIT_PASSWORD=secret SERVICE_PASSWORD=secret - # Do not use Nova-Network - disable_service n-net - # Enable Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 - - ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" @@ -389,11 +383,7 @@ controller node. Q_USE_PROVIDER_NETWORKING=True - # Do not use Nova-Network - disable_service n-net - - # Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt + disable_service q-l3 ## Neutron Networking options used to create Neutron Subnets @@ -530,12 +520,6 @@ setup, with small modifications for the interface mappings. 
RABBIT_PASSWORD=secret SERVICE_PASSWORD=secret - # Do not use Nova-Network - disable_service n-net - # Enable Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 - - ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" @@ -582,10 +566,7 @@ you do not require them. Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap Q_USE_PROVIDER_NETWORKING=True - #Enable Neutron services - disable_service n-net enable_plugin neutron git://git.openstack.org/openstack/neutron - ENABLED_SERVICES+=,q-agt,q-svc ## MacVTap agent options Q_AGENT=macvtap From 88f8558d874072536e7660a233f24207a7089651 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 14 Aug 2016 06:55:42 -0700 Subject: [PATCH 0123/1936] Enable neutron to work in a multi node setup The controller node where devstack is being run should create the neutron network; the compute node should not. In the case that we want to run a multi-node neutron setup we need to configure the following (in the case that a plugin does not have any agents running on the compute node): ENABLED_SERVICES=n-cpu,neutron In addition to this, the code did not enable decomposed plugins to configure their nova configuration if necessary. This patch ensures that multi-node support works. Change-Id: I8e80edd453a1106ca666d6c531b2433be631bce4 Closes-bug: #1613069 --- lib/neutron | 3 +++ lib/neutron_plugins/services/l3 | 8 ++++++++ stack.sh | 5 +---- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index c1552e3d06..7442efd2c1 100644 --- a/lib/neutron +++ b/lib/neutron @@ -305,6 +305,9 @@ function configure_neutron_nova_new { iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver + # optionally set options in nova_conf + neutron_plugin_create_nova_conf + if is_service_enabled neutron-metadata-agent; then iniset $NOVA_CONF neutron service_metadata_proxy "True" fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a4e72483b4..334cd7a694 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -140,6 +140,14 @@ function _neutron_get_ext_gw_interface { } function create_neutron_initial_network { + if ! is_service_enabled q-svc && ! is_service_enabled neutron-api; then + echo "Controller services not enabled. No networks configured!" + return + fi + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "False" ]]; then + echo "Network creation disabled!" + return + fi local project_id project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" diff --git a/stack.sh b/stack.sh index 823b63ba24..ce34cd5ef3 100755 --- a/stack.sh +++ b/stack.sh @@ -1248,10 +1248,7 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements
Setting accept_ra to 2 will cause the RAs to be accepted. Change-Id: Ia044fff2a1731ab6c04f82aea47096b425e0c0a0 --- lib/neutron_plugins/services/l3 | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a4e72483b4..6bbac2c6a9 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -83,6 +83,9 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} +default_route_dev=$(ip route | grep ^default | awk '{print $5}') +die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" + function _determine_config_l3 { local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" echo "$opts" @@ -113,9 +116,7 @@ function _configure_neutron_l3_agent { _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" fi else - local default_dev="" - default_dev=$(ip route | grep ^default | awk '{print $5}') - sudo iptables -t nat -A POSTROUTING -o $default_dev -s $FLOATING_RANGE -j MASQUERADE + sudo iptables -t nat -A POSTROUTING -o $default_route_dev -s $FLOATING_RANGE -j MASQUERADE fi } @@ -351,6 +352,11 @@ function _neutron_configure_router_v6 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # Ensure IPv6 RAs are accepted on the interface with the default route. + # This is needed for neutron-based devstack clouds to work in + # IPv6-only clouds in the gate. Please do not remove this without + # talking to folks in Infra. + sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 # Ensure IPv6 forwarding is enabled on the host sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge From 7e40c6406b553c489515a6ca79014bcaaa5a7db3 Mon Sep 17 00:00:00 2001 From: Doug Wiegley Date: Sat, 20 Aug 2016 16:32:14 +0000 Subject: [PATCH 0125/1936] Remove lbaas from devstack proper, take 2 p-c patches have merged, neutron-lbaas removal is in the merge queue. This reverts commit b3f26cb66c70b599c4d77945f2bdadd9537c7c35. Depends-On: I506949e75bc62681412358ba689cb07b16311b68 Change-Id: I98d62c13ef90b20a9c67ef4f1720efcaa366fb31 --- lib/neutron-legacy | 40 +----------------- lib/neutron_plugins/services/loadbalancer | 51 ----------------------- stackrc | 4 -- 3 files changed, 1 insertion(+), 94 deletions(-) delete mode 100644 lib/neutron_plugins/services/loadbalancer diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 25fb6b7847..e8f9aeb938 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -74,7 +74,6 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts @@ -88,9 +87,6 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} -# Default provider for load balancer service -DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default - # Agent binaries. 
Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -252,12 +248,6 @@ if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN fi -# Agent loadbalancer service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/loadbalancer - # Agent metering service plugin functions # ------------------------------------------- @@ -316,10 +306,6 @@ function configure_mutnauq { iniset_rpc_backend neutron $NEUTRON_CONF # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES - if is_service_enabled q-lbaas; then - deprecated "Configuring q-lbaas through devstack is deprecated" - _configure_neutron_lbaas - fi if is_service_enabled q-metering; then _configure_neutron_metering fi @@ -418,10 +404,6 @@ function install_mutnauq { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR - if is_service_enabled q-lbaas; then - git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH - setup_develop $NEUTRON_LBAAS_DIR - fi if [ "$VIRT_DRIVER" == 'xenserver' ]; then local dom0_ip @@ -452,10 +434,6 @@ function install_neutron_agent_packages { if is_service_enabled q-agt q-dhcp q-l3; then neutron_plugin_install_agent_packages fi - - if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages - fi } # Start running processes, including screen @@ -516,7 +494,6 @@ function start_mutnauq_other_agents { fi run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME" run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -556,9 +533,6 @@ function stop_mutnauq_other { stop_process q-meta fi - if is_service_enabled q-lbaas; then - neutron_lbaas_stop - fi if is_service_enabled q-metering; then neutron_metering_stop fi @@ -662,7 +636,7 @@ function cleanup_mutnauq { fi # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done } @@ -818,18 +792,6 @@ function _configure_neutron_ceilometer_notifications { iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging } -function _configure_neutron_lbaas { - # Uses oslo config generator to generate LBaaS sample configuration files - (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh) - - if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then - cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf - iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER - fi - neutron_agent_lbaas_configure_common - neutron_agent_lbaas_configure_agent -} - function _configure_neutron_metering { neutron_agent_metering_configure_common neutron_agent_metering_configure_agent diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer deleted file mode 100644 index 30e9480f2e..0000000000 --- a/lib/neutron_plugins/services/loadbalancer +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Neutron 
loadbalancer plugin -# --------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - - -AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" -LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin - -function neutron_agent_lbaas_install_agent_packages { - if is_ubuntu || is_fedora || is_suse; then - install_package haproxy - fi -} - -function neutron_agent_lbaas_configure_common { - _neutron_service_plugin_class_add $LBAAS_PLUGIN - _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR -} - -function neutron_agent_lbaas_configure_agent { - LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy - mkdir -p $LBAAS_AGENT_CONF_PATH - - LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" - - cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME - - if is_fedora; then - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" - iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" - fi -} - -function neutron_lbaas_stop { - pids=$(ps aux | awk '/haproxy/ { print $2 }') - [ ! -z "$pids" ] && sudo kill $pids || true -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/stackrc b/stackrc index bfb897b338..446510c291 100644 --- a/stackrc +++ b/stackrc @@ -266,10 +266,6 @@ NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git} NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master} -# neutron lbaas service -NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git} -NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master} - # compute service NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} NOVA_BRANCH=${NOVA_BRANCH:-master} From 1c5fc97136cfd9f89afc1d51239af343e0dda6a3 Mon Sep 17 00:00:00 2001 From: zhurong Date: Thu, 25 Aug 2016 23:35:21 +0800 Subject: [PATCH 0126/1936] Add files/*.deb and files/*.deb.* to gitignore Change-Id: Ieb8bc6a0852eabce987fef9441cca8770b711a38 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index a470ff5d27..d1781bc730 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,8 @@ files/*.gz files/*.vmdk files/*.rpm files/*.rpm.* +files/*.deb +files/*.deb.* files/*.qcow2 files/*.img files/images From 1554adef26bd3bd184ddab668660428bdf392232 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 22 Jul 2016 09:40:19 -0700 Subject: [PATCH 0127/1936] Revert "Revert "Use real Neutron network for L3 GW by default"" This reverts commit 7da968a8be03229cfa72b215b87f17e28e23a988. Change-Id: I9ed28ccf6af611b280ada3420d7d2a833178fcac --- lib/neutron-legacy | 4 ++-- lib/neutron_plugins/services/l3 | 9 +++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e8f9aeb938..b1b5230fdd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -199,7 +199,7 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # agent, as described below. 
# # Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # With the openvswitch agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to @@ -209,7 +209,7 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # port for external connectivity. # # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} # With the linuxbridge agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 6eefb77597..b598241c78 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -17,6 +17,11 @@ IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public +# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is +# used. +Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} + # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of # PUBLIC_BRIDGE. This is intended to be used with # Q_USE_PROVIDERNET_FOR_PUBLIC=True. @@ -51,7 +56,7 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # Q_USE_PROVIDERNET_FOR_PUBLIC=True # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False} +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} # Generate 40-bit IPv6 Global ID to comply with RFC 4193 @@ -327,7 +332,7 @@ function _neutron_configure_router_v4 { local cidr_len=${FLOATING_RANGE#*/} local testcmd="ip -o link | grep -q $ext_gw_interface" test_with_retry "$testcmd" "$ext_gw_interface creation failed" - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi From 69d4a71dfe86e8111101dcd1bcf5a4138a7956a4 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Sat, 27 Aug 2016 01:01:37 +0000 Subject: [PATCH 0128/1936] Allow setup of credential encryption using fernet With the addition of encrypted credential in keystone, we need to be able to add setup steps in devstack to configure the credential repository with encryption keys. Depends-On: I97e7701bc5b8765d207cc721793643bcefa2d4e2 Depends-On: Id3e8922adc154cfec5f7a36613e22eb0b49eeffe Change-Id: I433da9a257daa21ec3b5996b2bca571211f1fbba --- lib/keystone | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/keystone b/lib/keystone index 6198e43b58..797ff50646 100644 --- a/lib/keystone +++ b/lib/keystone @@ -345,6 +345,8 @@ function configure_keystone { iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" + iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/" + # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. 
# The users from this project are globally admin as before, but it also # allows policy changes in order to clarify the adminess scope. @@ -514,6 +516,9 @@ function init_keystone { rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup fi + rm -rf "$KEYSTONE_CONF_DIR/credential-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup + } # install_keystoneauth() - Collect source and prepare From 8f586fbefe10893343c6cf9d906f1e6734e89e88 Mon Sep 17 00:00:00 2001 From: Janki Chhatbar Date: Mon, 18 Jul 2016 08:19:25 +0530 Subject: [PATCH 0129/1936] Start virtlogd service Ensure the virtlogd service is started, to work-around various platform issues where it isn't started correctly. Closes-Bug: #1603009 Change-Id: I548b377df6b2f0c287429e4387ee33184a82a64d --- lib/nova_plugins/functions-libvirt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 4e5a748e45..cc013c3181 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -124,6 +124,12 @@ EOF # Service needs to be started on redhat/fedora -- do a restart for # sanity after fiddling the config. restart_service $LIBVIRT_DAEMON + + # Restart virtlogd companion service to ensure it is running properly + # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455 + # https://bugzilla.redhat.com/show_bug.cgi?id=1290357 + # (not all platforms have it; libvirt 1.3+ only, thus the ignore) + restart_service virtlogd || true } From 26edd7b071f84e2fd2a9953086209e02b0c27ef3 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Mon, 8 Aug 2016 07:23:36 +0000 Subject: [PATCH 0130/1936] Install conntrack in XenServer Dom0 Neutron openvswitch agent running in compute node will control the actual connection of the VMs in Dom0 via conntrack-tools, but Dom0 doesn't install conntrack-tools RPM by default. This patch is to add such support with XenServer 7.0 and above. Change-Id: Iec56db761015d4b7baa5a5f54314f4ff3fa67e02 --- lib/nova_plugins/hypervisor-xenserver | 1 + tools/xen/functions | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index e75226ae64..e5d25da3a3 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -87,6 +87,7 @@ CRONTAB cat $TOP_DIR/tools/xen/functions echo "create_directory_for_images" echo "create_directory_for_kernels" + echo "install_conntrack_tools" } | $ssh_dom0 } diff --git a/tools/xen/functions b/tools/xen/functions index cf145686b5..e1864eb4bb 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -305,3 +305,25 @@ function get_domid { xe vm-list name-label="$vm_name_label" params=dom-id minimal=true } + +function install_conntrack_tools { + local xs_host + local xs_ver_major + local centos_ver + local conntrack_conf + xs_host=$(xe host-list --minimal) + xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1) + if [ $xs_ver_major -gt 6 ]; then + # Only support conntrack-tools in Dom0 with XS7.0 and above + if [ ! -f /usr/sbin/conntrackd ]; then + sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo + centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'.' 
-f1-2 | tr '-' '.') + yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools + # Backup conntrackd.conf after install conntrack-tools, use the one with statistic mode + mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back + conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats) + cp $conntrack_conf /etc/conntrackd/conntrackd.conf + fi + service conntrackd restart + fi +} From 4d6017566a2fd550b418609c8452e6cf35dd29a7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 12 Jul 2016 19:34:09 +0000 Subject: [PATCH 0131/1936] Add support for placement API to devstack Uses lib/placement, but relies on some functionality from lib/nova. This leads to some weirdness since nova has special status in stack.sh. If/when placement is extracted it may be good to follow the devstack plugin structure instead. Because the placement code is currently a part of nova, there are dependencies in lib/placement on some $NOVA_* variables and, if a virtualenv is being used, the virtualenv used by nova. Because placement currently runs using nova's configuration settings, not a lot actually happens in lib/placement: apache is configured and keystone accounts and endpoints are created. If PLACEMENT_DB_ENABLED is true then a separate placement db will be configured. When complete, the initial version of the placement service will provide support for managing resource providers, inventories and allocations. The placement api only runs under mod-wsgi. Change-Id: I53dd3e6b41de17387a0e179fc9ac64c143b6a9eb --- clean.sh | 1 + files/apache-placement-api.template | 25 ++++ lib/placement | 201 ++++++++++++++++++++++++++++ stack.sh | 17 +++ unstack.sh | 5 + 5 files changed, 249 insertions(+) create mode 100644 files/apache-placement-api.template create mode 100644 lib/placement diff --git a/clean.sh b/clean.sh index 452df02d80..bace3f53fe 100755 --- a/clean.sh +++ b/clean.sh @@ -46,6 +46,7 @@ source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template new file mode 100644 index 0000000000..b89ef96776 --- /dev/null +++ b/files/apache-placement-api.template @@ -0,0 +1,25 @@ +Listen %PUBLICPORT% + +<VirtualHost *:%PUBLICPORT%> + WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup placement-api + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%M" + </IfVersion> + ErrorLog /var/log/%APACHE_NAME%/placement-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% +</VirtualHost> + +Alias /placement %PUBLICWSGI% +<Location /placement> + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup placement-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On +</Location> diff --git a/lib/placement b/lib/placement new file mode 100644 index 0000000000..4e80c55c26 --- /dev/null +++ b/lib/placement @@ -0,0 +1,201 @@ +#!/bin/bash +# +# lib/placement +# Functions to control the configuration and operation of the **Placement** service +# +# Currently the placement service is embedded in nova. Eventually we +# expect this to change so this file is started as a separate entity +# despite making use of some *NOVA* variables and files.
+ +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_placement +# - cleanup_placement +# - configure_placement +# - init_placement +# - start_placement +# - stop_placement + +# Save trace setting +_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +PLACEMENT_CONF_DIR=/etc/nova +PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf +PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement} + + +# The placement service can optionally use a separate database +# connection. Set PLACEMENT_DB_ENABLED to True to use it. +# NOTE(cdent): This functionality depends on some code that is not +# yet merged in nova but is coming soon. +PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED) + +if is_suse; then + PLACEMENT_WSGI_DIR=${PLACEMENT_WSGI_DIR:-/srv/www/htdocs/placement} +else + PLACEMENT_WSGI_DIR=${PLACEMENT_WSGI_DIR:-/var/www/placement} +fi + +if is_ssl_enabled_service "placement-api" || is_service_enabled tls-proxy; then + PLACEMENT_SERVICE_PROTOCOL="https" +fi + +# Public facing bits +PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} +PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778} + +# Functions +# --------- + +# Test if any placement services are enabled +# is_placement_enabled +function is_placement_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"placement-" ]] && return 0 + return 1 +} + +# cleanup_placement() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_placement { + sudo rm -f $(apache_site_config_for placement-api) +} + +# _config_placement_apache_wsgi() - Set WSGI config files +function _config_placement_apache_wsgi { + sudo mkdir -p $PLACEMENT_WSGI_DIR + + local placement_api_apache_conf + local placement_api_port=$PLACEMENT_SERVICE_PORT + local venv_path="" + placement_api_apache_conf=$(apache_site_config_for placement-api) + + # reuse nova's cert if a cert is being used + if is_ssl_enabled_service "placement-api"; then + placement_ssl="SSLEngine On" + placement_certfile="SSLCertificateFile $NOVA_SSL_CERT" + placement_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" + fi + # reuse nova's venv if there is one as placement code lives + # there + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" + fi + + # copy wsgi application file + sudo cp $NOVA_DIR/nova/api/openstack/placement/placement-api.py $PLACEMENT_WSGI_DIR/placement-api + + sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$placement_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$PLACEMENT_WSGI_DIR/placement-api|g; + s|%SSLENGINE%|$placement_ssl|g; + s|%SSLCERTFILE%|$placement_certfile|g; + s|%SSLKEYFILE%|$placement_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + s|%APIWORKERS%|$API_WORKERS|g + " -i $placement_api_apache_conf +} + +# configure_placement() - Set config files, create data dirs, etc +function configure_placement { + if [ "$PLACEMENT_DB_ENABLED" != False ]; then + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + fi + + iniset $NOVA_CONF placement auth_type "password" + iniset $NOVA_CONF placement auth_url 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" + iniset $NOVA_CONF placement username placement + iniset $NOVA_CONF placement password "$SERVICE_PASSWORD" + iniset $NOVA_CONF placement user_domain_name "Default" + iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF placement project_domain_name "Default" + iniset $NOVA_CONF placement region_name "$REGION_NAME" + # TODO(cdent): auth_strategy, which is common to see in these + # blocks is not currently used here. For the time being the + # placement api uses the auth_strategy configuration setting + # established by the nova api. This avoids, for the time, being, + # creating redundant configuration items that are just used for + # testing. + + _config_placement_apache_wsgi +} + +# create_placement_accounts() - Set up required placement accounts +# and service and endpoints. +function create_placement_accounts { + create_service_user "placement" "admin" + local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement" + get_or_create_service "placement" "placement" "Placement Service" + get_or_create_endpoint \ + "placement" \ + "$REGION_NAME" \ + "$placement_api_url" \ + "$placement_api_url" \ + "$placement_api_url" +} + +# init_placement() - Create service user and endpoints +# If PLACEMENT_DB_ENABLED is true, create the separate placement db +# using, for now, the api_db migrations. +function init_placement { + if [ "$PLACEMENT_DB_ENABLED" != False ]; then + recreate_database placement + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + fi + create_placement_accounts +} + +# install_placement() - Collect source and prepare +function install_placement { + install_apache_wsgi + if is_ssl_enabled_service "placement-api"; then + enable_mod_ssl + fi +} + +# start_placement_api() - Start the API processes ahead of other things +function start_placement_api { + # Get right service port for testing + local service_port=$PLACEMENT_SERVICE_PORT + local placement_api_port=$PLACEMENT_SERVICE_PORT + + enable_apache_site placement-api + restart_apache_server + tail_log placement-api /var/log/$APACHE_NAME/placement-api.log + + echo "Waiting for placement-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then + die $LINENO "placement-api did not start" + fi +} + +function start_placement { + start_placement_api +} + +# stop_placement() - Disable the api service and stop it. 
+function stop_placement { + disable_apache_site placement-api + restart_apache_server +} + +# Restore xtrace +$_XTRACE_LIB_PLACEMENT + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/stack.sh b/stack.sh index 98cdfc40df..cf157ab520 100755 --- a/stack.sh +++ b/stack.sh @@ -569,6 +569,7 @@ source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat @@ -859,6 +860,13 @@ if is_service_enabled nova; then configure_nova fi +if is_service_enabled placement; then + # placement api + stack_install_service placement + cleanup_placement + configure_placement +fi + if is_service_enabled horizon; then # django openstack_auth install_django_openstack_auth @@ -1160,6 +1168,11 @@ if is_service_enabled nova; then init_nova_cells fi +if is_service_enabled placement; then + echo_summary "Configuring placement" + init_placement +fi + # Extras Configuration # ==================== @@ -1265,6 +1278,10 @@ if is_service_enabled nova; then start_nova create_flavors fi +if is_service_enabled placement; then + echo_summary "Starting Placement" + start_placement +fi if is_service_enabled cinder; then echo_summary "Starting Cinder" start_cinder diff --git a/unstack.sh b/unstack.sh index ece69acad8..d93b8353db 100755 --- a/unstack.sh +++ b/unstack.sh @@ -63,6 +63,7 @@ source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat @@ -111,6 +112,10 @@ if is_service_enabled nova; then stop_nova fi +if is_service_enabled placement; then + stop_placement +fi + if is_service_enabled glance; then stop_glance fi From 5237d16d69cf54487b72dd8d12862cb4339efcbc Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Tue, 30 Aug 2016 10:59:52 -0400 Subject: [PATCH 0132/1936] LVM: Handle missing loop device at cleanup When the loop device is not present because something has gone wrong, this will print an error: "losetup: option requires an argument -- 'd'" Just skip the losetup -d in this case. Change-Id: Iedc439b1ae924e9a599f6522eb081b83d43190c7 --- lib/lvm | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/lvm b/lib/lvm index b9d7c390a2..d35a76fb5f 100644 --- a/lib/lvm +++ b/lib/lvm @@ -58,7 +58,9 @@ function _clean_lvm_backing_file { if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then local vg_dev vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') - sudo losetup -d $vg_dev + if [[ -n "$vg_dev" ]]; then + sudo losetup -d $vg_dev + fi rm -f $backing_file fi } From c9f6327844c794789ea3c3905cc8e04523262f71 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 30 Aug 2016 17:21:30 -0400 Subject: [PATCH 0133/1936] Remove lib/ceph All jobs using ceph as a storage backend have been moved over to using the devstack-plugin-ceph repo in project-config so we should be safe to remove the now unused lib/ceph file. The files are left in place because the devstack plugin does not install xfsprogs but it's used by the create_disk function. And the ceph cinder backend file is left in place since the devstack-plugin-ceph repo uses that by setting CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}. 
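Deployments that still want Ceph should enable the external plugin from local.conf instead, along these lines (repository URL is illustrative, following the plugin registry naming):

    # Use the out-of-tree Ceph plugin now that lib/ceph is removed
    enable_plugin devstack-plugin-ceph git://git.openstack.org/openstack/devstack-plugin-ceph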
Change-Id: I3fb09fc92bc6ab614e86d701ea46d5741a76b7a8 --- extras.d/60-ceph.sh | 75 --------- functions-common | 2 +- lib/ceph | 381 -------------------------------------------- 3 files changed, 1 insertion(+), 457 deletions(-) delete mode 100644 extras.d/60-ceph.sh delete mode 100644 lib/ceph diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh deleted file mode 100644 index cc90128176..0000000000 --- a/extras.d/60-ceph.sh +++ /dev/null @@ -1,75 +0,0 @@ -# ceph.sh - DevStack extras script to install Ceph - -if is_service_enabled ceph; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/ceph - elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - echo_summary "Installing Ceph" - check_os_support_ceph - if [ "$REMOTE_CEPH" = "False" ]; then - install_ceph - echo_summary "Configuring Ceph" - configure_ceph - # NOTE (leseb): Do everything here because we need to have Ceph started before the main - # OpenStack components. Ceph OSD must start here otherwise we can't upload any images. - echo_summary "Initializing Ceph" - init_ceph - start_ceph - else - install_ceph_remote - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - if is_service_enabled glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_glance - fi - if is_service_enabled nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_nova - fi - if is_service_enabled cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_cinder - fi - if is_service_enabled n-cpu; then - # NOTE (leseb): the part below is a requirement to attach Ceph block devices - echo_summary "Configuring libvirt secret" - import_libvirt_secret_ceph - fi - - if [ "$REMOTE_CEPH" = "False" ]; then - if is_service_enabled glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_embedded_glance - fi - if is_service_enabled nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_embedded_nova - fi - if is_service_enabled cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_embedded_cinder - fi - fi - fi - - if [[ "$1" == "unstack" ]]; then - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - cleanup_ceph_embedded - stop_ceph - fi - cleanup_ceph_general - fi - - if [[ "$1" == "clean" ]]; then - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - cleanup_ceph_embedded - fi - cleanup_ceph_general - fi -fi diff --git a/functions-common b/functions-common index 3fdd71bffc..6312188ea8 100644 --- a/functions-common +++ b/functions-common @@ -1880,7 +1880,7 @@ function run_phase { # white listed elements in tree. We want these to move out # over time as well, but they are in tree, so we need to # manage that. - local exceptions="60-ceph.sh 80-tempest.sh" + local exceptions="80-tempest.sh" local extra extra=$(basename $extra_plugin_file_name) if [[ ! 
( $exceptions =~ "$extra" ) ]]; then diff --git a/lib/ceph b/lib/ceph deleted file mode 100644 index 1e55c48d68..0000000000 --- a/lib/ceph +++ /dev/null @@ -1,381 +0,0 @@ -#!/bin/bash -# -# lib/ceph -# Functions to control the configuration and operation of the **Ceph** storage service - -# Dependencies: -# -# - ``functions`` file -# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined - -# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``): -# -# - install_ceph -# - configure_ceph -# - init_ceph -# - start_ceph -# - stop_ceph -# - cleanup_ceph - -# Save trace setting -_XTRACE_LIB_CEPH=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects. -# Default is the common DevStack data directory. -CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph} -CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img - -# Set ``CEPH_CONF_DIR`` to the location of the configuration files. -# Default is ``/etc/ceph``. -CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph} - -# DevStack will create a loop-back disk formatted as XFS to store the -# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in -# kilobytes. -# Default is 1 gigabyte. -CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G -CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT} - -# Common -CEPH_FSID=$(uuidgen) -CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf - -# Glance -GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance} -GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images} -GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8} -GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8} - -# Nova -NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms} -NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8} -NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8} - -# Cinder -CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes} -CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8} -CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8} -CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder} -CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)} - -# Set ``CEPH_REPLICAS`` to configure how many replicas are to be -# configured for your Ceph cluster. By default we are configuring -# only one replica since this is way less CPU and memory intensive. If -# you are planning to test Ceph replication feel free to increase this value -CEPH_REPLICAS=${CEPH_REPLICAS:-1} -CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) - -# Connect to an existing Ceph cluster -REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) -REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} - -# Cinder encrypted volume tests are not supported with a Ceph backend due to -# bug 1463525. 
-ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False - - -# Functions -# ------------ - -function get_ceph_version { - local ceph_version_str - ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.') - echo $ceph_version_str -} - -# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt -# so it can connect to the Ceph cluster while attaching a Cinder block device -function import_libvirt_secret_ceph { - cat > secret.xml < - ${CINDER_CEPH_UUID} - - client.${CINDER_CEPH_USER} secret - - -EOF - sudo virsh secret-define --file secret.xml - sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER}) - sudo rm -f secret.xml -} - -# undefine_virsh_secret() - Undefine Cinder key secret from libvirt -function undefine_virsh_secret { - if is_service_enabled cinder || is_service_enabled nova; then - local virsh_uuid - virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') - sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1 - fi -} - - -# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph -function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f23|f24) ]]; then - echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)" - if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then - die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" - fi - NO_UPDATE_REPOS=False - fi -} - -# cleanup_ceph() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_ceph_remote { - # do a proper cleanup from here to avoid leftover on the remote Ceph cluster - if is_service_enabled glance; then - sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled cinder; then - sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled c-bak; then - sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled nova; then - iniset $NOVA_CONF libvirt rbd_secret_uuid "" - sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - fi -} - -function cleanup_ceph_embedded { - sudo killall -w -9 ceph-mon - sudo killall -w -9 ceph-osd - sudo rm -rf ${CEPH_DATA_DIR}/*/* - if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then - sudo umount ${CEPH_DATA_DIR} - fi - if [[ -e ${CEPH_DISK_IMAGE} ]]; then - sudo rm -f ${CEPH_DISK_IMAGE} - fi - - # purge ceph config file and keys - sudo rm -rf ${CEPH_CONF_DIR}/* -} - -function cleanup_ceph_general { - undefine_virsh_secret -} - - -# configure_ceph() - Set config files, create data dirs, etc -function configure_ceph { - local count=0 - - # create a backing file disk - create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE} - - # populate ceph directory - sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp} - - # create ceph monitor initial key and directory - sudo ceph-authtool 
/var/lib/ceph/tmp/keyring.mon.$(hostname) \ - --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \ - --cap mon 'allow *' - sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) - - # create a default ceph configuration file - sudo tee ${CEPH_CONF_FILE} > /dev/null < /dev/null - sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - fi -} - -function configure_ceph_embedded_cinder { - # Configure Cinder service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID} - fi -} - -# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up -function configure_ceph_cinder { - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \ - mon "allow r" \ - osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \ - sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring -} - -# init_ceph() - Initialize databases, etc. -function init_ceph { - # clean up from previous (possibly aborted) runs - # make sure to kill all ceph processes first - sudo pkill -f ceph-mon || true - sudo pkill -f ceph-osd || true -} - -# install_ceph() - Collect source and prepare -function install_ceph_remote { - install_package ceph-common -} - -function install_ceph { - install_package ceph -} - -# start_ceph() - Start running processes, including screen -function start_ceph { - if is_ubuntu; then - sudo initctl emit ceph-mon id=$(hostname) - for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do - sudo start ceph-osd id=${id} - done - else - sudo service ceph start - fi -} - -# stop_ceph() - Stop running processes (non-screen) -function stop_ceph { - if is_ubuntu; then - sudo service ceph-mon-all stop > /dev/null 2>&1 - sudo service ceph-osd-all stop > /dev/null 2>&1 - else - sudo service ceph stop > /dev/null 2>&1 - fi -} - - -# Restore xtrace -$_XTRACE_LIB_CEPH - -## Local variables: -## mode: shell-script -## End: From 0385caa2ee503486cab1a81dbdda691aa259134c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 31 Aug 2016 10:07:06 +1000 Subject: [PATCH 0134/1936] Fix comment in plugin-registry header Fix the comment to actually be a comment. Regenerate page. Although we've got a pretty cool system for generating this, I wonder if anyone actually looks at it? Maybe it's just helpful as a form of SEO. Change-Id: I15aaa983716f9ee897293c2954ca7ae561951372 --- data/devstack-plugins-registry.header | 20 +++++++++----------- doc/source/plugin-registry.rst | 20 +++++++++----------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header index 6119ab5284..576dbbd35a 100644 --- a/data/devstack-plugins-registry.header +++ b/data/devstack-plugins-registry.header @@ -1,18 +1,16 @@ -.. +.. Note to patch submitters: - Note to patch submitters: + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # - # ============================= # - # THIS FILE IS AUTOGENERATED ! 
# - # ============================= # + ** Plugins are found automatically and added to this list ** - ** Plugins are found automatically and added to this list ** + This file is created by a periodic proposal job. You should not + edit this file. - This file is created by a periodic proposal job. You should not - edit this file. - - You should edit the files data/devstack-plugins-registry.footer - data/devstack-plugins-registry.header to modify this text. + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. ========================== DevStack Plugin Registry diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index bb8527041e..d6df1efa4f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -1,18 +1,16 @@ -.. +.. Note to patch submitters: - Note to patch submitters: + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # - # ============================= # - # THIS FILE IS AUTOGENERATED ! # - # ============================= # + ** Plugins are found automatically and added to this list ** - ** Plugins are found automatically and added to this list ** + This file is created by a periodic proposal job. You should not + edit this file. - This file is created by a periodic proposal job. You should not - edit this file. - - You should edit the files data/devstack-plugins-registry.footer - data/devstack-plugins-registry.header to modify this text. + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. ========================== DevStack Plugin Registry From d3fab7bfc16284a24a66f1724805c35535432fc0 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Tue, 30 Aug 2016 18:13:52 -0700 Subject: [PATCH 0135/1936] Revert "Allow properly overriding DEST" After I00847bb6733febf105855ae6fc577a7c904ec4b4, we cannot see the test result (testr_result.html) on gate jobs. So let's revert the patch for verifying the test result on the gate. Change-Id: I9db1ff9f43b22d1634a43c7d5e502cc205aa26f2 Closes-Bug: #1617476 --- stackrc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stackrc b/stackrc index f7bdbc2b04..c14085a910 100644 --- a/stackrc +++ b/stackrc @@ -16,6 +16,15 @@ source $RC_DIR/functions # Destination path for installation DEST=/opt/stack +# Destination for working data +DATA_DIR=${DEST}/data + +# Destination for status files +SERVICE_DIR=${DEST}/status + +# Path for subunit output file +SUBUNIT_OUTPUT=${DEST}/devstack.subunit + # Determine stack user if [[ $EUID -eq 0 ]]; then STACK_USER=stack @@ -121,15 +130,6 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi -# Destination for working data -DATA_DIR=${DATA_DIR:-$DEST/data} - -# Destination for status files -SERVICE_DIR=${SERVICE_DIR:-$DEST/status} - -# Path for subunit output file -SUBUNIT_OUTPUT=${SUBUNIT_OUTPUT:-$DEST/devstack.subunit} - # Default for log coloring is based on interactive-or-not. 
# Baseline assumption is that non-interactive invocations are for CI, # where logs are to be presented as browsable text files; hence color From 43ff27b7e8e9d209d8f1fc0483da717296be93bc Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 30 Aug 2016 21:13:15 -0400 Subject: [PATCH 0136/1936] Use wsgi-script installed nova-placement-api We should use the standard install nova-placement-api script which is managed by the python package instead of a one off copy procedure. Depends-On: I00d032554de273d7493cfb467f81687c08fd5389 Change-Id: I74b39d6a0cedea7c18ce8080dcddb43d13df1de8 --- lib/placement | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/lib/placement b/lib/placement index 4e80c55c26..56a4b56675 100644 --- a/lib/placement +++ b/lib/placement @@ -40,12 +40,6 @@ PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement} # yet merged in nova but is coming soon. PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED) -if is_suse; then - PLACEMENT_WSGI_DIR=${PLACEMENT_WSGI_DIR:-/srv/www/htdocs/placement} -else - PLACEMENT_WSGI_DIR=${PLACEMENT_WSGI_DIR:-/var/www/placement} -fi - if is_ssl_enabled_service "placement-api" || is_service_enabled tls-proxy; then PLACEMENT_SERVICE_PROTOCOL="https" fi @@ -73,11 +67,11 @@ function cleanup_placement { # _config_placement_apache_wsgi() - Set WSGI config files function _config_placement_apache_wsgi { - sudo mkdir -p $PLACEMENT_WSGI_DIR - local placement_api_apache_conf local placement_api_port=$PLACEMENT_SERVICE_PORT local venv_path="" + local nova_bin_dir="" + nova_bin_dir=$(get_python_exec_prefix) placement_api_apache_conf=$(apache_site_config_for placement-api) # reuse nova's cert if a cert is being used @@ -90,16 +84,14 @@ function _config_placement_apache_wsgi { # there if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" + nova_bin_dir=${PROJECT_VENV["nova"]}/bin fi - # copy wsgi application file - sudo cp $NOVA_DIR/nova/api/openstack/placement/placement-api.py $PLACEMENT_WSGI_DIR/placement-api - sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf sudo sed -e " s|%PUBLICPORT%|$placement_api_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$PLACEMENT_WSGI_DIR/placement-api|g; + s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g; s|%SSLENGINE%|$placement_ssl|g; s|%SSLCERTFILE%|$placement_certfile|g; s|%SSLKEYFILE%|$placement_keyfile|g; From 11817482c03b3994cde93320f138d5a7ccd821d3 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 21 Jul 2016 16:02:49 +0900 Subject: [PATCH 0137/1936] lib/neutron: Use NEUTRON_PLUGIN instead of hardcoding ml2 Change-Id: Ia60381694f30978984cdf33e3141dc153d294e17 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c1552e3d06..3fbcbd9a4d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -139,7 +139,7 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - iniset $NEUTRON_CONF DEFAULT core_plugin ml2 + iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_PLUGIN iniset $NEUTRON_CONF DEFAULT policy_file $policy_file iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True From a25ae6c03de2d87bca4f708ffc338c66164eec3b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 31 Aug 2016 08:56:37 -0400 Subject: [PATCH 0138/1936] convert apache logs to color for screen If we left the ansi color codes in apache logs, we can run a sed script to convert the escaped escapes back to ansi 
escapes which make the logs colorized again. There are 8 \ because we need to end up with 2 in the final sed, and we get interopolated twice. How much fun is escape interpolation? All the fun. Change-Id: Id8531cf03ba80f0df62f20add02e757bd63d4f2d --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 3fdd71bffc..7524eab68f 100644 --- a/functions-common +++ b/functions-common @@ -1686,7 +1686,7 @@ function tail_log { local logfile=$2 if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$name" "sudo tail -f $logfile" + screen_process "$name" "sudo tail -f $logfile | sed 's/\\\\\\\\x1b/\o033/g'" fi } From 44bf88cd3990c5255e1a0b94842446b8a472b87d Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 31 Aug 2016 10:39:46 -0400 Subject: [PATCH 0139/1936] Fix region name placement config option The placement config option for setting the region is actually 'os_region_name', not 'region_name', see: https://review.openstack.org/#/c/358797/13/nova/conf/placement.py Change-Id: I62e79c6860e2329428e3115d14ee86f5ff15d7e8 --- lib/placement | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/placement b/lib/placement index 56a4b56675..165c670206 100644 --- a/lib/placement +++ b/lib/placement @@ -114,7 +114,7 @@ function configure_placement { iniset $NOVA_CONF placement user_domain_name "Default" iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME" iniset $NOVA_CONF placement project_domain_name "Default" - iniset $NOVA_CONF placement region_name "$REGION_NAME" + iniset $NOVA_CONF placement os_region_name "$REGION_NAME" # TODO(cdent): auth_strategy, which is common to see in these # blocks is not currently used here. For the time being the # placement api uses the auth_strategy configuration setting From eb6ef2f80ea11aa7ba3f9ff52cb03704db3547d4 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Mon, 29 Aug 2016 10:02:41 -0400 Subject: [PATCH 0140/1936] Create the nova_api_cell0 database As part of Nova cellsv2 there is now a third database that must be setup for use by Nova. This database is an exact copy of the 'nova' database. Only do this if NOVA_CONFIGURE_CELLSV2 is overridden. Change-Id: I8775b8066ba85fbdbcdfb42c28cb567fc7759fe5 --- lib/nova | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 235b533d77..f5a798c923 100644 --- a/lib/nova +++ b/lib/nova @@ -85,6 +85,9 @@ METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} +# Option to initialize CellsV2 environment +NOVA_CONFIGURE_CELLSV2=$(trueorfalse False NOVA_CONFIGURE_CELLSV2) + # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. SCHEDULER=${SCHEDULER:-filter_scheduler} @@ -682,10 +685,15 @@ function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then - # (Re)create nova database + # (Re)create nova databases recreate_database nova + if [ "$NOVA_CONFIGURE_CELLSV2" != "False" ]; then + recreate_database nova_api_cell0 + fi - # Migrate nova database + # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has + # been run this migrates the "nova" and "nova_api_cell0" database. + # Otherwise it just migrates the "nova" database. 
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync if is_service_enabled n-cell; then From 0bf25506b03c78dd809e3638cabcf1fc11e9f1a0 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Tue, 30 Aug 2016 22:14:04 +0000 Subject: [PATCH 0141/1936] Move RABBIT_USERID to lib/rpc_backend This moves setting of RABBIT_USERID from stack.sh to lib/rpc_backend so it may be used in grenade runs, which don't have the defaulted value from stack.sh. The RABBIT_USERID is needed in order to call get_transport_url in lib/rpc_backend. Change-Id: I6f211e9102f79418f9f94a15784f91c4150ab8a7 --- lib/rpc_backend | 2 ++ stack.sh | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 0ee46dca6f..97b1aa409a 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -24,6 +24,8 @@ _XTRACE_RPC_BACKEND=$(set +o | grep xtrace) set +o xtrace +RABBIT_USERID=${RABBIT_USERID:-stackrabbit} + # Functions # --------- diff --git a/stack.sh b/stack.sh index cf157ab520..09466a6894 100755 --- a/stack.sh +++ b/stack.sh @@ -664,7 +664,6 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || # Rabbit connection info # In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit # isn't enabled. -RABBIT_USERID=${RABBIT_USERID:-stackrabbit} if is_service_enabled rabbit; then RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." From 542abbab3bc58c40a499531165943df4bf2e4ab0 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 31 Aug 2016 15:47:56 -0500 Subject: [PATCH 0142/1936] Keystone uwsgi set lazy-apps=true MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keystone had a problem where there was a memcached socket opened very early on startup which then got shared between worker processes when running under uwsgi. This can be prevented by setting lazy-apps so this is the recommended setting. See http://uwsgi-docs.readthedocs.io/en/latest/ThingsToKnow.html """uWSGI tries to (ab)use the Copy On Write semantics of the fork() call whenever possible. By default it will fork after having loaded your applications to share as much of their memory as possible. If this behavior is undesirable for some reason, use the lazy-apps option. This will instruct uWSGI to load the applications after each worker’s fork(). Beware as there is an older options named lazy that is way more invasive and highly discouraged (it is still here only for backward compatibility) """ Change-Id: I6f271dc906528f0c86060452deaf15df81b267d2 Related-Bug: 1600394 --- lib/keystone | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/keystone b/lib/keystone index 6198e43b58..6e5d3dc42c 100644 --- a/lib/keystone +++ b/lib/keystone @@ -338,6 +338,8 @@ function configure_keystone { iniset "$file" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. iniset "$file" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$file" uwsgi lazy-apps true done fi From eb77eef8a57f8a4d012d8bee316e56b1764919de Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 31 Aug 2016 16:26:17 -0500 Subject: [PATCH 0143/1936] Keystone re-enable catalog caching Catalog caching was disabled due to bug 1537617, but this has been fixed for some time. Re-enabling to get some performance back. 
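With the explicit override gone, the catalog backend simply follows the
global cache settings that DevStack already writes. A rough sketch of the
resulting keystone.conf (illustrative only; the backend and memcache_servers
values are the ones lib/keystone already sets via iniset, and the catalog
caching default comes from keystone itself, not from this patch):

    [cache]
    backend = oslo_cache.memcache_pool
    memcache_servers = localhost:11211

    [catalog]
    # no "caching = False" override any more; keystone's own default applies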
Change-Id: Ic0edf5c70a5040edf3393dbd1e110ab5fb56c110 Related-Bug: 1537617 --- lib/keystone | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/keystone b/lib/keystone index 6198e43b58..f8119ebeea 100644 --- a/lib/keystone +++ b/lib/keystone @@ -248,9 +248,6 @@ function configure_keystone { iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool" iniset $KEYSTONE_CONF cache memcache_servers localhost:11211 - # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617 - iniset $KEYSTONE_CONF catalog caching "False" - iniset_rpc_backend keystone $KEYSTONE_CONF # Register SSL certificates if provided From 40b433fb07d2612075a30e2e18fbae094c2c8ec0 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 1 Sep 2016 02:07:12 -0700 Subject: [PATCH 0144/1936] Make unstack.sh more independently deterministic In some initialization conditions (having never ran stack.sh) the result of unstack.sh is dependent on if the user had previously installed lvm2 or disabled the cinder service. This change makes all results the same with a bit of LBYL. There's also a drive-by to put a comment back where it belongs after being accidentally moved in the related change. Related-Change: I09b1a7bee0785e5e1bb7dc96158a654bd3f15c83 Change-Id: I9a7e052677d60cbbbdd582877f3c6c48c387f668 Closes-Bug: #1619195 --- unstack.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/unstack.sh b/unstack.sh index d93b8353db..f888896cb9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -189,11 +189,13 @@ if [[ -n "$SCREEN" ]]; then fi fi -# BUG: maybe it doesn't exist? We should isolate this further down. # NOTE: Cinder automatically installs the lvm2 package, independently of the -# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove, -# /etc/lvm/lvm.conf, etc.) is here. -if is_service_enabled cinder; then +# enabled backends. So if Cinder is enabled, and installed successfully we are +# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. +if is_service_enabled cinder && is_package_installed lvm2; then + # Using /bin/true here indicates a BUG - maybe the + # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should + # isolate this further down in lib/cinder cleanup. clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true clean_lvm_filter fi From 6930ba312f787e0459f7455ac6ba2a70b3c7c37d Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 1 Sep 2016 07:25:28 -0500 Subject: [PATCH 0145/1936] Specify a cloud for openstack complete there is a bug in openstackclient that is causing a password prompt Related-Bug: 1619274 here. Change-Id: I3aee25845ece846ed2b35aa242dc684b55ac2381 --- stack.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 09466a6894..1cc4eca1b9 100755 --- a/stack.sh +++ b/stack.sh @@ -1383,7 +1383,12 @@ check_libs_from_git # =============== # Prepare bash completion for OSC -openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null +# +# BUG: https://bugs.launchpad.net/python-openstackclient/+bug/1619274 +# the os-cloud param should not be required but if we don't provide it +# then this command hangs indefinitely if something is wrong with +# default environment credentials. 
+openstack --os-cloud=devstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null # If cinder is configured, set global_filter for PV devices if is_service_enabled cinder; then From 71119b47a0e3fb86b349732a1777be8c83d2e6c2 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 15 Aug 2016 12:06:55 -0700 Subject: [PATCH 0146/1936] Setup cellsv2 for Nova This patch setup cellsv2 for Nova after plugin initialization phase. Since this requires compute hosts to be started, we need to do it after we have initialized all other plugins. Things like ironic aren't setup when we were running this as part of nova setup, and thus this command can fail. When cellsv1 is used (n-cell is enabled) skip calling cells_v2 simple_cell_setup, which will never have hosts at the top level and which will always fail. Change-Id: Ic7d0115da51d6ea17ee49071af259a7789c62ab9 Depends-On: I9bbaa4c92503222c9fd015fe075926b50f3dcc8c --- lib/nova | 9 +++++++++ stack.sh | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/lib/nova b/lib/nova index f5a798c923..670f605fd6 100644 --- a/lib/nova +++ b/lib/nova @@ -946,6 +946,15 @@ function create_flavors { fi } +# create_cell(): Group the available hosts into a cell +function create_cell { + if ! is_service_enabled n-cell; then + nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) + else + echo 'Skipping cellsv2 setup for this cellsv1 configuration' + fi +} + # Restore xtrace $_XTRACE_LIB_NOVA diff --git a/stack.sh b/stack.sh index 1cc4eca1b9..4b071841b8 100755 --- a/stack.sh +++ b/stack.sh @@ -1379,6 +1379,14 @@ service_check check_libs_from_git +# Configure nova cellsv2 +# ---------------------- + +# Do this late because it requires compute hosts to have started +if is_service_enabled n-api && [ "$NOVA_CONFIGURE_CELLSV2" == "True" ]; then + create_cell +fi + # Bash completion # =============== From 085f2fb0e2d399281b137c9fa95a7a39e270be7d Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 2 Sep 2016 14:00:39 -0700 Subject: [PATCH 0147/1936] Remove the same configurations as the default The default value of force_dhcp_release is True on Nova side: https://github.com/openstack/nova/blob/master/nova/conf/network.py#L306 The default value of vnc.enabled is True on Nova side: https://github.com/openstack/nova/blob/master/nova/conf/vnc.py#L27 The default value of spice.enabled is False on Nova side: https://github.com/openstack/nova/blob/master/nova/conf/spice.py#L30 So it is not necessary to set them on Devstack. Change-Id: I67cc103269d2d612ad443c72f2a77d7ae2ca09e3 --- lib/nova | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/nova b/lib/nova index 670f605fd6..8496868821 100644 --- a/lib/nova +++ b/lib/nova @@ -461,7 +461,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER" iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS" - iniset $NOVA_CONF DEFAULT force_dhcp_release "True" iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" @@ -557,7 +556,6 @@ function create_nova_conf { # For multi-host, this should be the management ip of the compute host. 
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - iniset $NOVA_CONF vnc enabled true iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" @@ -575,8 +573,6 @@ function create_nova_conf { iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - else - iniset $NOVA_CONF spice enabled false fi # Set the oslo messaging driver to the typical default. This does not From 0a40710b9f82555abd0478498d743a973680f5d9 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Sun, 4 Sep 2016 12:52:01 +0000 Subject: [PATCH 0148/1936] Adding vfio to cgroup_device_acl during libvirt configuration vfio should be added to cgroups on some of the Distributions like Ubuntu 16-04 Change-Id: I7239858b6307e37bf1237b92d69a0520ab5ad304 --- lib/nova_plugins/functions-libvirt | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index cc013c3181..6b7c7c263b 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -65,6 +65,7 @@ cgroup_device_acl = [ "/dev/random", "/dev/urandom", "/dev/ptmx", "/dev/kvm", "/dev/kqemu", "/dev/rtc", "/dev/hpet","/dev/net/tun", + "/dev/vfio/vfio", ] EOF fi From 0063495b00409651c3bd02a7b3a31ba5b6ea40c0 Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Fri, 26 Aug 2016 10:29:20 +0200 Subject: [PATCH 0149/1936] Make PIP_GET_PIP_URL configurable via local.conf The default get_pip url regulary times out when starting devstack from behind company firewalls. Making this a configureable variable, user can make use of internal git-pip.py mirrors without modifying any code. Change-Id: I66a5534d51ab23a4d8586c27d37b4b6b8a6892c9 --- tools/install_pip.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 12676998d2..a5ccb19399 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -24,7 +24,20 @@ set -o xtrace FILES=$TOP_DIR/files -PIP_GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py +# The URL from where the get-pip.py file gets downloaded. If a local +# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that +# mirror in local.conf to avoid download timeouts. +# Example: +# PIP_GET_PIP_URL="http://local-server/get-pip.py" +# +# Note that if get-pip.py already exists in $FILES this script will +# not re-download or check for a new version. For example, this is +# done by openstack-infra diskimage-builder elements as part of image +# preparation [1]. This prevents any network access, which can be +# unreliable in CI situations. 
+# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip + +PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)" GetDistro From f3daa9548efd53e09d141f588342d4d94683b6c8 Mon Sep 17 00:00:00 2001 From: scottda Date: Tue, 6 Sep 2016 13:45:32 -0600 Subject: [PATCH 0150/1936] Add thin-provisioning-tools for Cinder Ubuntu's LVM packaging does not support thin provisioning by default: /usr/sbin/thin_check: execvp failed: No such file or directory This is fixed with install of thin-provisioning-tools. Change-Id: I31f572934ea94cae6e2aea27a2c731ee5bca68d3 Closes-Bug: #1615134 --- files/debs/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/cinder b/files/debs/cinder index 3595e011da..c1b79fda47 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -3,3 +3,4 @@ open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise qemu-utils tgt # NOPRIME +thin-provisioning-tools From 95469032656c442982730ea307644d47d7755a81 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 8 Sep 2016 17:08:36 -0700 Subject: [PATCH 0151/1936] Set sensible default for LB_PHYSICAL_INT Configure the linux bridge physical interface to use the interface for the default route on the current host. In the future we should consider using a dangling interface so that we aren't affecting the host instances networking but this roughly matches what testing has been using in the past. Change-Id: I7859437f97e6cab929e90208fe56f7efd62dfe01 --- lib/neutron-legacy | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b1b5230fdd..1b7c4db507 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -211,13 +211,15 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} +default_route_dev=$(ip route | grep ^default | awk '{print $5}') +die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" # With the linuxbridge agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to # the name of the network interface to use for the physical # network. # # Example: ``LB_PHYSICAL_INTERFACE=eth1`` -LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} +LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev} # When Neutron tunnels are enabled it is needed to specify the # IP address of the end point in the local server. This IP is set From 9c69eacabaac45a7ea3574744975fac5cf1ac10d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 12 Sep 2016 14:58:20 +1000 Subject: [PATCH 0152/1936] Don't add deb-* packaging repos to plugin list A couple of hundred of these were added with Ia02f4e1819ac47b12b4ce4381e04253eb26e9f70 and you can see in some of the proposals at I21fd2b3866efe66dd1f7173003c2521688aa7fd6 they're starting to match. Just ignore packaging repos as they're not really relevant for the purposes of plugin list. 
Change-Id: Iaf9e0c0fb672a70c3aee1bbcf587bb0d387e5945 --- tools/generate-devstack-plugins-list.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index bbad1bf502..56f12e7ab6 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -46,6 +46,9 @@ def is_in_openstack_namespace(proj): # Check if this project has a plugin file def has_devstack_plugin(proj): + # Don't link in the deb packaging repos + if "openstack/deb-" in proj: + return False r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj) return r.status_code == 200 From 6390d5ef8236028c8dc00c5514e7ceac60ffd627 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 12 Sep 2016 11:23:19 -0400 Subject: [PATCH 0153/1936] libvirt: install python-guestfs when ENABLE_FILE_INJECTION=True There is a bit of a weird history here, but the net is we're not installing python-guestfs when ENABLE_FILE_INJECTION is set, which it is in the gate-tempest-dsvm-neutron-full-ssh job, which makes file injection (personality) tests fail. The history: Commit 0ae942b41c6dcd0fe7353e7d68574194fb72a66d moved installing python-guestfs to the hypervisor-libvirt file and it was conditional on a flag to enable file injection and the backing distro. Commit a3c94468baa159840a47c34cf94d97d816208313 removed the ability to configure nova for file injection, which never made any Tempest tests fail because we didn't have a job that tested file injection with ssh, which is what gate-tempest-dsvm-neutron-full-ssh does. Commit 6d3670a65280d71529f8aad8ca5a0422abffebd0 added the ability back to enable file injection and the gate-tempest-dsvm-neutron-full-ssh job uses it, but missed added the condition back in from 0ae942b41 which installed the python-guestfs package. This change adds that back in. Change-Id: I1c1ef093b70007100646c086dc5724cd64751d00 Closes-Bug: #1622649 --- lib/nova_plugins/hypervisor-libvirt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 20dde8ecc5..b4eb3c12a2 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -100,6 +100,14 @@ function install_nova_hypervisor { yum_install libcgroup-tools fi fi + + if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then + if is_ubuntu; then + install_package python-guestfs + elif is_fedora || is_suse; then + install_package python-libguestfs + fi + fi } # start_nova_hypervisor - Start any required external services From 14cb490d1e9837c237ade5c0a63de56faae5e89d Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 8 Sep 2016 13:07:59 -0400 Subject: [PATCH 0154/1936] nova: stop setting deprecated use_usb_tablet option The use_usb_tablet option is replaced by the pointer_model option. 
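In terms of the rendered nova.conf this amounts to roughly the following
(a sketch of the iniset output, shown only for illustration; the real
change is the hunk below):

    before:
        [libvirt]
        use_usb_tablet = False

    after:
        [DEFAULT]
        pointer_model = ps2mouse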
Depends-On: Id18b5503799922e4096bde296a9e7bb4f2a994aa Change-Id: Ic2a49f88df988c6404c1c72e9ee28a487e4f7908 --- lib/nova_plugins/hypervisor-libvirt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index b4eb3c12a2..167ab6f2e7 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -40,7 +40,8 @@ function configure_nova_hypervisor { configure_libvirt iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF libvirt cpu_mode "none" - iniset $NOVA_CONF libvirt use_usb_tablet "False" + # Do not enable USB tablet input devices to avoid QEMU CPU overhead. + iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" From 57d390c5d7a52befc21d4d50ee676fb904b76d6d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 14 Sep 2016 06:59:40 +0000 Subject: [PATCH 0155/1936] Updated from generate-devstack-plugins-list Change-Id: I21fd2b3866efe66dd1f7173003c2521688aa7fd6 --- doc/source/plugin-registry.rst | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index d6df1efa4f..a59a5464b9 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -1,16 +1,16 @@ -.. Note to patch submitters: +.. Note to patch submitters: - # ============================= # - # THIS FILE IS AUTOGENERATED ! # - # ============================= # + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # - ** Plugins are found automatically and added to this list ** + ** Plugins are found automatically and added to this list ** - This file is created by a periodic proposal job. You should not - edit this file. + This file is created by a periodic proposal job. You should not + edit this file. - You should edit the files data/devstack-plugins-registry.footer - data/devstack-plugins-registry.header to modify this text. + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. ========================== DevStack Plugin Registry @@ -24,6 +24,7 @@ official OpenStack projects. 
====================================== === Plugin Name URL ====================================== === +almanach `git://git.openstack.org/openstack/almanach `__ aodh `git://git.openstack.org/openstack/aodh `__ app-catalog-ui `git://git.openstack.org/openstack/app-catalog-ui `__ astara `git://git.openstack.org/openstack/astara `__ @@ -61,11 +62,12 @@ gce-api `git://git.openstack.org/openstack/gce-ap gnocchi `git://git.openstack.org/openstack/gnocchi `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ heat `git://git.openstack.org/openstack/heat `__ -higgins `git://git.openstack.org/openstack/higgins `__ horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ +karbor `git://git.openstack.org/openstack/karbor `__ +karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ kingbird `git://git.openstack.org/openstack/kingbird `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ magnum `git://git.openstack.org/openstack/magnum `__ @@ -110,6 +112,7 @@ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutro neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ nova-docker `git://git.openstack.org/openstack/nova-docker `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ +nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ @@ -122,8 +125,6 @@ scalpels `git://git.openstack.org/openstack/scalpe searchlight `git://git.openstack.org/openstack/searchlight `__ searchlight-ui `git://git.openstack.org/openstack/searchlight-ui `__ senlin `git://git.openstack.org/openstack/senlin `__ -smaug `git://git.openstack.org/openstack/smaug `__ -smaug-dashboard `git://git.openstack.org/openstack/smaug-dashboard `__ solum `git://git.openstack.org/openstack/solum `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ @@ -137,6 +138,8 @@ watcher `git://git.openstack.org/openstack/watche watcher-dashboard `git://git.openstack.org/openstack/watcher-dashboard `__ zaqar `git://git.openstack.org/openstack/zaqar `__ zaqar-ui `git://git.openstack.org/openstack/zaqar-ui `__ +zun `git://git.openstack.org/openstack/zun `__ +zun-ui `git://git.openstack.org/openstack/zun-ui `__ ====================================== === From cb3a216c2315041f1665dda2b81555883ba2dc6b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 15 Sep 2016 06:52:36 +0000 Subject: [PATCH 0156/1936] Updated from generate-devstack-plugins-list Change-Id: Ib75ecf5c822f3778978177c793399afbd802bf70 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index a59a5464b9..e9b0480344 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -129,6 +129,7 @@ solum `git://git.openstack.org/openstack/solum tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tricircle `git://git.openstack.org/openstack/tricircle `__ +trio2o `git://git.openstack.org/openstack/trio2o `__ trove 
`git://git.openstack.org/openstack/trove `__ trove-dashboard `git://git.openstack.org/openstack/trove-dashboard `__ vitrage `git://git.openstack.org/openstack/vitrage `__ From 81d89cf3584a5edadbaa2514305cf5721b29cdff Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 16 Sep 2016 06:52:00 +0000 Subject: [PATCH 0157/1936] Updated from generate-devstack-plugins-list Change-Id: I20039918452f6aa430037ae986f7cd88bd220d76 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index e9b0480344..9d023bf5e2 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -103,6 +103,7 @@ networking-ovs-dpdk `git://git.openstack.org/openstack/networ networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid `__ networking-powervm `git://git.openstack.org/openstack/networking-powervm `__ networking-sfc `git://git.openstack.org/openstack/networking-sfc `__ +networking-vpp `git://git.openstack.org/openstack/networking-vpp `__ networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ neutron `git://git.openstack.org/openstack/neutron `__ neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ From 734f144f5d47c9b76562d5b5c705428be0963aec Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Sat, 17 Sep 2016 19:28:53 -0400 Subject: [PATCH 0158/1936] Neutron L3: account for all default routes Some systems may have more than one default route. Set up iptables NAT rules on all v4 default route devices. Accept RAs on all v6 default route devices. Closes-Bug: #1624773 Change-Id: If58509297497ea33c6c156f083a4394000bd0561 --- lib/neutron_plugins/services/l3 | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2e96284357..09c08e3280 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -88,8 +88,10 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} -default_route_dev=$(ip route | grep ^default | awk '{print $5}') -die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" +default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') +die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices" + +default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}') function _determine_config_l3 { local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" @@ -121,7 +123,9 @@ function _configure_neutron_l3_agent { _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" fi else - sudo iptables -t nat -A POSTROUTING -o $default_route_dev -s $FLOATING_RANGE -j MASQUERADE + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done fi } @@ -371,11 +375,13 @@ function _neutron_configure_router_v6 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - # Ensure IPv6 RAs are accepted on the interface with the default route. + # Ensure IPv6 RAs are accepted on interfaces with a default route. # This is needed for neutron-based devstack clouds to work in # IPv6-only clouds in the gate. Please do not remove this without # talking to folks in Infra. 
- sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 + for d in $default_v6_route_devs; do + sudo sysctl -w net.ipv6.conf.$d.accept_ra=2 + done # Ensure IPv6 forwarding is enabled on the host sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge From d2ef615d8f2edc9d8b535c94fca1a5afde3d0694 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 20 Sep 2016 04:12:11 -0700 Subject: [PATCH 0159/1936] Neutron: enable setting debug as True or False The flag ENABLE_DEBUG_LOG_LEVEL indicates if this should be set or not. This will now be supported in Neutron. Change-Id: I3afe0546b379873247fee1ef9f4cc2708a7b5713 --- lib/neutron | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index 7442efd2c1..3efa685caf 100644 --- a/lib/neutron +++ b/lib/neutron @@ -126,7 +126,7 @@ function configure_neutron_new { iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset_rpc_backend neutron $NEUTRON_CONF @@ -172,7 +172,7 @@ function configure_neutron_new { # Neutron OVS or LB agent if is_service_enabled neutron-agent; then iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_PLUGIN_CONF DEFAULT debug True + iniset $NEUTRON_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then @@ -188,7 +188,7 @@ function configure_neutron_new { if is_service_enabled neutron-dhcp; then cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - iniset $NEUTRON_DHCP_CONF DEFAULT debug True + iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # make it so we have working DNS from guests iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True @@ -202,7 +202,7 @@ function configure_neutron_new { iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT iniset $NEUTRON_CONF DEFAULT service_plugins router iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" - iniset $NEUTRON_L3_CONF DEFAULT debug True + iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF fi @@ -210,7 +210,7 @@ function configure_neutron_new { if is_service_enabled neutron-metadata-agent; then cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - iniset $NEUTRON_META_CONF DEFAULT debug True + iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" From 4b49e409f853104dae021dfca1a9342ec9ac4709 Mon Sep 17 00:00:00 2001 From: Gregory Haynes Date: Wed, 31 Aug 2016 18:19:51 -0700 Subject: [PATCH 0160/1936] Use apache for tls-proxy ssl termination Stud is now abandonware (see https://github.com/bumptech/stud) and is not packaged in xenial. Lets use Apache for SSL termination since its there already. 
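The termination itself becomes a plain mod_ssl + mod_proxy virtual host per
proxied service. A minimal sketch of the site config that the reworked
start_tls_proxy writes (variable names as in the hunk below; the exact
directives live in lib/tls and may differ slightly from this sketch):

    Listen $f_port

    <VirtualHost $f_host:$f_port>
        SSLEngine On
        SSLCertificateFile $DEVSTACK_CERT

        <Location />
            ProxyPass http://$b_host:$b_port/ retry=5 nocanon
            ProxyPassReverse http://$b_host:$b_port/
        </Location>
    </VirtualHost>

After writing the vhost the function enables the ssl, proxy and proxy_http
modules and reloads (rather than restarts) apache, so the new site is picked
up without fighting over ports the old process still holds.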
Change-Id: Ifcba410f5969521e8b3d30f02795541c1661f83a --- files/debs/tls-proxy | 2 +- functions-common | 10 ++++++ lib/apache | 31 +++++++++++++++--- lib/cinder | 4 +-- lib/glance | 4 +-- lib/keystone | 4 +-- lib/neutron | 2 +- lib/neutron-legacy | 2 +- lib/nova | 2 +- lib/swift | 2 +- lib/tls | 78 +++++++++++++++++++++++++++++++------------- stack.sh | 1 + 12 files changed, 103 insertions(+), 39 deletions(-) diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy index dce9c07d3f..5bd8e213a2 100644 --- a/files/debs/tls-proxy +++ b/files/debs/tls-proxy @@ -1 +1 @@ -stud +apache2 diff --git a/functions-common b/functions-common index 1a4a8f8eea..4716567be3 100644 --- a/functions-common +++ b/functions-common @@ -2304,6 +2304,16 @@ function stop_service { fi } +# Service wrapper to stop services +# reload_service service-name +function reload_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl reload $1 + else + sudo service $1 reload + fi +} + # Test with a finite retry loop. # function test_with_retry { diff --git a/lib/apache b/lib/apache index 2c84c7a481..740f58835e 100644 --- a/lib/apache +++ b/lib/apache @@ -42,24 +42,40 @@ fi # Functions # --------- + +# Enable apache mod and restart apache if it isn't already enabled. +function enable_apache_mod { + local mod=$1 + # Apache installation, because we mark it NOPRIME + if is_ubuntu || is_suse ; then + if ! a2query -m $mod ; then + sudo a2enmod $mod + restart_apache_server + fi + elif is_fedora; then + # pass + true + else + exit_distro_not_supported "apache enable mod" + fi +} + # install_apache_wsgi() - Install Apache server and wsgi module function install_apache_wsgi { # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi elif is_suse; then install_package apache2 apache2-mod_wsgi - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi else - exit_distro_not_supported "apache installation" + exit_distro_not_supported "apache wsgi installation" fi + # WSGI isn't enabled by default, enable it + enable_apache_mod wsgi # ensure mod_version enabled for . 
This is # built-in statically on anything recent, but precise (2.2) @@ -192,6 +208,11 @@ function restart_apache_server { time_stop "restart_apache_server" } +# reload_apache_server +function reload_apache_server { + reload_service $APACHE_NAME +} + # Restore xtrace $_XTRACE_LIB_APACHE diff --git a/lib/cinder b/lib/cinder index a87f395c8e..0fe950b6c2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -305,8 +305,8 @@ function configure_cinder { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT + iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT fi if [ "$SYSLOG" != "False" ]; then @@ -534,7 +534,7 @@ function start_cinder { # Start proxies if enabled if is_service_enabled c-api && is_service_enabled tls-proxy; then - start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & + start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT fi } diff --git a/lib/glance b/lib/glance index 8d95aad73f..a31e564104 100644 --- a/lib/glance +++ b/lib/glance @@ -383,8 +383,8 @@ function install_glance { function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then - start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT & - start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT & + start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT + start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT fi run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" diff --git a/lib/keystone b/lib/keystone index 6198e43b58..f52cc3fb4b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -609,8 +609,8 @@ function start_keystone { # Start proxies if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & - start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT + start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT fi # (re)start memcached to make sure we have a clean memcache. 
diff --git a/lib/neutron b/lib/neutron index c1552e3d06..e37701879d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -409,7 +409,7 @@ function start_neutron_api { # Start proxy if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT & + start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b1b5230fdd..18100994f3 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -460,7 +460,7 @@ function start_neutron_service_and_check { # Start proxy if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT & + start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT fi } diff --git a/lib/nova b/lib/nova index 235b533d77..8970a7c693 100644 --- a/lib/nova +++ b/lib/nova @@ -800,7 +800,7 @@ function start_nova_api { # Start proxies if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & + start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT fi export PATH=$old_path diff --git a/lib/swift b/lib/swift index 0c74411a9c..f9ea028cea 100644 --- a/lib/swift +++ b/lib/swift @@ -806,7 +806,7 @@ function start_swift { done if is_service_enabled tls-proxy; then local proxy_port=${SWIFT_DEFAULT_BIND_PORT} - start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT & + start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT fi run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" if [[ ${SWIFT_REPLICAS} == 1 ]]; then diff --git a/lib/tls b/lib/tls index ca57ed44e0..2c4e18d388 100644 --- a/lib/tls +++ b/lib/tls @@ -16,7 +16,6 @@ # # - configure_CA # - init_CA -# - cleanup_CA # - configure_proxy # - start_tls_proxy @@ -221,19 +220,6 @@ function init_CA { fi } -# Clean up the CA files -# cleanup_CA -function cleanup_CA { - if is_fedora; then - sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem - sudo update-ca-trust - elif is_ubuntu; then - sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt - sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt - sudo update-ca-certificates - fi -} - # Create an initial server cert # init_cert function init_cert { @@ -455,26 +441,72 @@ function enable_mod_ssl { # Starts the TLS proxy for the given IP/ports # start_tls_proxy front-host front-port back-host back-port function start_tls_proxy { - local f_host=$1 - local f_port=$2 - local b_host=$3 - local b_port=$4 - - stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null + local b_service="$1-tls-proxy" + local f_host=$2 + local f_port=$3 + local b_host=$4 + local b_port=$5 + + local config_file + config_file=$(apache_site_config_for $b_service) + local listen_string + # Default apache configs on ubuntu and centos listen on 80 and 443 + # newer apache seems fine with duplicate listen directive but older + # apache does not so special case 80 and 443. 
+ if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then + listen_string="" + elif [[ "$f_host" == '*' ]] ; then + listen_string="Listen $f_port" + else + listen_string="Listen $f_host:$f_port" + fi + sudo bash -c "cat >$config_file" << EOF +$listen_string + + + SSLEngine On + SSLCertificateFile $DEVSTACK_CERT + + + ProxyPass http://$b_host:$b_port/ retry=5 nocanon + ProxyPassReverse http://$b_host:$b_port/ + + +EOF + for mod in ssl proxy proxy_http; do + enable_apache_mod $mod + done + enable_apache_site $b_service + # Only a reload is required to pull in new vhosts + # Note that a restart reliably fails on centos7 and trusty + # because apache can't open port 80 because the old apache + # still has it open. Using reload fixes trusty but centos7 + # still doesn't work. + reload_apache_server } # Cleanup Functions # ================= -# Stops all stud processes. This should be done only after all services +# Stops the apache service. This should be done only after all services # using tls configuration are down. function stop_tls_proxy { - killall stud + stop_apache_server } -# Remove CA along with configuration, as well as the local server certificate +# Clean up the CA files +# cleanup_CA function cleanup_CA { + if is_fedora; then + sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt + sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi + rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT" } diff --git a/stack.sh b/stack.sh index 09466a6894..119ca8561b 100755 --- a/stack.sh +++ b/stack.sh @@ -1052,6 +1052,7 @@ EOF if is_service_enabled tls-proxy; then echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early + start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 fi source $TOP_DIR/userrc_early From 4c813ac0fb64c7b4eeec7f67692ccfd7affd2153 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Wed, 21 Sep 2016 19:39:36 +0200 Subject: [PATCH 0161/1936] Fix the logic of if used with is_service_enabled The value to be evaluated is the returned value, not the output of the command. Change-Id: I22d7c967e911bcfee6b1910f666dbbc647c00085 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index b491bf82ef..048f3464b3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -317,7 +317,7 @@ function configure_tempest { # set the equiv validation option here as well to ensure they are # in sync. They shouldn't be separate options. iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method - if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then + if ! is_service_enabled n-cell && ! is_service_enabled neutron; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi From a5d965a3d7c558239d770428e1dd815943a8d887 Mon Sep 17 00:00:00 2001 From: Patrick East Date: Wed, 3 Aug 2016 14:44:53 -0700 Subject: [PATCH 0162/1936] Use userrc_early for all nodes Instead of only using the userrc_early when keystone is an enabled service we will do it on all runs of stack.sh. This way services can be split up more across devstack nodes, and you can do configuration requiring credentials on nodes that don't install keystone. 
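As a rough illustration of what this buys a node that does not run keystone: once stack.sh has written the early credentials file, admin commands can simply source it (the path and commands here are only an example, not part of the change):

    # on a compute-only node, after stack.sh completes
    source $TOP_DIR/userrc_early   # exports the OS_* admin credentials (and OS_CACERT when tls-proxy is enabled)
    openstack endpoint list        # talks to keystone running on the controller node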
Change-Id: I74574ae9f45a74bcbcc8e3149228ecb795ab4fb7 --- stack.sh | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/stack.sh b/stack.sh index 6a5a2a318f..8f548997d4 100755 --- a/stack.sh +++ b/stack.sh @@ -1021,21 +1021,12 @@ start_dstat # Keystone # -------- -if is_service_enabled keystone; then - echo_summary "Starting Keystone" - - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then - init_keystone - start_keystone - bootstrap_keystone - fi - - # Rather than just export these, we write them out to a - # intermediate userrc file that can also be used to debug if - # something goes wrong between here and running - # tools/create_userrc.sh (this script relies on services other - # than keystone being available, so we can't call it right now) - cat > $TOP_DIR/userrc_early < $TOP_DIR/userrc_early <> $TOP_DIR/userrc_early - start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 - fi +if is_service_enabled tls-proxy; then + echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early + start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 +fi + +source $TOP_DIR/userrc_early + +if is_service_enabled keystone; then + echo_summary "Starting Keystone" - source $TOP_DIR/userrc_early + if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + init_keystone + start_keystone + bootstrap_keystone + fi create_keystone_accounts create_nova_accounts From a80d4097a973acaff9c7718334487a1182ba7dc6 Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Thu, 11 Aug 2016 10:41:34 +0000 Subject: [PATCH 0163/1936] Modify the default Qemu packages name for AArch64. In Debian jessie and later release,there is no packages called "qemu-kvm" for AArch64. Also modify the libguestfs0 packages for AArch64 Closes-bug: #1612182 Change-Id: I5eb6bd137896eb9abfc4f8dbb41b41105e4820cd Signed-off-by: Kevin Zhao --- lib/nova_plugins/functions-libvirt | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 4e5a748e45..790227beb5 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -23,12 +23,7 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Installs required distro-specific libvirt packages. function install_libvirt { if is_ubuntu; then - if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then - install_package qemu-system - else - install_package qemu-kvm - install_package libguestfs0 - fi + install_package qemu-system install_package libvirt-bin libvirt-dev pip_install_gr libvirt-python if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then From a4705403aa6a4ba0f5ea8c67bff89a67d0be2070 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 23 Sep 2016 15:08:48 +1000 Subject: [PATCH 0164/1936] yum_install: fix awk return code TIL: Similarly, all the END rules are merged, and executed when all the input is exhausted (or when an exit statement is executed). i.e. matching YUM_FAILED calls "exit", which falls through to the END rules which calls "exit result" ... which is zero. i.e. if the return code is 1 then we actually hide that and return with zero. This is rather annoying because errors that should halt to alert us of a package install failure pass through, only for you to have to debug much later on seemingly unrelated problems. This always sets "result" and thus should be returning the right thing. I've updated the documentation to hopefully make it clearer what's going on. 
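The awk behaviour being fixed is easy to reproduce on its own; a minimal illustration (not part of the patch):

    # 'exit 1' inside a rule still falls through to END, and END's own
    # 'exit result' (result=0) overrides the status, hiding the failure:
    echo "YUM_FAILED 1" | awk 'BEGIN {result=0} /^YUM_FAILED/ {exit $2} END {exit result}'; echo $?   # prints 0
    # assigning result=$2 instead lets END report the real code:
    echo "YUM_FAILED 1" | awk 'BEGIN {result=0} /^YUM_FAILED/ {result=$2} END {exit result}'; echo $?   # prints 1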
Change-Id: Ia15b7dc55efb8d3e3e945241b67a468b8a914672 --- functions-common | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/functions-common b/functions-common index 1a4a8f8eea..120aa970d9 100644 --- a/functions-common +++ b/functions-common @@ -1346,20 +1346,26 @@ function yum_install { time_start "yum_install" - # - We run with LC_ALL=C so string matching *should* be OK - # - Exit 1 if the failure might get better with a retry. - # - Exit 2 if it is fatal. - parse_yum_result=' \ - BEGIN { result=0 } \ - /^YUM_FAILED/ { exit $2 } \ - /^No package/ { result=2 } \ - /^Failed:/ { result=2 } \ - //{ print } \ + # This is a bit tricky, because yum -y assumes missing or failed + # packages are OK (see [1]). We want devstack to stop if we are + # installing missing packages. + # + # Thus we manually match on the output (stack.sh runs in a fixed + # locale, so lang shouldn't change). + # + # If yum returns !0, we echo the result as "YUM_FAILED" and return + # that from the awk (we're subverting -e with this trick). + # Otherwise we use awk to look for failure strings and return "2" + # to indicate a terminal failure. + # + # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567 + parse_yum_result=' \ + BEGIN { result=0 } \ + /^YUM_FAILED/ { result=$2 } \ + /^No package/ { result=2 } \ + /^Failed:/ { result=2 } \ + //{ print } \ END { exit result }' - - # The manual check for missing packages is because yum -y assumes - # missing or failed packages are OK. - # See https://bugzilla.redhat.com/show_bug.cgi?id=965567 (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \ | awk "$parse_yum_result" && result=$? || result=$? From 499a9e39c1bfeb39928b04a9f1cc84a91521dcad Mon Sep 17 00:00:00 2001 From: John Hua Date: Mon, 26 Sep 2016 11:43:49 +0800 Subject: [PATCH 0165/1936] XenAPI: Remove legacy tty image UPLOAD_LEGACY_TTY was for console access, but it's no longer used. Change-Id: I294c8716be2e6ee9f53108d4eb41faf99e975538 --- stack.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/stack.sh b/stack.sh index 6a5a2a318f..9aa770f2b9 100755 --- a/stack.sh +++ b/stack.sh @@ -1221,11 +1221,6 @@ if is_service_enabled g-reg; then echo_summary "Uploading images" - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi - for image_url in ${IMAGE_URLS//,/ }; do upload_image $image_url done From 323b726783d6d4ef24a0c9f0d7c77b9e8b152c61 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 23 Sep 2016 13:33:40 -0700 Subject: [PATCH 0166/1936] Don't make root CA if it exists To support multinode testing where we just copy the CA to all the instances don't remake the CA if it already exists. The end result is that you can trusty a single chain and all your clients will be happy regardless of which host they are talking to. 
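A minimal sketch of the multinode flow this enables (the host name and DATA_DIR path below are illustrative):

    # copy the controller's CA onto each subnode before stacking; with
    # cacert.pem already present, make_root_CA skips regeneration
    rsync -a controller:/opt/stack/data/CA/ /opt/stack/data/CA/
    ./stack.sh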
Change-Id: I90892e6828a59fa37af717361a2f1eed15a87ae4 --- lib/tls | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/tls b/lib/tls index 2c4e18d388..2443d7d31d 100644 --- a/lib/tls +++ b/lib/tls @@ -322,15 +322,17 @@ function make_root_CA { create_CA_base $ca_dir create_CA_config $ca_dir 'Root CA' - # Create a self-signed certificate valid for 5 years - $OPENSSL req -config $ca_dir/ca.conf \ - -x509 \ - -nodes \ - -newkey rsa \ - -days 21360 \ - -keyout $ca_dir/private/cacert.key \ - -out $ca_dir/cacert.pem \ - -outform PEM + if [ ! -r "$ca_dir/cacert.pem" ]; then + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM + fi } # If a non-system python-requests is installed then it will use the @@ -507,7 +509,7 @@ function cleanup_CA { sudo update-ca-certificates fi - rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT" + rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT" } # Tell emacs to use shell-script-mode From 890342ed878f4a8f556ae733b6bd6c872308a937 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Tue, 13 Sep 2016 22:41:41 +0200 Subject: [PATCH 0167/1936] Work around issue in glance_store swift driver With [1] glance_store introduced default settings for user_domain_id and project_domain_id. Sadly since these are always passed to the keystone client, they override any settings to user_domain_name and project_domain_name that are made in the config, leading to authentication failures. So as a workaround until [2] is fixed, we explicitly place the corresponding domain_ids into the config. [1] https://review.openstack.org/297665 [2] https://bugs.launchpad.net/tempest/+bug/1620999 Change-Id: Ica81a1a176614392291f2db4cc6398ed30663aed --- lib/glance | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index 8d95aad73f..5274714a6c 100644 --- a/lib/glance +++ b/lib/glance @@ -187,8 +187,6 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 - iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME - iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 # commenting is not strictly necessary but it's confusing to have bad values in conf @@ -312,6 +310,11 @@ function create_glance_accounts { "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" + + # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 + service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) + iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id + iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id fi # Add glance-glare service and endpoints From 04e69de6c513e1cddaaa74eb2ff428a5db4d223b Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Wed, 27 Jul 2016 08:05:05 +1000 Subject: [PATCH 0168/1936] Mount identity admin script at /identity_admin The /identity_admin endpoint is the port 80/443 equivalent of the service that typically runs on port 35357. In v2 some operations must be performed on the admin endpoint whereas on v3 the services on 5000 and 35357 are exactly the same. 
This would be why the service was mounted at /identity_v2_admin however that is misleading because both the v2 and v3 services are present on that endpoint. This is particularly confusing because we set this as the OS_AUTH_URL endpoint and it makes it seem like we are doing v2 authentication when we are not. Change-Id: If73735026079fb19ca5bd44b3a4dc1f507b5c99d --- files/apache-keystone.template | 4 ++-- lib/keystone | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 8a4b0f0c43..428544f25c 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -44,8 +44,8 @@ Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public WSGIPassAuthorization On -Alias /identity_v2_admin %KEYSTONE_BIN%/keystone-wsgi-admin - +Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin + SetHandler wsgi-script Options +ExecCGI diff --git a/lib/keystone b/lib/keystone index 851db042ba..23f09a5499 100644 --- a/lib/keystone +++ b/lib/keystone @@ -124,7 +124,7 @@ fi # complete URIs if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then # If running in Apache, use path access rather than port. - KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_v2_admin + KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity else KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT} From 69e3c0aac99981f17c76c22111e5c397824b8428 Mon Sep 17 00:00:00 2001 From: Ian Cordasco Date: Mon, 26 Sep 2016 12:53:14 -0500 Subject: [PATCH 0169/1936] Update certificate creation for urllib3 urllib3 1.18 was released today and contains new more correct hostname matching that takes into account the ipAddress portion of a certificate and disallows matching an IP Address against a DNS hostname. Change-Id: I37d247b68911dc85f55adec6a7952ed321c1b1d8 --- functions-common | 12 ++++++++++++ lib/tls | 5 ++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 4716567be3..9544c8102f 100644 --- a/functions-common +++ b/functions-common @@ -2207,6 +2207,18 @@ function cidr2netmask { echo ${1-0}.${2-0}.${3-0}.${4-0} } +# Check if this is a valid ipv4 address string +function is_ipv4_address { + local address=$1 + local regex='([0-9]{1,3}.){3}[0-9]{1,3}' + # TODO(clarkb) make this more robust + if [[ "$address" =~ $regex ]] ; then + return 0 + else + return 1 + fi +} + # Gracefully cp only if source file/dir exists # cp_it source destination function cp_it { diff --git a/lib/tls b/lib/tls index 2c4e18d388..78d476fbf2 100644 --- a/lib/tls +++ b/lib/tls @@ -226,7 +226,7 @@ function init_cert { if [[ ! 
-r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then # Lie to let incomplete match routines work - TLS_IP="DNS:$TLS_IP" + TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" @@ -249,6 +249,9 @@ function make_cert { else alt_names="$alt_names,DNS:$SERVICE_HOST" fi + if is_ipv4_address "$SERVICE_HOST" ; then + alt_names="$alt_names,IP:$SERVICE_HOST" + fi fi # Only generate the certificate if it doesn't exist yet on the disk From 4a55d2a66092e351726251bb21a1d82b3501bdcd Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 24 Aug 2016 15:30:09 +0900 Subject: [PATCH 0170/1936] lib/neutron: Rename core plugin variables NEUTRON_PLUGIN -> NEUTRON_CORE_PLUGIN NEUTRON_PLUGIN_* -> NEUTRON_CORE_PLUGIN_* Change-Id: I1d93d8bd1e6e3bbca0e56a2da0684ab3f3fbb525 --- lib/neutron | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/lib/neutron b/lib/neutron index ab84f7eefe..5370392796 100644 --- a/lib/neutron +++ b/lib/neutron @@ -47,10 +47,10 @@ NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # By default, use the ML2 plugin -NEUTRON_PLUGIN=${NEUTRON_PLUGIN:-ml2} -NEUTRON_PLUGIN_CONF_FILENAME=${NEUTRON_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_PLUGIN -NEUTRON_PLUGIN_CONF=$NEUTRON_PLUGIN_CONF_PATH/$NEUTRON_PLUGIN_CONF_FILENAME +NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} +NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} +NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN +NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} @@ -117,9 +117,9 @@ function configure_neutron_new { configure_neutron_rootwrap - mkdir -p $NEUTRON_PLUGIN_CONF_PATH + mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_PLUGIN/$NEUTRON_PLUGIN_CONF_FILENAME.sample $NEUTRON_PLUGIN_CONF + cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF iniset $NEUTRON_CONF database connection `database_connection_url neutron` iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH @@ -139,7 +139,7 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_PLUGIN + iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN iniset $NEUTRON_CONF DEFAULT policy_file $policy_file iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True @@ -162,25 +162,25 @@ function configure_neutron_new { # Configure VXLAN # TODO(sc68cal) not hardcode? 
- iniset $NEUTRON_PLUGIN_CONF ml2 tenant_network_types vxlan - iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan - iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge - iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_PLUGIN_CONF ml2 extension_drivers port_security + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge + iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security fi # Neutron OVS or LB agent if is_service_enabled neutron-agent; then - iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan + iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_PLUGIN_CONF securitygroup iptables - iniset $NEUTRON_PLUGIN_CONF vxlan local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables + iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP else - iniset $NEUTRON_PLUGIN_CONF securitygroup iptables_hybrid - iniset $NEUTRON_PLUGIN_CONF ovs local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid + iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP fi fi @@ -397,7 +397,7 @@ function start_neutron_api { # Start the Neutron service # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_PLUGIN_CONF" + run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" if is_ssl_enabled_service "neutron"; then ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" @@ -475,9 +475,9 @@ function _set_config_files { NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF" - #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not + #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_CORE_PLUGIN_CONF (ml2_conf.ini) but others may not if is_service_enabled neutron-agent; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_PLUGIN_CONF" + NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" fi if is_service_enabled neutron-dhcp; then From 1aa436813566081c24f0223b2ac48c2389c244ce Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 21 Jul 2016 19:37:04 +0900 Subject: [PATCH 0171/1936] Move Q_ROUTER_NAME to where it belongs The motivation is to make it more friendly with lib/neutron. ie. 
independent from lib/neutron-legacy Change-Id: I19821b009cbf1bc715a6c7b2854e4c77d2041ec4 --- lib/neutron-legacy | 2 -- lib/neutron_plugins/services/l3 | 3 +++ lib/tempest | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 123ba4238f..9e926a000f 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -125,8 +125,6 @@ Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Allow Overlapping IP among subnets Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -# The name of the default q-l3 router -Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 408b322702..54dae2b4c7 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -22,6 +22,9 @@ PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} # used. Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} +# The name of the default router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} + # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of # PUBLIC_BRIDGE. This is intended to be used with # Q_USE_PROVIDERNET_FOR_PUBLIC=True. diff --git a/lib/tempest b/lib/tempest index b491bf82ef..0845ad0afa 100644 --- a/lib/tempest +++ b/lib/tempest @@ -15,7 +15,6 @@ # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares # - ``PUBLIC_NETWORK_NAME`` -# - ``Q_ROUTER_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` # - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone From b34d459bbc100e65fbc308438b3b7f72bd5fb5b6 Mon Sep 17 00:00:00 2001 From: Drago Rosson Date: Mon, 26 Sep 2016 13:23:23 -0500 Subject: [PATCH 0172/1936] Allow default IPv6 route device names to have dots When dots are used with sysctl, they are reinterpreted as slashes. Route devices can have dots in their names, so when they are used in a sysctl command that also uses dots, its dot will be replaced with a slash, causing an error. Change-Id: Ie32126a3aa8d646568d7d37ec4874419b9658935 Closes-Bug: #1627770 --- lib/neutron_plugins/services/l3 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 408b322702..8c26b2b199 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -401,7 +401,10 @@ function _neutron_configure_router_v6 { # IPv6-only clouds in the gate. Please do not remove this without # talking to folks in Infra. for d in $default_v6_route_devs; do - sudo sysctl -w net.ipv6.conf.$d.accept_ra=2 + # Slashes must be used in this sysctl command because route devices + # can have dots in their names. If dots were used, dots in the + # device name would be reinterpreted as a slash, causing an error. + sudo sysctl -w net/ipv6/conf/$d/accept_ra=2 done # Ensure IPv6 forwarding is enabled on the host sudo sysctl -w net.ipv6.conf.all.forwarding=1 From aa7ec81fd794d4ab02b96f726ae08f53abc90073 Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Wed, 28 Sep 2016 20:21:57 +0900 Subject: [PATCH 0173/1936] Fix typo in documentation This patch just removes a duplicate 'the'. 
Change-Id: I3393a51d55ba8ec1639c2548781f8972f0d5c9d0 --- doc/source/guides/neutron.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index b26fd1e8ee..05f60bae5f 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -595,7 +595,7 @@ to be configured for VLAN tenant networks. For OVS, a similar configuration like described in the :ref:`OVS Provider Network ` section can be -used. Just add the the following line to this local.conf, which also loads +used. Just add the following line to this local.conf, which also loads the MacVTap mechanism driver: :: From 0009869caebe3b671c84f19cbde694547e3dd11b Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Wed, 28 Sep 2016 15:05:31 -0400 Subject: [PATCH 0174/1936] Don't install Nova policy.json Nova ships with an empty policy.json file which it does not need. oslo.policy previously required the empty file to be there but as of version 1.14.0 it is possible to run with no policy file at all. Since there are no policies defined in the sample file let's no install it. Change-Id: I85a251376dfe38caa4b100861bf764014a98bc37 Depends-On: I09fa842ffbe75bed269cef6edc9c82d18bfe9297 --- lib/nova | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/nova b/lib/nova index 334cba6dd7..3e9857a778 100644 --- a/lib/nova +++ b/lib/nova @@ -302,8 +302,6 @@ function configure_nova { # Put config files in ``/etc/nova`` for everyone to find sudo install -d -o $STACK_USER $NOVA_CONF_DIR - install_default_policy nova - configure_rootwrap nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then From b3a210f643989603d192b32a40b2001664f8ed73 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 29 Sep 2016 13:26:30 +0000 Subject: [PATCH 0175/1936] Enable bridge firewalling if iptables are used With the plan [1] to stop enabling it by Neutron iptables firewall driver itself, deployment tools should catch up and enable the firewall themselves. This is needed for distributions that decided to disable the kernel firewall by default (upstream kernel has it enabled). This is also needed for distributions that ship newer kernels but don't load the br_netfilter module before starting nova-network or Neutron iptables firewall driver. In the latter case, firewall may not work, depending on the order of operations executed by the driver. To isolate devstack setups from the difference in distribution kernel configuration and version, the following steps are done: - we load bridge kernel module, and br_netfilter if present, to get access to sysctl knobs controlling the firewall; - once knobs are available, we unconditionally set them to 1, to make sure the firewall is in effect. More details at: http://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf [1] I9137ea017624ac92a05f73863b77f9ee4681bbe7 Change-Id: Id6bfd9595f0772a63d1096ef83ebbb6cd630fafd Related-Bug: #1622914 --- functions | 18 ++++++++++++++++++ lib/neutron | 2 ++ lib/neutron_plugins/linuxbridge_agent | 1 + lib/neutron_plugins/ovs_base | 1 + lib/nova | 6 +++++- 5 files changed, 27 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 58565788ff..6a0ac67b69 100644 --- a/functions +++ b/functions @@ -646,6 +646,24 @@ function set_mtu { } +# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling +function enable_kernel_bridge_firewall { + # Load bridge module. 
This module provides access to firewall for bridged + # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to + # enable/disable bridge firewalling + sudo modprobe bridge + # For newer kernels (3.18+), those sysctl settings are split into a separate + # kernel module (br_netfilter). Load it too, if present. + sudo modprobe br_netfilter 2>> /dev/null || : + # Enable bridge firewalling in case it's disabled in kernel (upstream + # default is enabled, but some distributions may decide to change it). + # This is at least needed for RHEL 7.2 and earlier releases. + for proto in arp ip ip6; do + sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1 + done +} + + # Restore xtrace $_XTRACE_FUNCTIONS diff --git a/lib/neutron b/lib/neutron index 5370392796..9e9eb2d2b1 100644 --- a/lib/neutron +++ b/lib/neutron @@ -182,6 +182,8 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP fi + + enable_kernel_bridge_firewall fi # DHCP Agent diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 7d59e1347c..d0de2f5e5d 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -69,6 +69,7 @@ function neutron_plugin_configure_plugin_agent { fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver + enable_kernel_bridge_firewall else iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index f6d10ea4f9..3cd6c85053 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -84,6 +84,7 @@ function _neutron_ovs_base_configure_debug_command { function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + enable_kernel_bridge_firewall else iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi diff --git a/lib/nova b/lib/nova index 334cba6dd7..d321b73205 100644 --- a/lib/nova +++ b/lib/nova @@ -866,9 +866,13 @@ function start_nova_rest { run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" - run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" + + if is_service_enabled n-net; then + enable_kernel_bridge_firewall + fi run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" + run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" From c12c12f6630f591d20dd5098be1fe105f2cc790b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 30 Sep 2016 06:57:24 +0000 Subject: [PATCH 0176/1936] Updated from generate-devstack-plugins-list Change-Id: Id9ce2cbdad53665ca2f6b7e57cb2553cb89cd982 --- doc/source/plugin-registry.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9d023bf5e2..f29b0d7ae2 100644 --- a/doc/source/plugin-registry.rst +++ 
b/doc/source/plugin-registry.rst @@ -59,6 +59,7 @@ freezer `git://git.openstack.org/openstack/freeze freezer-api `git://git.openstack.org/openstack/freezer-api `__ freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__ gce-api `git://git.openstack.org/openstack/gce-api `__ +glare `git://git.openstack.org/openstack/glare `__ gnocchi `git://git.openstack.org/openstack/gnocchi `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ heat `git://git.openstack.org/openstack/heat `__ @@ -69,6 +70,7 @@ ironic-staging-drivers `git://git.openstack.org/openstack/ironic karbor `git://git.openstack.org/openstack/karbor `__ karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ kingbird `git://git.openstack.org/openstack/kingbird `__ +kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ @@ -111,6 +113,7 @@ neutron-fwaas `git://git.openstack.org/openstack/neutro neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ +nimble `git://git.openstack.org/openstack/nimble `__ nova-docker `git://git.openstack.org/openstack/nova-docker `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ From c58a15575d3d202c1ecb19ebba82a908dfb66028 Mon Sep 17 00:00:00 2001 From: rajinir Date: Tue, 27 Sep 2016 17:14:59 -0500 Subject: [PATCH 0177/1936] Neutron L3 subnetpool creation should be optional Added an option to make subnetpools to be optional as it ignores the public network specified in FIXED_RANGE. DocImpact Change-Id: Ic89ceca76afda67da5545111972c3348011f294f Closes-Bug: #1628267 --- doc/source/guides/neutron.rst | 2 ++ lib/neutron_plugins/services/l3 | 13 ++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index b26fd1e8ee..78150f3742 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -392,6 +392,7 @@ controller node. PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 + USE_SUBNETPOOL=False In this configuration we are defining FIXED_RANGE to be a publicly routed IPv4 subnet. In this specific instance we are using @@ -577,6 +578,7 @@ you do not require them. 
PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 + USE_SUBNETPOOL=False [[post-config|/$Q_PLUGIN_CONF_FILE]] [macvtap] diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 408b322702..33db4cb538 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -80,6 +80,7 @@ PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} # Subnetpool defaults +USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"} SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8} @@ -168,11 +169,13 @@ function create_neutron_initial_network { fi if is_networking_extension_supported "auto-allocated-topology"; then - if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) - fi - if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) + if [[ "$USE_SUBNETPOOL" == "True" ]]; then + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) + fi + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) + fi fi fi From c271b3ea1fe16561b7a46507fdf961668182b256 Mon Sep 17 00:00:00 2001 From: Mike Perez Date: Mon, 3 Oct 2016 16:00:33 -0700 Subject: [PATCH 0178/1936] Update OpenStackClient role list commands This command is deprecated. The new command is role assignment list. 
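For reference, the replacement command looks like this (a hedged example; output columns vary by client release):

    # list role assignments for a user in a project
    openstack role assignment list --user demo --project demo
    # deprecated form that the docs and scripts used previously:
    # openstack role list --user demo --project demo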
Change-Id: I8dba0be21d5af6751bea13d8ff29cd4b7589ab3e --- doc/source/guides/multinode-lab.rst | 2 +- exercises/neutron-adv-test.sh | 2 +- functions-common | 32 +++++++++-------------------- 3 files changed, 12 insertions(+), 24 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c996f95743..8751eb8d6a 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -260,7 +260,7 @@ for scripting: openstack user create $NAME --password=$PASSWORD --project $PROJECT openstack role add Member --user $NAME --project $PROJECT # The Member role is created by stack.sh - # openstack role list + # openstack role assignment list Swift ----- diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index dc6bbbb5c7..e003c56331 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -148,7 +148,7 @@ function get_user_id { function get_role_id { local ROLE_NAME=$1 local ROLE_ID - ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'` + ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'` die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" echo "$ROLE_ID" } diff --git a/functions-common b/functions-common index 9544c8102f..87e6bb453d 100644 --- a/functions-common +++ b/functions-common @@ -865,11 +865,9 @@ function get_or_add_user_project_role { domain_args=$(_get_domain_args $4 $5) # Gets user role id - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ - --column "ID" \ --project $3 \ - --column "Name" \ $domain_args \ | grep " $1 " | get_field 1) if [[ -z "$user_role_id" ]]; then @@ -878,11 +876,9 @@ function get_or_add_user_project_role { --user $2 \ --project $3 \ $domain_args - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ - --column "ID" \ --project $3 \ - --column "Name" \ $domain_args \ | grep " $1 " | get_field 1) fi @@ -894,22 +890,18 @@ function get_or_add_user_project_role { function get_or_add_user_domain_role { local user_role_id # Gets user role id - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ - --column "ID" \ --domain $3 \ - --column "Name" \ | grep " $1 " | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it openstack role add $1 \ --user $2 \ --domain $3 - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ - --column "ID" \ --domain $3 \ - --column "Name" \ | grep " $1 " | get_field 1) fi echo $user_role_id @@ -920,13 +912,11 @@ function get_or_add_user_domain_role { function get_or_add_user_domain_role { local user_role_id # Gets user role id - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ - --column "ID" \ --domain $3 \ - --column "Name" \ | grep " $1 " | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it @@ -935,13 +925,11 @@ function get_or_add_user_domain_role { --domain $3 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 - user_role_id=$(openstack role list \ + user_role_id=$(openstack role assignment list \ --user $2 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ - --column "ID" \ --domain $3 \ - --column "Name" \ | grep " $1 " | get_field 1) fi echo $user_role_id @@ -952,19 +940,19 @@ 
function get_or_add_user_domain_role { function get_or_add_group_project_role { local group_role_id # Gets group role id - group_role_id=$(openstack role list \ + group_role_id=$(openstack role assignment list \ --group $2 \ --project $3 \ - -c "ID" -f value) + -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it openstack role add $1 \ --group $2 \ --project $3 - group_role_id=$(openstack role list \ + group_role_id=$(openstack role assignment list \ --group $2 \ --project $3 \ - -c "ID" -f value) + -f value) fi echo $group_role_id } From ca89d071b3c249fba55a824f7f4fc247b7c22948 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Date: Tue, 4 Oct 2016 18:17:44 +0200 Subject: [PATCH 0179/1936] Reduce the scope of the subnet pool prefix in neutron Some of the clouds used for CI use the 10.2xx.0.0/16 range for VMs, and collide with the wider 10.0.0.0/8. This setting allows for creation of 256 subnets out of the pool. Change-Id: I48c86f94098f1501f0e7f90a265dda7e81440eb0 Closes-Bug: 1629133 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 54dae2b4c7..d998c065cf 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -85,7 +85,7 @@ PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} # Subnetpool defaults SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"} -SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8} +SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/16} SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24} From 57df186c132c522231aab1e577d879f7fa51c992 Mon Sep 17 00:00:00 2001 From: Clenimar Filemon Date: Thu, 30 Jun 2016 17:30:26 -0300 Subject: [PATCH 0180/1936] Make Nova/Ironic communication use Identity v3 As long as nova already supports an Identity v3 auth flow when talking to ironic (Id837d26bb21c158de0504627e488c0692aef1e24), make it use v3 by default. This way we don't fail in a keystone v3-only situation, for example. 
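The resulting [ironic] section of nova.conf ends up looking roughly like the following (the values are illustrative; the real ones come from ADMIN_PASSWORD and KEYSTONE_AUTH_URI as set below):

    [ironic]
    auth_type = password
    username = admin
    password = secret
    auth_url = http://10.0.0.10/identity_admin/v3
    project_name = demo
    project_domain_id = default
    user_domain_id = default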
Change-Id: I028dfb52108d0630f47a53f8b420b70d4979eb55 --- lib/nova_plugins/hypervisor-ironic | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index c40427c089..7ffd14d046 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -45,11 +45,13 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 # ironic section - iniset $NOVA_CONF ironic admin_username admin - iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD - iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_URI/v2.0 - iniset $NOVA_CONF ironic admin_tenant_name demo - iniset $NOVA_CONF ironic api_endpoint $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1 + iniset $NOVA_CONF ironic auth_type password + iniset $NOVA_CONF ironic username admin + iniset $NOVA_CONF ironic password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI/v3 + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic user_domain_id default + iniset $NOVA_CONF ironic project_name demo } # install_nova_hypervisor() - Install external components From a283526c88d98f7668d369f4c20db44d3d06425a Mon Sep 17 00:00:00 2001 From: Daniel Gonzalez Date: Fri, 15 Jul 2016 19:13:38 +0200 Subject: [PATCH 0181/1936] Set cinder coordination backend url If DLM is enabled, cinder should be configured to use the correct backend url for the dlm. At the moment only zookeeper is supported, as it is the only backend currently supported in devstack. Change-Id: I7afc8dc95bc5b3f11b888e10607615c1212c45f4 --- lib/cinder | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/cinder b/lib/cinder index 0fe950b6c2..4ed944cfcb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -358,6 +358,13 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + # Set the backend url according to the configured dlm backend + if is_dlm_enabled; then + if [[ "$(dlm_backend)" == "zookeeper" ]]; then + iniset $CINDER_CONF coordination backend_url "zake://" + fi + fi } # create_cinder_accounts() - Set up common required cinder accounts From 66ce5c257ae32e269ede901f1737d04e194a6457 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 5 Oct 2016 12:11:05 -0700 Subject: [PATCH 0182/1936] Update apache tls proxy logs This creates log files per proxy vhost and sets the log level to info to help debug potential issues with tls proxying. 
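A quick way to eyeball the new logs on a running host (APACHE_NAME expands to apache2 on Ubuntu and httpd on Fedora/CentOS, so adjust the path accordingly):

    tail -f /var/log/apache2/tls-proxy_error.log /var/log/apache2/tls-proxy_access.log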
Change-Id: I02a62224662b021b35c293909ba045b4b74e1df8 --- lib/apache | 1 + lib/tls | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/lib/apache b/lib/apache index 740f58835e..8a38cc45e5 100644 --- a/lib/apache +++ b/lib/apache @@ -39,6 +39,7 @@ elif is_suse; then APACHE_NAME=apache2 APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} fi +APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Functions # --------- diff --git a/lib/tls b/lib/tls index c78ea5b191..6697dc875e 100644 --- a/lib/tls +++ b/lib/tls @@ -476,6 +476,11 @@ $listen_string ProxyPass http://$b_host:$b_port/ retry=5 nocanon ProxyPassReverse http://$b_host:$b_port/ + ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log + ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" + LogLevel info + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common + LogFormat "%v %h %l %u %t \"%r\" %>s %b" EOF for mod in ssl proxy proxy_http; do From 05dc1aad6c4633dbad53bc8e5a574d871becec43 Mon Sep 17 00:00:00 2001 From: Jan Stodt Date: Thu, 25 Aug 2016 15:46:02 +0200 Subject: [PATCH 0183/1936] Fix provider networking error message This fix replaces Q_USE_PROVIDERNET_FOR_PUBLIC with Q_USE_PROVIDER_NETWORKING in the error messages introduced by [1]. The error is thrown when provider networking with IPv6 has been requested via local.conf, but no provider IPv6 range or provider IPv6 gateway is provided. But if a provider network should be used over the private network is determined along the variable Q_USE_PROVIDER_NETWORKING and not Q_USE_PROVIDERNET_FOR_PUBLIC. The variable Q_USE_PROVIDERNET_FOR_PUBLIC determines if a provider network should be used as public network. This happens a few lines later in the code and is not related to those error messages. [1] https://review.openstack.org/#/c/326638/1/lib/neutron_plugins/ services/l3 Change-Id: I50aa1e9d2027eef598c95404851e51c31a397fbb --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 408b322702..14928fb09c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -191,8 +191,8 @@ function create_neutron_initial_network { fi if [[ "$IP_VERSION" =~ .*6 ]]; then - die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" - die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi From ec498cd0619805c409b28f81c6a7bcd3a01136ed Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Oct 2016 15:00:44 -0400 Subject: [PATCH 0184/1936] remove sites-enabled/keystone.conf link on clean doing a clean.sh / stack.sh cycle with USE_SSL=True was failing because we were no longer cleaning up the keystone site fully, so some of the early mod_ssl queries hit an invalid apache configuration. 
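On Ubuntu the added cleanup is roughly equivalent to doing the following by hand (a sketch of what disable_apache_site and apache_site_config_for resolve to there, not the literal helper code):

    sudo a2dissite keystone
    sudo rm -f /etc/apache2/sites-available/keystone.conf
    sudo service apache2 reload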
Change-Id: Ic6f3f601e532ec50c0234d928c25b378d9e95e32 --- lib/keystone | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/keystone b/lib/keystone index 13fa50b17e..f9ee8eb761 100644 --- a/lib/keystone +++ b/lib/keystone @@ -25,7 +25,6 @@ # - create_keystone_accounts # - stop_keystone # - cleanup_keystone -# - _cleanup_keystone_apache_wsgi # Save trace setting _XTRACE_KEYSTONE=$(set +o | grep xtrace) @@ -149,11 +148,7 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - _cleanup_keystone_apache_wsgi -} - -# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_keystone_apache_wsgi { + disable_apache_site keystone sudo rm -f $(apache_site_config_for keystone) } From 148d0e6e08caaaa9c6ca828c8404e5e82b6de8ee Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 1 Sep 2016 02:38:06 -0700 Subject: [PATCH 0185/1936] Clarify default IP_VERSION in docs The prior art on other options in the same document seemed to be calling out the default in a pre-formatted block after describing the possible values. I believe the default value for the option was first changed [1], then the docs were fixed [2], then the information was unintentionally dropped from the docs [3]. 1. Related-Change: If0e0b818355e4cb1338f7fa72af5e81e24361574 2. Related-Change: Ib6603b4f6ea0b4079f9a4ea46e723ecbb2ea371d 3. Related-Change: Iddd27cb54f1d9f062b9c47ff9ad6a2bef3650d6b Change-Id: I662403db3b08a351a680587440ad1f15a6f8ee5d --- doc/source/configuration.rst | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 1161b344e2..22809ebd7a 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -521,16 +521,14 @@ created flavors as follows: IP Version ---------- -``IP_VERSION`` can be used to configure DevStack to create either an -IPv4, IPv6, or dual-stack self service project data-network by with +``IP_VERSION`` can be used to configure Neutron to create either an +IPv4, IPv6, or dual-stack self-service project data-network by with either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` -respectively. This functionality requires that the Neutron networking -service is enabled by setting the following options: +respectively. :: - disable_service n-net - enable_service q-svc q-agt q-dhcp q-l3 + IP_VERSION=4+6 The following optional variables can be used to alter the default IPv6 behavior: From f06455e1b55b5419b6546a0d85ebfa734bf3c6b4 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Oct 2016 06:57:03 -0400 Subject: [PATCH 0186/1936] Add a screen session for tls logs When tls is enabled, we aren't bringing the logs to the forefront, which makes it hard to debug when things go wrong. This does that. 
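With USE_SCREEN=True the new log windows show up next to the service windows; for example (assuming the default SCREEN_NAME of stack):

    screen -x stack    # attach to the devstack screen session
    # then Ctrl-a " and pick the tls-error or tls-proxy window created by follow_tls_proxy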
Change-Id: I7c6c7e324e16da6b9bfa44f4bad17401ca4ed7e3 --- lib/tls | 7 +++++++ stack.sh | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/lib/tls b/lib/tls index 6697dc875e..40f3e81438 100644 --- a/lib/tls +++ b/lib/tls @@ -495,6 +495,13 @@ EOF reload_apache_server } +# Follow TLS proxy +function follow_tls_proxy { + sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log + tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log + sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log + tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log +} # Cleanup Functions # ================= diff --git a/stack.sh b/stack.sh index 9aa770f2b9..5170d1b8a4 100755 --- a/stack.sh +++ b/stack.sh @@ -993,6 +993,10 @@ if [[ "$USE_SCREEN" == "True" ]]; then fi screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true + + if is_service_enabled tls-proxy; then + follow_tls_proxy + fi fi # Clear ``screenrc`` file From cac6ef09c5a7ecc0e9959b1e3ace48c41a0add71 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 11 Oct 2016 08:23:48 +0200 Subject: [PATCH 0187/1936] Remove the heat stack owner role We should not require any special role for heat since very long time. We should use the same roles as with the primary user. Change-Id: Id9150f94c30505ed0da33b8fbc2a5a7bd4fcf5d0 --- lib/tempest | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6ffc927176..27023c2081 100644 --- a/lib/tempest +++ b/lib/tempest @@ -390,11 +390,6 @@ function configure_tempest { # Orchestration Tests if is_service_enabled heat; then - # Though this is not needed by heat, some tempest tests explicitly - # try to set this role. Removing them from the tempest tests breaks - # some non-devstack CIs. - get_or_create_role "heat_stack_owner" - if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then iniset $TEMPEST_CONFIG orchestration image_ref $(basename "${HEAT_CFN_IMAGE_URL%.*}") fi @@ -408,7 +403,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" fi iniset $TEMPEST_CONFIG orchestration build_timeout 900 - iniset $TEMPEST_CONFIG orchestration stack_owner_role "heat_stack_owner" + iniset $TEMPEST_CONFIG orchestration stack_owner_role Member fi # Scenario From 5ff77d6a2ac81a411505dc1d3b7b814eb7fbb259 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Tue, 6 Sep 2016 19:10:22 +0000 Subject: [PATCH 0188/1936] Remove workaround for `openstack complete` This reverts commit 6930ba312f787e0459f7455ac6ba2a70b3c7c37d. By reverting this patch we are no longer using the bandaid fix mentioned in the code. The latest openstackclient release (3.3.0) fixes the bug. Related-Bug: 1619274 Change-Id: I20e3c5a92b97bf46c8d2318cd37044f0f36e1745 --- stack.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index 1cc4eca1b9..09466a6894 100755 --- a/stack.sh +++ b/stack.sh @@ -1383,12 +1383,7 @@ check_libs_from_git # =============== # Prepare bash completion for OSC -# -# BUG: https://bugs.launchpad.net/python-openstackclient/+bug/1619274 -# the os-cloud param should not be required but if we don't provide it -# then this command hangs indefinitely if something is wrong with -# default environment credentials. 
-openstack --os-cloud=devstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null +openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null # If cinder is configured, set global_filter for PV devices if is_service_enabled cinder; then From 2bd8d906813679cfc22f5bc62882a289906a64af Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 13 Oct 2016 08:24:57 +0200 Subject: [PATCH 0189/1936] Objects versions new style config allow_versions is going to be a deprecated option. Setting up the object versioning in the new way. [1] http://docs.openstack.org/developer/swift/overview_object_versioning.html Change-Id: Ia520fbb6eb535b08ce83c0cb4bea31e7bba55eb3 --- lib/swift | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index f9ea028cea..b175f2e5c1 100644 --- a/lib/swift +++ b/lib/swift @@ -397,6 +397,9 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + # Versioned Writes + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" @@ -489,8 +492,6 @@ EOF generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container iniuncomment ${swift_node_config} DEFAULT bind_ip iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} - iniuncomment ${swift_node_config} app:container-server allow_versions - iniset ${swift_node_config} app:container-server allow_versions "true" swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} From d8682dbdf9f8247c20c4aa9612a560d2a9022c09 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Fri, 14 Oct 2016 14:36:29 -0400 Subject: [PATCH 0190/1936] worlddump: Add cinder-volume guru meditation report Cinder also supports GMR; it would be good to have this when debugging gate failures.
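For context, a report can also be requested by hand; a hedged example (recent oslo.reports releases trigger on SIGUSR2, older ones on SIGUSR1):

    # ask a running cinder-volume to dump a Guru Meditation Report into its service log
    kill -USR2 $(pgrep -f cinder-volume)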
${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From bc5cfc05842a4d4514082a2431020c5b881e52a1 Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Sun, 9 Oct 2016 16:31:15 +0000 Subject: [PATCH 0192/1936] Allow running nova compute with docker group A use case is from Zun project that needs to start n-cpu with "docker" group. Change-Id: Ib8f193ea1edf1f148e9ba505205495170ebf6d67 --- lib/nova | 2 ++ stackrc | 3 +++ 2 files changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 334cba6dd7..8da8ef2d29 100644 --- a/lib/nova +++ b/lib/nova @@ -829,6 +829,8 @@ function start_nova_compute { run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP + elif [[ "$VIRT_DRIVER" = 'docker' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do diff --git a/stackrc b/stackrc index c419ef4b15..bf92a77d81 100644 --- a/stackrc +++ b/stackrc @@ -589,6 +589,9 @@ case "$VIRT_DRIVER" in lxd) LXD_GROUP=${LXD_GROUP:-"lxd"} ;; + docker) + DOCKER_GROUP=${DOCKER_GROUP:-"docker"} + ;; fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; From 201c01f19b855aec3521c206fc0add5a01940c4b Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 18 Oct 2016 07:24:00 -0500 Subject: [PATCH 0193/1936] Don't clone dib-utils in install_heat It's not used, and a recent change to trim down projects lists in devstack-gate broke devstack in the gate that enabled heat. 
Change-Id: I405423bdc9ba8dd9b30fce6fdceacccf662d5da3 --- lib/heat | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/heat b/lib/heat index c841e0a499..0863128f81 100644 --- a/lib/heat +++ b/lib/heat @@ -40,7 +40,6 @@ HEAT_DIR=$DEST/heat HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates OCC_DIR=$DEST/os-collect-config -DIB_UTILS_DIR=$DEST/dib-utils ORC_DIR=$DEST/os-refresh-config OAC_DIR=$DEST/os-apply-config @@ -276,7 +275,6 @@ function install_heat_other { git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH - git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH } # start_heat() - Start running processes, including screen @@ -420,7 +418,7 @@ function create_heat_accounts { # build_heat_pip_mirror() - Build a pip mirror containing heat agent projects function build_heat_pip_mirror { - local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR" + local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR" local projpath proj package rm -rf $HEAT_PIP_REPO From 94ab1a4aa8ed1e438b66cc41f3637e78c8323dd0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 19 Oct 2016 08:16:16 +0000 Subject: [PATCH 0194/1936] Updated from generate-devstack-plugins-list Change-Id: I15cdb23e00664efe637de5cbc7b5a1e8efa21d13 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f29b0d7ae2..771f2ef6a7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -121,7 +121,6 @@ nova-powervm `git://git.openstack.org/openstack/nova-p octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ -python-freezerclient `git://git.openstack.org/openstack/python-freezerclient `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ From aa47a0a8be9a9aced97d6a083d955949635fa3b3 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 20 Oct 2016 22:27:25 -0400 Subject: [PATCH 0195/1936] ceph: set rbd_secret_uuid in cinder.conf, not rbd_uuid The ceph cinder backend script was setting the wrong config option in cinder.conf for the secret uuid. This was being masked by a bug in nova which is failing on this bug when trying to fix the nova bug...right. It makes sense. 
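As a quick sanity check (a sketch only; it assumes devstack's ini helpers are available and uses an example backend section named 'ceph'), the resulting cinder.conf stanza should now carry rbd_secret_uuid rather than rbd_uuid:

    # run from a devstack checkout
    source inc/ini-config
    CINDER_CONF=/etc/cinder/cinder.conf
    iniget $CINDER_CONF ceph rbd_secret_uuid   # expect the CINDER_CEPH_UUID value
    iniget $CINDER_CONF ceph rbd_uuid          # expect empty output now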
See: http://docs.ceph.com/docs/master/rbd/rbd-openstack/#configuring-cinder Change-Id: I4655cae3212d589177d2570403b563a83aad529a Closes-Bug: #1635488 --- lib/cinder_backends/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 9bff5bef4f..a213d5e005 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -48,7 +48,7 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF" iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" - iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID" + iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID" iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 iniset $CINDER_CONF DEFAULT glance_api_version 2 From 4440da85637aa8239f24bd4f23a9a8e1ac2d0b8d Mon Sep 17 00:00:00 2001 From: Adam Young Date: Wed, 26 Oct 2016 11:40:08 +0200 Subject: [PATCH 0196/1936] Unset admin_project config options for Keystone Until the policy changes land for Nova, Glance, etc, this value is not used. Additionally, by having it set, it actually makes it hard/impossible for the required changes to land in the other services. Disable/comment out the changes in the Keystone specific lib file for now, and we will re-enable once the Services can make use of them. Change-Id: Ia1de9083c21107dac2f0abb56bda166bdb37a69d --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index f9ee8eb761..8db0876f8b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -344,8 +344,8 @@ function configure_keystone { # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. # The users from this project are globally admin as before, but it also # allows policy changes in order to clarify the adminess scope. - iniset $KEYSTONE_CONF resource admin_project_domain_name Default - iniset $KEYSTONE_CONF resource admin_project_name admin + #iniset $KEYSTONE_CONF resource admin_project_domain_name Default + #iniset $KEYSTONE_CONF resource admin_project_name admin } # create_keystone_accounts() - Sets up common required keystone accounts From d5f730caf41430a142fcf37b216dfa4d69ec4d2c Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 21 Jul 2016 18:39:44 +0900 Subject: [PATCH 0197/1936] lib/neutron: Remove remaining references to Q_ variables Q_ variables belong to neutron-legacy. These are True by default in neutron. Remove them in favor of post-config meta section. Change-Id: If691a79b09003f85a07c9f33e0379a2b21e48141 --- lib/neutron | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index 9e9eb2d2b1..53fc5fcd15 100644 --- a/lib/neutron +++ b/lib/neutron @@ -147,10 +147,6 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken - # Configuration for neutron notifations to nova. 
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - iniset $NEUTRON_CONF nova auth_type password iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" iniset $NEUTRON_CONF nova username nova From 07edde1c14d8ab91533c1ef6fe0470f088d5c8bb Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 19 Oct 2016 19:21:00 +0000 Subject: [PATCH 0198/1936] lib/neutron: Create initial topology only on controller node To avoid it being created multiple times for multinode setup. Note: This reverts "Enable neutron to work in a multi node setup" (commit 88f8558d874072536e7660a233f24207a7089651) partly and fixes the issue differently. The configuration in question uses the new lib/neutron. (not neutron-legacy) In that case, calling create_neutron_initial_network from stack.sh directly is a wrong way, as create_neutron_initial_network is sourced by neutron-legacy. The new neutron code should not rely on the legacy one. Closes-Bug: #1613069 Change-Id: I868afeb065d80d8ccd57630b90658e330ab94251 --- lib/neutron | 20 +++++++++++--------- lib/neutron_plugins/services/l3 | 8 -------- stack.sh | 5 ++++- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/lib/neutron b/lib/neutron index 53fc5fcd15..9923721e74 100644 --- a/lib/neutron +++ b/lib/neutron @@ -431,15 +431,17 @@ function start_neutron_new { if is_service_enabled neutron-l3; then run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG" fi - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network + if is_service_enabled neutron-api; then + # XXX(sc68cal) - Here's where plugins can wire up their own networks instead + # of the code in lib/neutron_plugins/services/l3 + if type -p neutron_plugin_create_initial_networks > /dev/null; then + neutron_plugin_create_initial_networks + else + # XXX(sc68cal) Load up the built in Neutron networking code and build a topology + source $TOP_DIR/lib/neutron_plugins/services/l3 + # Create the networks using servic + create_neutron_initial_network + fi fi if is_service_enabled neutron-metadata-agent; then run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG" diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index aa61a109fa..9c9143a225 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -154,14 +154,6 @@ function _neutron_get_ext_gw_interface { } function create_neutron_initial_network { - if ! is_service_enabled q-svc && ! is_service_enabled neutron-api; then - echo "Controller services not enabled. No networks configured!" - return - fi - if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "False" ]]; then - echo "Network creation disabled!" 
- return - fi local project_id project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" diff --git a/stack.sh b/stack.sh index fab2edd299..2783efed6b 100755 --- a/stack.sh +++ b/stack.sh @@ -1267,7 +1267,10 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -create_neutron_initial_network +if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then + echo_summary "Creating initial neutron network elements" + create_neutron_initial_network +fi if is_service_enabled nova; then echo_summary "Starting Nova" From 7b07ccf5e4b7f724eceedb069a51c29284b357aa Mon Sep 17 00:00:00 2001 From: Eric Brown Date: Thu, 27 Oct 2016 06:14:00 -0700 Subject: [PATCH 0199/1936] Remove deprecated/obsolete ldap options The devstack ldap configuration for keystone is still using some old options that are no longer valid. The write support is being removed this release. And in previous releases, the ldap assignment driver support was removed and was not removed here. Change-Id: I538626b681eaee6a7ac10dfbc29605b73fbe13bf --- lib/keystone | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index f9ee8eb761..b2dfa833e3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -221,13 +221,6 @@ function configure_keystone { iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN - iniset $KEYSTONE_CONF ldap use_dumb_member "True" - iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" - iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" - iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" - iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" - iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN" - iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory" iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab" iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" From fb73d85afe7df7b93b9a460372a8ad6558e887e0 Mon Sep 17 00:00:00 2001 From: Eric Brown Date: Fri, 28 Oct 2016 02:03:37 -0700 Subject: [PATCH 0200/1936] Remove unused KEYSTONE_CATALOG_BACKEND This patch simply removes a var noted to be removed back in Newton. Change-Id: I7c66e1d8d65f562596543ed8ca402dba8c8ea271 --- lib/keystone | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/keystone b/lib/keystone index f9ee8eb761..e2303c08de 100644 --- a/lib/keystone +++ b/lib/keystone @@ -51,9 +51,6 @@ KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} -# NOTE(sdague): remove in Newton -KEYSTONE_CATALOG_BACKEND="sql" - # Toggle for deploying Keystone under HTTPD + mod_wsgi # Deprecated in Mitaka, use KEYSTONE_DEPLOY instead. KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} From dc486bc12fb63ecb5939f8d29dd9cf7a659847cd Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 8 Sep 2016 02:29:25 +0000 Subject: [PATCH 0201/1936] Switch fernet to be the default token provider this is the first patch in a series to actually make fernet the default token provider in keystone. 
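for deployments that still want the previous behaviour, the provider stays overridable from local.conf (illustrative pin, not part of this change):

    KEYSTONE_TOKEN_FORMAT=uuid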
the patches for grenade, release notes, and actually switching the value in keystone all depend on this patch first. reasons for switching over: - fernet tokens are the recommended token provider - the install guide for newton recommends deployers use fernet tokens [0] - we previously attempted this switch but ran into timing issues [1], the timing issues have been resolved [2] [0] http://docs.openstack.org/newton/install-guide-ubuntu/keystone-install.html [1] 153db269705f37d4144ad3fcf26dc67269755d7d [2] https://review.openstack.org/#/q/topic:make-fernet-default Change-Id: I3b819ae8d2924f3bece03902e05d1a8c5e5923f1 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index b2dfa833e3..fd92f31afb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -88,7 +88,7 @@ KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} # Select Keystone's token provider (and format) # Choose from 'uuid', 'pki', 'pkiz', or 'fernet' -KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-} +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') # Set Keystone interface configuration From ade65b813b33bfb71c707b5ac6995e8ca4099fb5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 1 Nov 2016 06:46:36 +0000 Subject: [PATCH 0202/1936] Updated from generate-devstack-plugins-list Change-Id: I1b357b2e668ff5ed56c5deb9d71709a7526e17ea --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 771f2ef6a7..7c97171806 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,6 +69,7 @@ ironic-inspector `git://git.openstack.org/openstack/ironic ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ karbor `git://git.openstack.org/openstack/karbor `__ karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ +keystone `git://git.openstack.org/openstack/keystone `__ kingbird `git://git.openstack.org/openstack/kingbird `__ kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ From c9c9d31d3eb98f3d6382cedfd2aebc75ce236d1f Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 15 Sep 2016 20:33:22 -0400 Subject: [PATCH 0203/1936] tempest: set network-feature-enabled.port_security Sets the port_security feature flag in tempest.conf if the port_security extension is enabled, which it's not by default in neutron but is set by default in devstack. This adds global variable for setting the port_security extension in ml2.conf and in tempest.conf so we only have to set this in one place. 
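The knob is the single global shown here with the default assumed by this change:

    NEUTRON_PORT_SECURITY=True

With that in place, ml2 gets port_security added to extension_drivers and tempest.conf gets network-feature-enabled.port_security set to the same value, so the two settings can no longer drift apart.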
Depends-On: I1efd5c838aa0d73cc6e8864e3041eea25850198d Change-Id: I6334b200e42edd785f74cfb41520627393039619 Related-Bug: #1624082 --- lib/neutron | 4 +++- lib/neutron_plugins/ml2 | 6 +++++- lib/tempest | 1 + stackrc | 3 +++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 53fc5fcd15..415344e88d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -162,7 +162,9 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security + if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security + fi fi # Neutron OVS or LB agent diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 7e8020930f..e429714f06 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -35,7 +35,11 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} # List of extension drivers to load, use '-' instead of ':-' to allow people to # explicitly override this to blank -Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} +if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} +else + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-} +fi # L3 Plugin to load for ML2 # For some flat network environment, they not want to extend L3 plugin. diff --git a/lib/tempest b/lib/tempest index 6ffc927176..0d018432af 100644 --- a/lib/tempest +++ b/lib/tempest @@ -387,6 +387,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY # Orchestration Tests if is_service_enabled heat; then diff --git a/stackrc b/stackrc index c419ef4b15..ea8b044faf 100644 --- a/stackrc +++ b/stackrc @@ -778,6 +778,9 @@ fi HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6") +# Whether or not the port_security extension should be enabled for Neutron. +NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY) + # SERVICE IP version # This is the IP version that services should be listening on, as well # as using to register their endpoints with keystone. From 2d9959c53b7866e0dbfb0bcda15c59db1c3de6b6 Mon Sep 17 00:00:00 2001 From: "Castulo J. Martinez" Date: Tue, 1 Nov 2016 13:34:20 -0700 Subject: [PATCH 0204/1936] Removing config values no longer in tempest.conf This commit removes some config values for tempest that no longer exist in tempest/config.py therefore are no longer needed in tempest.conf. 
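A rough way to double-check against a tempest checkout (a sketch; option names taken from the removals below):

    grep -nE 'admin_tenant_name|image_alt_ssh_user' tempest/config.py \
        || echo "no longer defined in tempest/config.py"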
Change-Id: I5778973012e57e8d9df9bf864590f8ed7fe05561 --- lib/tempest | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6ffc927176..3f9ebca225 100644 --- a/lib/tempest +++ b/lib/tempest @@ -267,8 +267,7 @@ function configure_tempest { if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then iniset $TEMPEST_CONFIG auth admin_username $admin_username iniset $TEMPEST_CONFIG auth admin_password "$password" - iniset $TEMPEST_CONFIG auth admin_tenant_name $admin_project_name - iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id + iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi if [ "$ENABLE_IDENTITY_V2" == "False" ]; then @@ -306,15 +305,10 @@ function configure_tempest { fi # Compute - iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method - # set the equiv validation option here as well to ensure they are - # in sync. They shouldn't be separate options. iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method if ! is_service_enabled n-cell && ! is_service_enabled neutron; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME @@ -419,9 +413,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" - # Telemetry - iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True" - # Validation iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 From 54482cd80b9d64f2d7043cd8560504d9d5752fc1 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Sun, 9 Oct 2016 09:47:48 +0000 Subject: [PATCH 0205/1936] Stop q-domua process when XenServer is used When using XenServer, it will create two neutron-openvswitch-agent q-agt and q-domua even it's single box environment, but it didn't stop the q-domua, this patch is to stop q-domua in unstack.sh Change-Id: I511ed534bfb7d5fe6136f6a0b33f1d749d30862c Closes-Bug: #1631721 --- lib/neutron-legacy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9e926a000f..938d5662ec 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -511,6 +511,10 @@ function start_neutron_agents { function stop_mutnauq_l2_agent { stop_process q-agt + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + stop_process q-domua + fi } # stop_mutnauq_other() - Stop running processes (non-screen) From 75bf9722392c9fc1ca7c6a95b99e8897c86d4271 Mon Sep 17 00:00:00 2001 From: Paulo Ewerton Date: Fri, 22 Jan 2016 19:13:31 +0000 Subject: [PATCH 0206/1936] Move default Keystone API version to v3 This patch sets Keystone v3 as default in services configuration files and in the openrc and stackrc scripts. 
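With the new default, sourcing openrc should hand clients a v3 auth URL; anything that still needs the v2 pipeline can keep it via local.conf (illustrative, using the existing knobs):

    # expected after 'source openrc':
    #   OS_IDENTITY_API_VERSION=3
    #   OS_AUTH_URL=http://<host>:5000/v3
    # opting back into v2:
    IDENTITY_API_VERSION=2.0
    ENABLE_IDENTITY_V2=True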
Change-Id: I24546f02067ea23d088d383b85e3a78d7b43f166 Partially-Implements: bp keystonev3 --- lib/glance | 2 +- lib/tempest | 12 +++++++----- openrc | 4 ++-- stackrc | 2 +- tools/create_userrc.sh | 2 +- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/glance b/lib/glance index 5259174799..da9cd43536 100644 --- a/lib/glance +++ b/lib/glance @@ -235,7 +235,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v2.0 + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v3 iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user diff --git a/lib/tempest b/lib/tempest index 0d018432af..166a521bd9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -271,13 +271,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - # Only Identity v3 is available; then skip Identity API v2 tests + if [ "$ENABLE_IDENTITY_V2" == "True" ]; then + # Run Identity API v2 tests ONLY if needed + iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} + else + # Skip Identity API v2 tests by default iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - # In addition, use v3 auth tokens for running all Tempest tests + # Use v3 auth tokens for running all Tempest tests iniset $TEMPEST_CONFIG identity auth_version v3 - else - iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} fi if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then diff --git a/openrc b/openrc index 8d8ae8b030..d1c61297a1 100644 --- a/openrc +++ b/openrc @@ -81,12 +81,12 @@ KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} # Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} +export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} # Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the 2.0 *identity api*. +# We currently recommend using the version 3 *identity api*. # export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION} diff --git a/stackrc b/stackrc index ea8b044faf..e4ff1b7eee 100644 --- a/stackrc +++ b/stackrc @@ -158,7 +158,7 @@ else fi # Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} +IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} # Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack # deployment will be deploying the Identity v2 pipelines. 
If this option is set diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 30d1a01577..f4a4edcbe2 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -152,7 +152,7 @@ if [ -z "$OS_USERNAME" ]; then fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v3/ fi if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then From 30ab23cd9b103470a7d89c4c88bccba789884c36 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 2 Nov 2016 16:30:31 -0400 Subject: [PATCH 0207/1936] Fix stevedore warning with neutron firewall_driver The initial start of the neutron OVS agent always prints a warning: WARNING stevedore.named [] Could not load neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver There's an alias for that in setup.cfg called iptables_hybrid that would avoid it. Change-Id: I3f5bf782f4f27dc123e462e494741a8a941641ec --- lib/neutron_plugins/ovs_base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 3cd6c85053..baf7d7f34b 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -83,10 +83,10 @@ function _neutron_ovs_base_configure_debug_command { function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid enable_kernel_bridge_firewall else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop fi } From f881a0e4ee995a2e2122e0e048cc8b7ccc88d6a0 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Tue, 27 Sep 2016 05:57:06 +0000 Subject: [PATCH 0208/1936] XenAPI: Enable linux bridge in Dom0 for neturon When using neutron network under xenserver, we must enable linux bridge in Dom0 as neutron will use linux bridge qbr in compute node for security group. But by default XenServer use openvswitch and disabled linux bridge. This patch is to remove this restriction. Change-Id: I0e8124ff2323810fdc46c717a750ce7e8f4aa0c6 --- lib/nova_plugins/hypervisor-xenserver | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index e5d25da3a3..a63e72e764 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -90,6 +90,10 @@ CRONTAB echo "install_conntrack_tools" } | $ssh_dom0 + if is_service_enabled neutron; then + # Remove restriction on linux bridge in Dom0 when neutron is enabled + $ssh_dom0 "rm -f /etc/modprobe.d/blacklist-bridge*" + fi } # install_nova_hypervisor() - Install external components From 62f29a98d70298372590957bcec7b9f7bc59d963 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Thu, 3 Nov 2016 10:10:03 +0000 Subject: [PATCH 0209/1936] Update stable branch example The stable/juno branch was deleted 11 months ago: http://lists.openstack.org/pipermail/openstack-announce/2015-December/000869.html Update the example to Newton, which should keep it valid for a while. 
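To see which stable branches currently exist before checking one out (a sketch; repo URL as published at the time of writing):

    git ls-remote --heads git://git.openstack.org/openstack-dev/devstack 'stable/*'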
Change-Id: I4cd8738862a529fd319be2ec5694d00defd94f84 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4ba4619c6d..ff5598b0c5 100644 --- a/README.md +++ b/README.md @@ -25,9 +25,9 @@ in a clean and disposable vm when you are first getting started. The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the -following to create a juno OpenStack cloud: +following to create a Newton OpenStack cloud: - git checkout stable/juno + git checkout stable/newton ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate From 4f11ff30cf2f7c674316fe8c98f1c006f397013f Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 27 Oct 2016 06:15:23 -0700 Subject: [PATCH 0210/1936] Adopt openstack client for neutron commands The neutron client is going to be deprecated during the Ocata timeframe, so it is time to start switching to the openstack client to invoke networking commands. use of neutron client in neutron-legacy has been left as is. The command for setting the router gateway is left as follow up. Change-Id: I0a63e03d7d4a08ad6c27f2729fc298322baab397 --- doc/source/guides/devstack-with-lbaas-v2.rst | 12 ++-- exercises/neutron-adv-test.sh | 12 ++-- lib/neutron_plugins/services/l3 | 76 ++++++++++---------- lib/tempest | 2 +- tools/ping_neutron.sh | 2 +- 5 files changed, 51 insertions(+), 53 deletions(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 0c439ad3ad..21bea9973b 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -66,21 +66,21 @@ Run stack.sh and do some sanity checks ./stack.sh . ./openrc - neutron net-list # should show public and private networks + openstack network list # should show public and private networks Create two nova instances that we can use as test http servers: :: #create nova instances on private network - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1 - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2 + nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 nova list # should show the nova instances just created #add secgroup rules to allow ssh etc.. - neutron security-group-rule-create default --protocol icmp - neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22 - neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80 + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 Set up a simple web server on each of these instances. 
ssh into each instance (username 'cirros', password 'cubswin:)') and run diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index e003c56331..bfd45eca5c 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -156,7 +156,7 @@ function get_role_id { function get_network_id { local NETWORK_NAME="$1" local NETWORK_ID - NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` + NETWORK_ID=`openstack network list | grep $NETWORK_NAME | awk '{print $2}'` echo $NETWORK_ID } @@ -234,9 +234,9 @@ function create_network { PROJECT_ID=$(get_project_id $PROJECT) source $TOP_DIR/openrc $PROJECT $PROJECT local NET_ID - NET_ID=$(neutron net-create --project-id $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip-version 4 --project-id $PROJECT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR + openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet" neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } @@ -325,10 +325,10 @@ function delete_network { PROJECT_ID=$(get_project_id $PROJECT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do - for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do + for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do delete_probe $net_id - neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete % - neutron net-delete $net_id + openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete % + openstack network delete $net_id done source $TOP_DIR/openrc demo demo } diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index aa61a109fa..ddc615589f 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -174,10 +174,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) + SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2) fi fi fi @@ -185,14 +185,14 @@ function create_neutron_initial_network { if 
is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create $PHYSICAL_NETWORK --tenant_id $project_id --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} $NET_ID $fixed_range_v4 | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -202,7 +202,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} $NET_ID $fixed_range_v6 | grep 'id' | get_field 2) + SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2) die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -212,7 +212,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create --tenant-id $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -230,23 +230,23 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a 
tenant-owned router. - ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create --tenant-id $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi - EXTERNAL_NETWORK_FLAGS="--router:external" + EXTERNAL_NETWORK_FLAGS="--external" if is_networking_extension_supported "auto-allocated-topology"; then - EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default" + EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default" fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) else - EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -268,16 +268,16 @@ function _neutron_create_private_subnet_v4 { if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - local subnet_params="--tenant-id $project_id " - subnet_params+="--ip_version 4 " + local subnet_params="--project $project_id " + subnet_params+="--ip-version 4 " if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi - subnet_params+="--name $PRIVATE_SUBNET_NAME " - subnet_params+="${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} " - subnet_params+="$NET_ID $fixed_range_v4" + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " + subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " + subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" echo $subnet_id } @@ -291,52 +291,50 @@ function _neutron_create_private_subnet_v6 { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$FIXED_RANGE_V6 fi - local subnet_params="--tenant-id $project_id " - 
subnet_params+="--ip_version 6 " + local subnet_params="--project $project_id " + subnet_params+="--ip-version 6 " if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " fi - subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " - subnet_params+="${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} " - subnet_params+="$NET_ID $fixed_range_v6 $ipv6_modes" + subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} " + subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6 $ipv6_modes} " + subnet_params+="--network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local subnet_params="--ip_version 4 " + local subnet_params="--ip-version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " fi - subnet_params+="--name $PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $FLOATING_RANGE " - subnet_params+="-- --enable_dhcp=False" + subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " + subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local subnet_params="--ip_version 6 " + local subnet_params="--ip-version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " - subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " - subnet_params+="-- --enable_dhcp=False" + subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " + subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $SUBNET_ID + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -371,7 +369,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface 
sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" local replace_range=${SUBNETPOOL_PREFIX_V4} if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then @@ -385,7 +383,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -416,7 +414,7 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ') + IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -446,6 +444,6 @@ function is_provider_network { function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time - EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value) + EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } diff --git a/lib/tempest b/lib/tempest index 6ffc927176..2444ee9967 100644 --- a/lib/tempest +++ b/lib/tempest @@ -242,7 +242,7 @@ function configure_tempest { # the public network (for floating ip access) is only available # if the extension is enabled. if is_networking_extension_supported 'external-net'; then - public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ + public_network_id=$(openstack network list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') fi diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index dba7502652..c75575406a 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -54,7 +54,7 @@ fi REMAINING_ARGS="${@:2}" # BUG: with duplicate network names, this fails pretty hard. 
-NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}') +NET_ID=$(openstack network list | grep "$NET_NAME" | awk '{print $2}') PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) # This runs a command inside the specific netns From f166081d0e4a3b04b157a25e94f0bb2a1769f813 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 1 Nov 2016 15:44:06 -0400 Subject: [PATCH 0211/1936] Always setup cells v2 Nova is going to land a database migration in Ocata under change I72fb724dc13e1a5f4e97c58915b538ba761c582d which enforces that at least the simple cells v2 setup is performed, which creates the cell mappings, cell0 and host mappings. Before we can land that change in Nova we have to make cells v2 setup a default in the integrated gate jobs. Depends-On: Ie44e615384df464516aa30b9044b5e54b7d995bb Change-Id: If1af9c478e8ea2420f2523a9bb8b70fafddc86b7 --- lib/nova | 7 +------ stack.sh | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/nova b/lib/nova index f38fb8b146..09b4aa5587 100644 --- a/lib/nova +++ b/lib/nova @@ -85,9 +85,6 @@ METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} -# Option to initialize CellsV2 environment -NOVA_CONFIGURE_CELLSV2=$(trueorfalse False NOVA_CONFIGURE_CELLSV2) - # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. SCHEDULER=${SCHEDULER:-filter_scheduler} @@ -681,9 +678,7 @@ function init_nova { if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova databases recreate_database nova - if [ "$NOVA_CONFIGURE_CELLSV2" != "False" ]; then - recreate_database nova_api_cell0 - fi + recreate_database nova_api_cell0 # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has # been run this migrates the "nova" and "nova_api_cell0" database. 
diff --git a/stack.sh b/stack.sh index fab2edd299..e908a9a335 100755 --- a/stack.sh +++ b/stack.sh @@ -1380,7 +1380,7 @@ check_libs_from_git # ---------------------- # Do this late because it requires compute hosts to have started -if is_service_enabled n-api && [ "$NOVA_CONFIGURE_CELLSV2" == "True" ]; then +if is_service_enabled n-api; then create_cell fi From 8c7cec52d0a263d6e5e54afdfdb73f408df68a75 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 4 Nov 2016 11:31:22 +0000 Subject: [PATCH 0212/1936] Updated from generate-devstack-plugins-list Change-Id: Id23209fa26e39d569b7e4b4e95d42e72fd92c32e --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 7c97171806..69311350ae 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -119,6 +119,7 @@ nova-docker `git://git.openstack.org/openstack/nova-d nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ +oaktree `git://git.openstack.org/openstack/oaktree `__ octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ From 7c0af1bfb8d39fe4edad4b79b930d90077f4454e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 10 Nov 2016 06:49:01 +0000 Subject: [PATCH 0213/1936] Updated from generate-devstack-plugins-list Change-Id: Ifce4376733d55452a1ce85df75a4203ac2f2aff9 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 69311350ae..6ece9978ac 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -78,6 +78,7 @@ magnum-ui `git://git.openstack.org/openstack/magnum manila `git://git.openstack.org/openstack/manila `__ masakari `git://git.openstack.org/openstack/masakari `__ mistral `git://git.openstack.org/openstack/mistral `__ +mixmatch `git://git.openstack.org/openstack/mixmatch `__ monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ @@ -85,6 +86,7 @@ monasca-log-api `git://git.openstack.org/openstack/monasc monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ murano `git://git.openstack.org/openstack/murano `__ networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ +networking-arista `git://git.openstack.org/openstack/networking-arista `__ networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe `__ networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn `__ networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ From bac2e4ddc28393fbd3c6f45248737893b5d0fd97 Mon Sep 17 00:00:00 2001 From: Carlos Goncalves Date: Fri, 11 Nov 2016 15:02:57 +0100 Subject: [PATCH 0214/1936] Update local.sh sample file to use OSC Besides updating to OSC CLI, this patch also fixes an argument name typo present before in 'nova keypair-add' (--pub_key should be --pub-key). Specifying $OS_PROJECT_NAME in case user is associated to multiple projects containing security groups with same name (e.g. 'default'). 
Change-Id: I776f6edfc4c6c798a39d3260827a18c695f05c87 --- samples/local.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/local.sh b/samples/local.sh index 634f6ddb17..9cd0bdcc17 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -36,7 +36,7 @@ if is_service_enabled nova; then # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` + openstack keypair create --public-key $i `hostname` break fi done @@ -53,8 +53,8 @@ if is_service_enabled nova; then MI_NAME=m1.micro # Create micro flavor if not present - if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 + if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then + openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1 fi @@ -62,7 +62,7 @@ if is_service_enabled nova; then # ---------- # Add tcp/22 and icmp to default security group - nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 - nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 + openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22 + openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp fi From bd42d4918eebc7ca2a72bb459bc5f8d953623e5f Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Sat, 12 Nov 2016 11:09:15 -0800 Subject: [PATCH 0215/1936] Remove call to undefined teardown_neutron_debug Previously the usage of neutron debug ports was removed by 5e01c47e4d671166b9396c507a7105a5ac8256dc but there was still call to teardown_neutron_debug. Recently a change to devstack-gate 1d6cc0771a3399300117f488e9d71e7ea46a4d82 caused that call to be triggered and breaking the gate-devstack-dsvm-updown job. This patch deletes the call and comments regarding setup_neutron_debug and teardown_neutron_debug. Change-Id: Ifdacb0cec1307db469bd66f551474539184cf2cd --- lib/neutron-legacy | 2 -- unstack.sh | 5 ----- 2 files changed, 7 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9e926a000f..613e0f1365 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -24,11 +24,9 @@ # - check_neutron_third_party_integration # - start_neutron_agents # - create_neutron_initial_network -# - setup_neutron_debug # # ``unstack.sh`` calls the entry points in this order: # -# - teardown_neutron_debug # - stop_neutron # - stop_neutron_third_party # - cleanup_neutron diff --git a/unstack.sh b/unstack.sh index f888896cb9..c05d1f0952 100755 --- a/unstack.sh +++ b/unstack.sh @@ -97,11 +97,6 @@ set -o xtrace # Phase: unstack run_phase unstack -if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - source $TOP_DIR/openrc - teardown_neutron_debug -fi - # Call service stop if is_service_enabled heat; then From 2b3ac6dc64a6d31a1dae2ede6483bf9f73a3918f Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Wed, 9 Nov 2016 12:50:00 +0800 Subject: [PATCH 0216/1936] Specify the correct image parameters for XenServer The deprecated AMI image file opts will be removed soon. See https://review.openstack.org/#/c/338377. So we can't use the fallback mechanism anymore. This patch is to specify the correct image parameters for XenServer. 
Change-Id: Ic287a3ed1725c42ea29022158bc9720c9a96533f --- lib/tempest | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 657c3ad592..56c97d52a9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -295,7 +295,6 @@ function configure_tempest { fi if [ "$VIRT_DRIVER" = "xenserver" ]; then iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" - iniset $TEMPEST_CONFIG scenario img_disk_format vhd fi # Image Features @@ -407,12 +406,20 @@ function configure_tempest { fi # Scenario - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} + if [ "$VIRT_DRIVER" = "xenserver" ]; then + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz" + iniset $TEMPEST_CONFIG scenario img_disk_format vhd + iniset $TEMPEST_CONFIG scenario img_container_format ovf + else + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} + SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" + fi iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR + iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img" iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd" iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" - iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # Validation iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} From 8f985b6ff1179d19872e83c97035fa49a8f660f6 Mon Sep 17 00:00:00 2001 From: xiaolihope Date: Wed, 18 May 2016 00:32:08 +0800 Subject: [PATCH 0217/1936] Remove heat code from devstack tree This removes all of the heat code from the devstack tree, in favor of the devstack plugin in Heat's tree. 
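For deployments that still need Heat, the replacement is its in-tree devstack plugin; a minimal local.conf sketch (confirm the repository URL against the Heat tree) is:

    [[local|localrc]]
    # Heat is no longer set up by devstack itself; enable its plugin instead
    enable_plugin heat git://git.openstack.org/openstack/heat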
Depends-On: I4bed1e5cef5afa7b049b07640086a86a3f881e13 Depends-On: Ic392bcc24bc374ee8511a94f1d8f6ac23131c7e3 Change-Id: I5b60422bf1f5fa78aa8f3383f7a222e0356d9e42 --- clean.sh | 3 +- files/apache-heat-api-cfn.template | 27 -- files/apache-heat-api-cloudwatch.template | 27 -- files/apache-heat-api.template | 27 -- files/apache-heat-pip-repo.template | 15 - files/debs/heat | 1 - lib/heat | 467 ---------------------- stack.sh | 27 -- stackrc | 8 - tests/test_libs_from_pypi.sh | 2 +- unstack.sh | 5 - 11 files changed, 2 insertions(+), 607 deletions(-) delete mode 100644 files/apache-heat-api-cfn.template delete mode 100644 files/apache-heat-api-cloudwatch.template delete mode 100644 files/apache-heat-api.template delete mode 100644 files/apache-heat-pip-repo.template delete mode 100644 files/debs/heat delete mode 100644 lib/heat diff --git a/clean.sh b/clean.sh index bace3f53fe..b59635a821 100755 --- a/clean.sh +++ b/clean.sh @@ -49,7 +49,6 @@ source $TOP_DIR/lib/nova source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/neutron-legacy @@ -108,7 +107,7 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th fi # Clean out /etc -sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron /etc/openstack/ +sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/ # Clean out tgt sudo rm -f /etc/tgt/conf.d/* diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template deleted file mode 100644 index ab33c66f7e..0000000000 --- a/files/apache-heat-api-cfn.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api-cfn - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template deleted file mode 100644 index 06c91bbdb1..0000000000 --- a/files/apache-heat-api-cloudwatch.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api-cloudwatch - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template deleted file mode 100644 index 4924b3978b..0000000000 --- a/files/apache-heat-api.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - 
ErrorLog /var/log/%APACHE_NAME%/heat-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template deleted file mode 100644 index d88ac3e35a..0000000000 --- a/files/apache-heat-pip-repo.template +++ /dev/null @@ -1,15 +0,0 @@ -Listen %HEAT_PIP_REPO_PORT% - - - DocumentRoot %HEAT_PIP_REPO% - - DirectoryIndex index.html - Require all granted - Order allow,deny - allow from all - - - ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log - LogLevel warn - CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined - diff --git a/files/debs/heat b/files/debs/heat deleted file mode 100644 index 1ecbc780b1..0000000000 --- a/files/debs/heat +++ /dev/null @@ -1 +0,0 @@ -gettext # dist:trusty diff --git a/lib/heat b/lib/heat deleted file mode 100644 index 0863128f81..0000000000 --- a/lib/heat +++ /dev/null @@ -1,467 +0,0 @@ -#!/bin/bash -# -# lib/heat -# Install and start **Heat** service - -# To enable, add the following to localrc -# -# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng - -# Dependencies: -# (none) - -# stack.sh -# --------- -# - install_heatclient -# - install_heat -# - configure_heatclient -# - configure_heat -# - _config_heat_apache_wsgi -# - init_heat -# - start_heat -# - stop_heat -# - cleanup_heat - -# Save trace setting -_XTRACE_HEAT=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# set up default directories -GITDIR["python-heatclient"]=$DEST/python-heatclient - -# Toggle for deploying Heat-API under HTTPD + mod_wsgi -HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False} - -HEAT_DIR=$DEST/heat -HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools -HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates -OCC_DIR=$DEST/os-collect-config -ORC_DIR=$DEST/os-refresh-config -OAC_DIR=$DEST/os-apply-config - -HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo -HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899} - -HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} -HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE) -HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON) -HEAT_CONF_DIR=/etc/heat -HEAT_CONF=$HEAT_CONF_DIR/heat.conf -HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d -HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates -HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} -HEAT_API_PORT=${HEAT_API_PORT:-8004} -HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat} -HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER} -HEAT_TRUSTEE_PASSWORD=${HEAT_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD} -HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default} - -# Support entry points installation of console scripts -HEAT_BIN_DIR=$(get_python_exec_prefix) - -# other default options -if [[ "$HEAT_STANDALONE" = "True" ]]; then - # for standalone, use defaults which require no service user - HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN) - HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password} - if [[ ${HEAT_DEFERRED_AUTH} != "password" ]]; then - # Heat does not support keystone trusts when deployed in - # standalone mode - die $LINENO \ - 'HEAT_DEFERRED_AUTH can only be set to "password" when HEAT_STANDALONE is True.' 
- fi -else - HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) - HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-} -fi -HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins} -ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-} - -# Functions -# --------- - -# Test if any Heat services are enabled -# is_heat_enabled -function is_heat_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 - return 1 -} - -# cleanup_heat() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_heat { - sudo rm -rf $HEAT_AUTH_CACHE_DIR - sudo rm -rf $HEAT_ENV_DIR - sudo rm -rf $HEAT_TEMPLATES_DIR - sudo rm -rf $HEAT_CONF_DIR -} - -# configure_heat() - Set config files, create data dirs, etc -function configure_heat { - - sudo install -d -o $STACK_USER $HEAT_CONF_DIR - # remove old config files - rm -f $HEAT_CONF_DIR/heat-*.conf - - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} - HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} - HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} - HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} - HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini - HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json - - cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE - cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE - - # common options - iniset_rpc_backend heat $HEAT_CONF - iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition - iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_CONF database connection `database_connection_url heat` - iniset $HEAT_CONF DEFAULT auth_encryption_key $(generate_hex_string 16) - - iniset $HEAT_CONF DEFAULT region_name_for_services "$REGION_NAME" - - # logging - iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ] ; then - # Add color to logging output - setup_colorized_logging $HEAT_CONF DEFAULT tenant user - fi - - if [ ! 
-z "$HEAT_DEFERRED_AUTH" ]; then - iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH - fi - - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - _config_heat_apache_wsgi - fi - - if [[ "$HEAT_STANDALONE" = "True" ]]; then - iniset $HEAT_CONF paste_deploy flavor standalone - iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s" - else - configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR - fi - - # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure - # the section for the client plugin associated with the trustee - if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then - iniset $HEAT_CONF trustee auth_type password - iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI - iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER - iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD - iniset $HEAT_CONF trustee user_domain_id $HEAT_TRUSTEE_DOMAIN - fi - - # clients_keystone - iniset $HEAT_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI - - # OpenStack API - iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT - iniset $HEAT_CONF heat_api workers "$API_WORKERS" - - # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT - - # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT - - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_nova ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE - fi - - if [[ "$HEAT_ENABLE_ADOPT_ABANDON" = "True" ]]; then - iniset $HEAT_CONF DEFAULT enable_stack_adopt true - iniset $HEAT_CONF DEFAULT enable_stack_abandon true - fi - - iniset $HEAT_CONF cache enabled "True" - iniset $HEAT_CONF cache backend "dogpile.cache.memory" - - sudo install -d -o $STACK_USER $HEAT_ENV_DIR $HEAT_TEMPLATES_DIR - - # copy the default environment - cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ - - # copy the default templates - cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ - - # Enable heat plugins. - # NOTE(nic): The symlink nonsense is necessary because when - # plugins are installed in "developer mode", the final component - # of their target directory is always "resources", which confuses - # Heat's plugin loader into believing that all plugins are named - # "resources", and therefore are all the same plugin; so it - # will only load one of them. Linking them all to a common - # location with unique names avoids that type of collision, - # while still allowing the plugins to be edited in-tree. - local err_count=0 - - if [ -n "$ENABLE_HEAT_PLUGINS" ]; then - mkdir -p $HEAT_PLUGIN_DIR - # Clean up cruft from any previous runs - rm -f $HEAT_PLUGIN_DIR/* - iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR - fi - - for heat_plugin in $ENABLE_HEAT_PLUGINS; do - if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then - setup_package $HEAT_DIR/contrib/$heat_plugin -e - ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin - else - : # clear retval on the test so that we can roll up errors - err $LINENO "Requested Heat plugin(${heat_plugin}) not found." 
- err_count=$(($err_count + 1)) - fi - done - [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed." -} - -# init_heat() - Initialize database -function init_heat { - - # (re)create heat database - recreate_database heat - - $HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync - create_heat_cache_dir -} - -# create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir { - # Create cache dirs - sudo install -d -o $STACK_USER $HEAT_AUTH_CACHE_DIR -} - -# install_heatclient() - Collect source and prepare -function install_heatclient { - if use_library_from_git "python-heatclient"; then - git_clone_by_name "python-heatclient" - setup_dev_lib "python-heatclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-heatclient"]}/tools/,/etc/bash_completion.d/}heat.bash_completion - fi -} - -# install_heat() - Collect source and prepare -function install_heat { - git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH - setup_develop $HEAT_DIR - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - fi -} - -# install_heat_other() - Collect source and prepare -function install_heat_other { - git_clone $HEAT_CFNTOOLS_REPO $HEAT_CFNTOOLS_DIR $HEAT_CFNTOOLS_BRANCH - git_clone $HEAT_TEMPLATES_REPO $HEAT_TEMPLATES_REPO_DIR $HEAT_TEMPLATES_BRANCH - git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH - git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH - git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH -} - -# start_heat() - Start running processes, including screen -function start_heat { - run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF" - - # If the site is not enabled then we are in a grenade scenario - local enabled_site_file - enabled_site_file=$(apache_site_config_for heat-api) - if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - enable_apache_site heat-api - enable_apache_site heat-api-cfn - enable_apache_site heat-api-cloudwatch - restart_apache_server - tail_log heat-api /var/log/$APACHE_NAME/heat-api.log - tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log - tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log - else - run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF" - run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF" - run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF" - fi -} - -# stop_heat() - Stop running processes -function stop_heat { - # Kill the screen windows - stop_process h-eng - - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - disable_apache_site heat-api - disable_apache_site heat-api-cfn - disable_apache_site heat-api-cloudwatch - restart_apache_server - else - local serv - for serv in h-api h-api-cfn h-api-cw; do - stop_process $serv - done - fi - -} - -# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_heat_apache_wsgi { - sudo rm -f $(apache_site_config_for heat-api) - sudo rm -f $(apache_site_config_for heat-api-cfn) - sudo rm -f $(apache_site_config_for heat-api-cloudwatch) -} - -# _config_heat_apache_wsgi() - Set WSGI config files of Heat -function _config_heat_apache_wsgi { - - local heat_apache_conf - heat_apache_conf=$(apache_site_config_for heat-api) - local heat_cfn_apache_conf - heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn) - local heat_cloudwatch_apache_conf - heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch) - local heat_ssl="" - local heat_certfile="" - local 
heat_keyfile="" - local heat_api_port=$HEAT_API_PORT - local heat_cfn_api_port=$HEAT_API_CFN_PORT - local heat_cw_api_port=$HEAT_API_CW_PORT - local venv_path="" - - sudo cp $FILES/apache-heat-api.template $heat_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_apache_conf - - sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_cfn_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_cfn_apache_conf - - sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_cw_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_cloudwatch_apache_conf -} - - -# create_heat_accounts() - Set up common required heat accounts -function create_heat_accounts { - if [[ "$HEAT_STANDALONE" != "True" ]]; then - - create_service_user "heat" "admin" - get_or_create_service "heat" "orchestration" "Heat Orchestration Service" - get_or_create_endpoint \ - "orchestration" \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" - - get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service" - get_or_create_endpoint \ - "cloudformation" \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" - - # heat_stack_user role is for users created by Heat - get_or_create_role "heat_stack_user" - fi - - if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then - # domain -> heat and user -> heat_domain_admin - domain_id=$(get_or_create_domain heat 'Owns users and projects created by heat') - iniset $HEAT_CONF DEFAULT stack_user_domain_id ${domain_id} - get_or_create_user heat_domain_admin $SERVICE_PASSWORD heat - get_or_add_user_domain_role admin heat_domain_admin heat - iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin - iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD - fi -} - -# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects -function build_heat_pip_mirror { - local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR" - local projpath proj package - - rm -rf $HEAT_PIP_REPO - mkdir -p $HEAT_PIP_REPO - - echo "" > $HEAT_PIP_REPO/index.html - for projpath in $project_dirs; do - proj=$(basename $projpath) - mkdir -p $HEAT_PIP_REPO/$proj - pushd $projpath - rm -rf dist - python setup.py sdist - pushd dist - package=$(ls *) - mv $package $HEAT_PIP_REPO/$proj/$package - popd - - echo "$package" > $HEAT_PIP_REPO/$proj/index.html - echo "$proj
    " >> $HEAT_PIP_REPO/index.html - - popd - done - - echo "" >> $HEAT_PIP_REPO/index.html - - local heat_pip_repo_apache_conf - heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo) - - sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf - sudo sed -e " - s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g; - s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - " -i $heat_pip_repo_apache_conf - enable_apache_site heat_pip_repo - restart_apache_server - sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $HEAT_PIP_REPO_PORT -j ACCEPT || true -} - -# Restore xtrace -$_XTRACE_HEAT - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stack.sh b/stack.sh index 54485b60b9..aa3b1f6709 100755 --- a/stack.sh +++ b/stack.sh @@ -572,7 +572,6 @@ source $TOP_DIR/lib/nova source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap @@ -800,9 +799,6 @@ fi if is_service_enabled neutron nova horizon; then install_neutronclient fi -if is_service_enabled heat horizon; then - install_heatclient -fi # Install shared libraries if is_service_enabled cinder nova; then @@ -873,13 +869,6 @@ if is_service_enabled horizon; then stack_install_service horizon fi -if is_service_enabled heat; then - stack_install_service heat - install_heat_other - cleanup_heat - configure_heat -fi - if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then configure_CA init_CA @@ -1071,10 +1060,6 @@ if is_service_enabled keystone; then create_swift_accounts fi - if is_service_enabled heat; then - create_heat_accounts - fi - fi # Write a clouds.yaml file @@ -1284,18 +1269,6 @@ if is_service_enabled cinder; then create_volume_types fi -# Configure and launch Heat engine, api and metadata -if is_service_enabled heat; then - # Initialize heat - echo_summary "Configuring Heat" - init_heat - echo_summary "Starting Heat" - start_heat - if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then - echo_summary "Building Heat pip mirror" - build_heat_pip_mirror - fi -fi if is_service_enabled horizon; then echo_summary "Starting Horizon" diff --git a/stackrc b/stackrc index ea8b044faf..b47cd18d13 100644 --- a/stackrc +++ b/stackrc @@ -239,10 +239,6 @@ CINDER_BRANCH=${CINDER_BRANCH:-master} GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} GLANCE_BRANCH=${GLANCE_BRANCH:-master} -# heat service -HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} -HEAT_BRANCH=${HEAT_BRANCH:-master} - # django powered web control panel for openstack HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} HORIZON_BRANCH=${HORIZON_BRANCH:-master} @@ -301,10 +297,6 @@ GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master} GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master} -# python heat client library -GITREPO["python-heatclient"]=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} -GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master} - # ironic client GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index fb55023886..415fec506d 100755 --- a/tests/test_libs_from_pypi.sh +++ 
b/tests/test_libs_from_pypi.sh @@ -32,7 +32,7 @@ done ALL_LIBS="python-novaclient oslo.config pbr oslo.context" ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf" ALL_LIBS+=" python-glanceclient python-ironicclient" -ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" +ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization django_openstack_auth" diff --git a/unstack.sh b/unstack.sh index c05d1f0952..6cd039f638 100755 --- a/unstack.sh +++ b/unstack.sh @@ -66,7 +66,6 @@ source $TOP_DIR/lib/nova source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap @@ -99,10 +98,6 @@ run_phase unstack # Call service stop -if is_service_enabled heat; then - stop_heat -fi - if is_service_enabled nova; then stop_nova fi From c30b8def82c14e161c0242307e117697e24e1ece Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 14 Nov 2016 13:23:14 +0000 Subject: [PATCH 0218/1936] Move certificate setup earlier in deployment Currently the x509 certificate setup is done after all the openstack services have been deployed. This is OK because none of the services require that the x509 certs exist when they are being deployed. With the integration of TLS into the nova novnc proxy (and later spice & serial proxy) service, x509 certs will need to exist before Nova is deployed. The CA setup must thus be moved earlier in the devstack deployment flow, prior to the setup of any services. One part of the CA setup, however, fixes up the global cert bundle locations and this can only be done after the python requests module is install, thus must remain in its current location. Change-Id: Idcd264fb73bb88dc2f4280c53c013dfe4364afff --- lib/tls | 1 - stack.sh | 14 ++++++++------ tools/make_cert.sh | 1 + 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/tls b/lib/tls index 40f3e81438..14cdf19d8e 100644 --- a/lib/tls +++ b/lib/tls @@ -201,7 +201,6 @@ subjectAltName = \$ENV::SUBJECT_ALT_NAME # Create root and intermediate CAs # init_CA function init_CA { - fix_system_ca_bundle_path # Ensure CAs are built make_root_CA $ROOT_CA_DIR make_int_CA $INT_CA_DIR $ROOT_CA_DIR diff --git a/stack.sh b/stack.sh index 54485b60b9..f20c9d9ae3 100755 --- a/stack.sh +++ b/stack.sh @@ -809,6 +809,13 @@ if is_service_enabled cinder nova; then install_os_brick fi +# Setup TLS certs +if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then + configure_CA + init_CA + init_cert +fi + # Install middleware install_keystonemiddleware @@ -881,14 +888,9 @@ if is_service_enabled heat; then fi if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then - configure_CA - init_CA - init_cert - # Add name to ``/etc/hosts``. - # Don't be naive and add to existing line! 
+ fix_system_ca_bundle_path fi - # Extras Install # -------------- diff --git a/tools/make_cert.sh b/tools/make_cert.sh index 2628b40524..e91464fc0f 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -45,6 +45,7 @@ DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem # Make sure the CA is set up configure_CA +fix_system_ca_bundle_path init_CA # Create the server cert From caa822fce46d0fab1163bbae4fb5d520745aef20 Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Fri, 11 Nov 2016 18:23:17 +0800 Subject: [PATCH 0219/1936] XenAPI: correct the ml2 configuration for neutron XenAPI requires two instances of L2Agent: the standard one manages OVS bridges in DomU and the service name is called as q-agt in Devstack; the other new L2Agent manages OVS bridges in Dom0 and the service name is called as q-domuA. In order to support the new agent q-domuA, it requires some XenAPI-specific configurations. But unfortunately those XenAPI-specific configurations were configured in the standard agent file, meaning other changes made to the standard agent file would not have the correct effect. So it has caused issues, for example, floating IP addresses are not reachable. This fix is to move the XenAPI-specific configurations from the stardard agent configuration file to the XenAPI-specific agent configuration file so that it won't impact the standard agent's behavior. Change-Id: I45944e84a1f81d016aa00da6d782801ee8457ea4 --- lib/neutron_plugins/openvswitch_agent | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index e27b8a634a..0af2f2035c 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -81,19 +81,19 @@ function neutron_plugin_configure_plugin_agent { # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. 
# XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND" - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "" + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_DOM0_COMMAND" + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper_daemon "" # Disable minimize polling, so that it can always detect OVS and Port changes # This is a problem of xenserver + neutron, bug has been reported # https://bugs.launchpad.net/neutron/+bug/1495423 - iniset /$Q_PLUGIN_CONF_FILE agent minimize_polling False + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent minimize_polling False # Set "physical" mapping - iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" + iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 - iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE + iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $XEN_INTEGRATION_BRIDGE # Set up domU's L2 agent: @@ -107,11 +107,11 @@ function neutron_plugin_configure_plugin_agent { sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE" + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE" # Set integration bridge to domU's - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE # Set root wrap - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND" + iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" fi iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE From df5e69114fa2af663c50e734b4a842c727e6a7e9 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 15 Nov 2016 15:02:23 -0800 Subject: [PATCH 0220/1936] Stop setting route pointing back to tenant router This removes the logic to add a route pointing to the IPv4 tenant private network range since the router is performing SNAT. If reaching the IPs via the route worked at all, it was by accident since this behavior is certainly not guaranteed by Neutron. 
Change-Id: If45e3fc15c050cfbac11b57c1eaf137dd7ed816f --- lib/neutron_plugins/services/l3 | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index aa61a109fa..e9ee03f865 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -373,11 +373,6 @@ function _neutron_configure_router_v4 { fi ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - local replace_range=${SUBNETPOOL_PREFIX_V4} - if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then - replace_range=${FIXED_RANGE} - fi - sudo ip route replace $replace_range via $ROUTER_GW_IP fi _neutron_set_router_id fi From 4bfbc291eefd92d8b7885f36275b7ff541e067ab Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 15 Nov 2016 17:26:05 -0800 Subject: [PATCH 0221/1936] Derive IP ranges from new ADDRS_SAFE_TO_USE vars The switch to using subnetpools caused quite a bit of confusion because it didn't respect the value of FIXED_RANGE. This caused conflicts in the gate with it's default IPv4 value of 10.0.0.0/8. This patch does a few things to address the issue: * It introduces the IPV4_ADDRS_SAFE_TO_USE and IPV6_ADDRS_SAFE_TO_USE values and adjusts all of the FIXED_RANGE and SUBNETPOOL_PREFIX values to dervive from them by default. * This addresses the concern that was raised about implying that SUBNETPOOL_PREFIX and FIXED_RANGE are equivalent when setting SUBNETPOOL_PREFIX=FIXED_RANGE by default. Now we have a new value for the operator specify a chunk of addresses that are safe to use for private networks without implementation implications. * Backwards compatibility is maintained by alloing users to override override all of these values. * The default for IPV4_ADDRS_SAFE_TO_USE uses /22 instead of /24 * Because we want to be able to use subnetpools for auto allocated topologies and we want to be able to have a large chunk of instances on each network, we needed a little more breathing room in the default v4 network size. * SUBNET_POOL_SIZE_V4 default is changed from 24 to 26 * In conjuction with this change and the one above, the default subnetpool will support up to 16 64-address allocations. * This should be enough to cover any regular gate scenarios. * If someone wants a bigger/smaller subnet, they can ask for that in the API request, change this value themselves, or use a different network entirely. * FIXED_RANGE_V6 defaults to a max prefix of /64 from IPV6_ADDRS_SAFE_TO_USE * This avoids the private subnet in the non-subnetpool case from being larger than /64 to avoid issues identified in rfc 7421. * Users can still explicitly set this value to whatever they want. This 'max' behavior is only for the default. * This allows IPV6_ADDRS_SAFE_TO_USE to default to a /56, which leaves tons of room for v6 subnetpools. 
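A minimal usage sketch of the new variables (the address blocks are illustrative only):

    [[local|localrc]]
    # one private IPv4 block devstack may carve up freely; FIXED_RANGE and
    # SUBNETPOOL_PREFIX_V4 now default to this value
    IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/22
    # a /56 leaves room for many /64 subnetpool allocations, while
    # FIXED_RANGE_V6 defaults to only the first /64 of it
    IPV6_ADDRS_SAFE_TO_USE=fd12:3456:789a::/56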
Closes-Bug: #1629133 Change-Id: I7b32804d47bec743c0b13e434e6a7958728896ea --- doc/source/configuration.rst | 16 ++++++++-------- doc/source/guides/neutron.rst | 12 ++++++------ doc/source/networking.rst | 21 ++++++++++++++++++++- lib/neutron_plugins/services/l3 | 11 +++++++---- stackrc | 3 ++- 5 files changed, 43 insertions(+), 20 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 22809ebd7a..bc3f5584b7 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -63,7 +63,7 @@ exists it will be used instead to preserve backward-compatibility. :: [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 + IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24 ADMIN_PASSWORD=speciale LOGFILE=$DEST/logs/stack.sh.log @@ -161,8 +161,8 @@ values that most often need to be set. - no logging - pre-set the passwords to prevent interactive prompts -- move network ranges away from the local network (``FIXED_RANGE`` and - ``FLOATING_RANGE``, commented out below) +- move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE`` + and ``FLOATING_RANGE``, commented out below) - set the host IP if detection is unreliable (``HOST_IP``, commented out below) @@ -173,7 +173,7 @@ values that most often need to be set. DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD - #FIXED_RANGE=172.31.1.0/24 + #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24 #FLOATING_RANGE=192.168.20.0/25 #HOST_IP=10.3.4.5 @@ -537,12 +537,12 @@ behavior: IPV6_RA_MODE=slaac IPV6_ADDRESS_MODE=slaac - FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64 + IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56 IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1 -*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be -configured with any valid IPv6 prefix. The default values make use of -an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. +*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` +can be configured with any valid IPv6 prefix. The default values make +use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. Service Version ~~~~~~~~~~~~~~~ diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index bc6816c7e6..092809a1cf 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -79,7 +79,7 @@ serving as a hypervisor for guest instances. ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" - FIXED_RANGE="10.0.0.0/24" + IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22" Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 PUBLIC_NETWORK_GATEWAY="172.18.161.1" PUBLIC_INTERFACE=eth0 @@ -387,17 +387,17 @@ controller node. ## Neutron Networking options used to create Neutron Subnets - FIXED_RANGE="203.0.113.0/24" + IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24" NETWORK_GATEWAY=203.0.113.1 PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 USE_SUBNETPOOL=False -In this configuration we are defining FIXED_RANGE to be a +In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a publicly routed IPv4 subnet. In this specific instance we are using the special TEST-NET-3 subnet defined in `RFC 5737 `_, -which is used for documentation. In your DevStack setup, FIXED_RANGE +which is used for documentation. In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE would be a public IP address range that you or your organization has allocated to you, so that you could access your instances from the public internet. 
@@ -524,7 +524,7 @@ setup, with small modifications for the interface mappings. ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" - FIXED_RANGE="10.0.0.0/24" + IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24" Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 PUBLIC_NETWORK_GATEWAY="172.18.161.1" PUBLIC_INTERFACE=eth0 @@ -573,7 +573,7 @@ you do not require them. Q_AGENT=macvtap PHYSICAL_NETWORK=default - FIXED_RANGE="203.0.113.0/24" + IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24" NETWORK_GATEWAY=203.0.113.1 PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 1d56c3367e..2301a2e931 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -15,7 +15,8 @@ If you don't specify any configuration you will get the following: * neutron (including l3 with openvswitch) * private project networks for each openstack project * a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1 -* the demo project configured with fixed ips on 10.0.0.0/24 +* the demo project configured with fixed ips on a subnet allocated from + the 10.0.0.0/22 range * a ``br-ex`` interface controlled by neutron for all it's networking (this is not connected to any physical interfaces). * DNS resolution for guests based on the resolv.conf for you host @@ -95,3 +96,21 @@ the range of floating ips that will be handed out. As we are sharing your existing network, you'll want to give it a slice that your local dhcp server is not allocating. Otherwise you could easily have conflicting ip addresses, and cause havoc with your local network. + + +Private Network Addressing +========================== + +The private networks addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE`` +and the ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify one +single variable of safe internal IPs to use that will be referenced whether or +not subnetpools are in use. + +For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to +the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly. + +For IPv6, ``FIXED_RANGE`` will default to the first /64 of the value of +``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller, +``FIXED_RANGE`` will just use the value of that directly. +``SUBNETPOOL_PREFIX_V6`` will just default to the value of +``IPV6_ADDRS_SAFE_TO_USE`` directly. diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index ddc615589f..56eb22387b 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -70,7 +70,10 @@ IPV6_RA_MODE=${IPV6_RA_MODE:-slaac} IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac} IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} -FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64} +IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56} +# if we got larger than a /64 safe to use, we only use the first /64 to +# avoid side effects outlined in rfc7421 +FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print ($2>63 ? 
$2 : 64) }')} IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-} IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} @@ -86,10 +89,10 @@ PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"} -SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/16} -SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48} +SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE} +SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} -SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24} +SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') diff --git a/stackrc b/stackrc index ea8b044faf..8210eb9ad2 100644 --- a/stackrc +++ b/stackrc @@ -765,7 +765,8 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack # in an OpenStack cloud that uses either of these address ranges internally. FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} +IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22} +FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP_IFACE=${HOST_IP_IFACE:-} HOST_IP=${HOST_IP:-} From 21d84c29b22fb69b00739ae40d69f80d5182a94e Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Fri, 11 Nov 2016 15:26:11 -0800 Subject: [PATCH 0222/1936] If plugin is enabled multiple times fail When using the enable_plugin command and grenade jobs it can be easy to enable the same plugin twice, as the grenade job has a registration section and the configuration in project-config can also enable it due to code-reuse in project-config. If a plugin is enabled twice it will likely fail, though it won't be obvious that it was due to the plugin being enabled multiple times. This change makes it so if it sees the same plugin name is enabled more than once it will die and an error message outputted. Change-Id: I9f1d7e58b861b04473b6a57c9ad404203fb7277a --- functions-common | 3 +++ 1 file changed, 3 insertions(+) diff --git a/functions-common b/functions-common index 87e6bb453d..eeced621a9 100644 --- a/functions-common +++ b/functions-common @@ -1767,6 +1767,9 @@ function enable_plugin { local name=$1 local url=$2 local branch=${3:-master} + if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then + die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}" + fi DEVSTACK_PLUGINS+=",$name" GITREPO[$name]=$url GITDIR[$name]=$DEST/$name From ea1abcde180ec0f7fbcec604650f0e95db503102 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 16 Nov 2016 18:47:44 +0100 Subject: [PATCH 0223/1936] Swift: fix rsyncd.conf, ports were incorrect Swift port base was changed in Ifd95b99004aead5ddc8ae1a8dd3ccd9c4f2abe91 but we forgot to update the rsyncd.conf. This patch update the rsyncd.conf file. 
Change-Id: Id457c047c672a810c4c0c7721b6beeb01b719879 --- files/swift/rsyncd.conf | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c670531b31..c49f716fa7 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -4,76 +4,76 @@ log file = %SWIFT_DATA_DIR%/logs/rsyncd.log pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 -[account6012] +[account6612] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6612.lock -[account6022] +[account6622] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6622.lock -[account6032] +[account6632] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6632.lock -[account6042] +[account6642] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6642.lock -[container6011] +[container6611] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6611.lock -[container6021] +[container6621] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6621.lock -[container6031] +[container6631] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6631.lock -[container6041] +[container6641] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6641.lock -[object6010] +[object6613] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6613.lock -[object6020] +[object6623] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6623.lock -[object6030] +[object6633] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6633.lock -[object6040] +[object6643] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6643.lock From 56b39126853ff47525c89c7ba3c02422a639ddc3 Mon Sep 17 00:00:00 2001 From: "Gary W. Smith" Date: Wed, 16 Nov 2016 22:03:43 -0800 Subject: [PATCH 0224/1936] Use -y on zypper remove to avoid hanging When using zypper remove, include the -y option to avoid stack.sh from hanging waiting for user confirmation. Due to output buffering, the script could hang before giving the user the prompt to enter Y to continue, making it unclear why the script was hanging. 
Change-Id: I5ea761e5ae0829439953c385f8e7d0546acba886 Closes-Bug: 1642736 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 8e14b5ec7b..d5014fd80a 100644 --- a/functions-common +++ b/functions-common @@ -1318,7 +1318,7 @@ function uninstall_package { elif is_fedora; then sudo ${YUM:-yum} remove -y "$@" ||: elif is_suse; then - sudo zypper rm "$@" ||: + sudo zypper remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi From f5069f35a176109e3e28347ce7e0418ffc57c4cb Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 8 Nov 2016 12:10:12 +0100 Subject: [PATCH 0225/1936] lib/lvm: don't use $VOLUME_GROUP variable anymore We should be using $VOLUME_GROUP_NAME instead since Icehouse. $VOLUME_GROUP_NAME has been introduced in I93b8ef32832269d730c76a6dc24ddb4f20c6d9df and $VOLUME_GROUP is nowadays only use as a fallback to $VOLUME_GROUP_NAME. As a code comment in lib/lvm says it we kept the $VOLUME_GROUP around as "for compatibility with icehouse-generation Grenade". Icehouse is long gone so now seems a good time to remove any usage of $VOLUME_GROUP. Change-Id: Id3051b5a196c45266c39fde4f08401aaacf0f6bd --- doc/source/configuration.rst | 4 ++-- doc/source/guides/multinode-lab.rst | 6 +++--- lib/lvm | 6 +----- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 22809ebd7a..391e5bec70 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -703,13 +703,13 @@ Cinder ~~~~~~ The logical volume group used to hold the Cinder-managed volumes is -set by ``VOLUME_GROUP``, the logical volume name prefix is set with +set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set with ``VOLUME_BACKING_FILE_SIZE``. :: - VOLUME_GROUP="stack-volumes" + VOLUME_GROUP_NAME="stack-volumes" VOLUME_NAME_PREFIX="volume-" VOLUME_BACKING_FILE_SIZE=10250M diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 8751eb8d6a..81c08dc747 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -294,10 +294,10 @@ created inside OpenStack. The size can be overridden by setting ``stack-volumes`` can be pre-created on any physical volume supported by Linux's LVM. The name of the volume group can be changed by setting -``VOLUME_GROUP`` in ``localrc``. ``stack.sh`` deletes all logical -volumes in ``VOLUME_GROUP`` that begin with ``VOLUME_NAME_PREFIX`` as +``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical +volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as part of cleaning up from previous runs. It is recommended to not use the -root volume group as ``VOLUME_GROUP``. +root volume group as ``VOLUME_GROUP_NAME``. 
The details of creating the volume group depends on the server hardware involved but looks something like this: diff --git a/lib/lvm b/lib/lvm index d35a76fb5f..99c7ba9b9f 100644 --- a/lib/lvm +++ b/lib/lvm @@ -23,11 +23,7 @@ set +o xtrace # Defaults # -------- # Name of the lvm volume groups to use/create for iscsi volumes -# This monkey-motion is for compatibility with icehouse-generation Grenade -# If ``VOLUME_GROUP`` is set, use it, otherwise we'll build a VG name based -# on ``VOLUME_GROUP_NAME`` that includes the backend name -# Grenade doesn't use ``VOLUME_GROUP2`` so it is left out -VOLUME_GROUP_NAME=${VOLUME_GROUP:-${VOLUME_GROUP_NAME:-stack-volumes}} +VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-stack-volumes} DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default # Backing file name is of the form $VOLUME_GROUP$BACKING_FILE_SUFFIX From 7938d83d3bd37ef63cfea60e188918848ea87858 Mon Sep 17 00:00:00 2001 From: Michael Turek Date: Tue, 12 Apr 2016 14:55:21 -0400 Subject: [PATCH 0226/1936] Allow provider network to be used for ssh validation Currently devstack assumes that the network used for ssh validation is the private network. This patch adds a hook that sets the network used for ssh validation based on whether or not provider networking is being used. It also moves the function 'is_provider_network' into functions-common as it will now be used by both tempest and neutron. Change-Id: I265c9e26c9bfb18b7e201f27d8912b8bec235872 --- functions-common | 8 ++++++++ lib/neutron_plugins/services/l3 | 7 ------- lib/tempest | 7 ++++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/functions-common b/functions-common index 8e14b5ec7b..56d0a64f39 100644 --- a/functions-common +++ b/functions-common @@ -2260,6 +2260,14 @@ function maskip { echo $subnet } +function is_provider_network { + if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then + return 0 + fi + return 1 +} + + # Return the current python as "python." 
function python_version { local python_version diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index ddc615589f..9c475e9171 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -434,13 +434,6 @@ function _neutron_configure_router_v6 { fi } -function is_provider_network { - if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then - return 0 - fi - return 1 -} - function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time diff --git a/lib/tempest b/lib/tempest index f43036e12b..5e636379d0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -414,12 +414,17 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" + # If using provider networking, use the physical network for validation rather than private + TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME + if is_provider_network; then + TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK + fi # Validation iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME + iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume # TODO(obutenko): Remove snapshot_backup when liberty-eol happens. From 37f48f3c208de1031ef4589d15194c4079ad4a97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Tr=C4=99bski?= Date: Mon, 14 Nov 2016 09:10:57 +0100 Subject: [PATCH 0227/1936] Always set ALLOWED_HOSTS for horizon If devstack is deployed in the VM with defined public IP address (like 192.168.10.6) it is not possible to access the Horizon from the browser. This is because DEBUG=True means that ALLOWED_HOSTS, if not set, is equal to ['localhost', '127.0.0.1', '[::1]'] according to Django's documentation. Change-Id: I74ae99569dafa10eee7066713a05fb49183e3fca --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index 78cbe8b58d..50896503a1 100644 --- a/lib/horizon +++ b/lib/horizon @@ -97,6 +97,11 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" + # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed + # from outside the virtual machine. This fixes is meant primarily for local development + # purpose + _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"$HOST_IP\"] + if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" fi From d038b60e65dcf6916445a03fe87a11a39c6516c8 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Fri, 4 Nov 2016 13:32:39 +0100 Subject: [PATCH 0228/1936] Neutron: use "OSC show -f value -c id" instead of "OSC list + grep" We should leverage server-side filtering. 
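In short, the pattern being replaced and its server-side equivalent (network name is illustrative):

    # before: fetch the whole listing and filter client-side
    NETWORK_ID=$(openstack network list | grep "$NET_NAME" | awk '{print $2}')
    # after: let the API resolve the name and print only the id
    NETWORK_ID=$(openstack network show -f value -c id "$NET_NAME")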
Change-Id: I3deef791868769b0b7cfc405d73dff57458ca427 --- exercises/neutron-adv-test.sh | 2 +- lib/tempest | 3 +-- tools/ping_neutron.sh | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index bfd45eca5c..e8c8f62140 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -156,7 +156,7 @@ function get_role_id { function get_network_id { local NETWORK_NAME="$1" local NETWORK_ID - NETWORK_ID=`openstack network list | grep $NETWORK_NAME | awk '{print $2}'` + NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME` echo $NETWORK_ID } diff --git a/lib/tempest b/lib/tempest index f43036e12b..2816740180 100644 --- a/lib/tempest +++ b/lib/tempest @@ -242,8 +242,7 @@ function configure_tempest { # the public network (for floating ip access) is only available # if the extension is enabled. if is_networking_extension_supported 'external-net'; then - public_network_id=$(openstack network list | grep $PUBLIC_NETWORK_NAME | \ - awk '{print $2}') + public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index c75575406a..73fe3f3bdf 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -54,7 +54,7 @@ fi REMAINING_ARGS="${@:2}" # BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(openstack network list | grep "$NET_NAME" | awk '{print $2}') +NET_ID=$(openstack network show -f value -c id "$NET_NAME") PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) # This runs a command inside the specific netns From a5afa7d81ad380eeb91f7c58dd07facb214b347b Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 18 Nov 2016 12:32:19 -0800 Subject: [PATCH 0229/1936] Fix default ipv6 fixed range var The intent was to make any ipv6 safe addr range bigger than a /64 a /64 when setting the fixed range. Unfortunately the awk only emited the mask and not the addr. Fix this by sprinkling the address back in. Fixes-Bug: 1643055 Change-Id: I526d4c748fd404ecb3c77afcbb056aa95090c409 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2099757b03..6d518e25d8 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -73,7 +73,7 @@ IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56} # if we got larger than a /64 safe to use, we only use the first /64 to # avoid side effects outlined in rfc7421 -FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print ($2>63 ? $2 : 64) }')} +FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? 
$2 : 64) }')} IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-} IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} From 90742fc1be7b4ad77bf3fda110d5a9bda4eb2100 Mon Sep 17 00:00:00 2001 From: Jakub Wachowski Date: Fri, 18 Nov 2016 14:28:47 +0100 Subject: [PATCH 0230/1936] Pass branch parameter to git clone Without this parameter, when we set GIT_DEPTH, it may happen that we clone only master and then cannot checkout branch Change-Id: I39376914f8bfc286a308c99db6bc92cddab195b5 --- functions-common | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index d5014fd80a..458f3de363 100644 --- a/functions-common +++ b/functions-common @@ -534,10 +534,8 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." die $LINENO "Cloning not allowed in this configuration" fi - git_timed clone $git_clone_flags $git_remote $git_dest - cd $git_dest - # This checkout syntax works for both branches and tags - git checkout $git_ref + # '--branch' can also take tags + git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest From 82fa946d8fad036d9d5fae57f9f138c31cc210d4 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 2 Nov 2016 11:15:42 +0100 Subject: [PATCH 0231/1936] lib/tempest: use OSC to create nova flavors. OSC is the future and it's available now. Change-Id: Ib0dac761673a0c4f05a328ee530018e8bb269c4c --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index f43036e12b..928fbbdb6c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,11 +193,11 @@ function configure_tempest { available_flavors=$(nova flavor-list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then - nova flavor-create m1.nano 42 64 0 1 + openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - nova flavor-create m1.micro 84 128 0 1 + openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro fi flavor_ref_alt=84 else @@ -398,7 +398,7 @@ function configure_tempest { # build a specialized heat flavor available_flavors=$(nova flavor-list) if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then - nova flavor-create m1.heat 451 512 0 1 + openstack flavor create --id 451 --ram 512 --disk 0 --vcpus 1 m1.heat fi iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" fi From 541617b15ef678cd5daf05d19ceea84ec1c8ebf9 Mon Sep 17 00:00:00 2001 From: Hidekazu Nakamura Date: Wed, 9 Nov 2016 15:27:19 +0900 Subject: [PATCH 0232/1936] Improve Live Migration section in multinode-lab.rst Added root user login check to ensure SSH keys exchanged. Change-Id: I8513bc883ac273af8dcc2f61562a838d33b45a82 --- doc/source/guides/multinode-lab.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 8751eb8d6a..a98c2700a5 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -400,6 +400,10 @@ SSH keys need to be exchanged between each compute node: ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts +3. 
Verify that login via ssh works without a password:: + + ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION + In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and every compute node's public ECDSA key needs to be in every other compute From a5b72b053efec9af8a57c59b35f96142e4f38433 Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Tue, 26 Jan 2016 22:46:13 -0500 Subject: [PATCH 0233/1936] Set fixed-key key manager With the key manager refactoring in nova and cinder, the key manager class will need to be explicitly set. Nova key manager refactoring: Ib563b0ea4b8b4bc1833bf52bf49a68546c384996 Cinder key manager refactoring: Ief8885bb4ca8d62b03cf1a52c25dd0e62c835bfe Change-Id: I733279864ee1a4aaffc9c8eed81b5e12f8d8821b Implements: blueprint use-castellan-key-manager --- lib/cinder | 2 ++ lib/nova | 2 ++ 2 files changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index c4a49cd992..c7ce0f5aa3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -283,6 +283,8 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" + iniset $CINDER_CONF key_manager api_class cinder.keymgr.conf_key_mgr.ConfKeyManager + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" diff --git a/lib/nova b/lib/nova index ca9a6c7f50..d3e8ce85af 100644 --- a/lib/nova +++ b/lib/nova @@ -475,6 +475,8 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF key_manager api_class nova.keymgr.conf_key_mgr.ConfKeyManager + if is_fedora || is_suse; then # nova defaults to /usr/local/bin, but fedora and suse pip like to # install things in /usr/bin From 2ccd1203423ea29cc7b8c462aa17e18981d32d60 Mon Sep 17 00:00:00 2001 From: David Lyle Date: Mon, 21 Nov 2016 22:16:17 +0000 Subject: [PATCH 0234/1936] Revert "Always set ALLOWED_HOSTS for horizon" This reverts commit 37f48f3c208de1031ef4589d15194c4079ad4a97. Change-Id: I8c853c35c5b0b61925c27b461ad625266381f73b --- lib/horizon | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/horizon b/lib/horizon index 50896503a1..78cbe8b58d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -97,11 +97,6 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" - # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed - # from outside the virtual machine. This fixes is meant primarily for local development - # purpose - _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"$HOST_IP\"] - if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" fi From b6cbf922d79d7189dab7d68dc6014fa8682aad9d Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 21 Nov 2016 21:10:49 -0500 Subject: [PATCH 0235/1936] Change CINDER_LVM_TYPE back to 'default' as the default Change dddb2c7b5f85688de9c9b92f025df25d2f2d3016 recently changed devstack to enable the Cinder image cache by default and changed to use thinly provisioned LVM volumes by default. Since then we've had a spike in thin LVM snapshot test failures in the gate, which is by far our top gate bug at 219 hits in the last 10 days. 
So unless there is a fix on the Cinder side, this changes the default lvm_type back to 'default' for thick provisioning. Change-Id: I1c53bbe40177fe104ed0a222124bbc45c553b817 Related-Bug: #1642111 --- lib/cinder | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index c4a49cd992..9ff74e80be 100644 --- a/lib/cinder +++ b/lib/cinder @@ -68,8 +68,12 @@ CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # What type of LVM device should Cinder use for LVM backend -# Defaults to thin. For thick provisioning change to 'default' -CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-thin} +# Defaults to default, which is thick, the other valid choice +# is thin, which as the name implies utilizes lvm thin provisioning. +# Thinly provisioned LVM volumes may be more efficient when using the Cinder +# image cache, but there are also known race failures with volume snapshots +# and thinly provisioned LVM volumes, see bug 1642111 for details. +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} # Default backends # The backend format is type:name where type is one of the supported backend From 7f95baa5706980a0910741693abee73abea98b89 Mon Sep 17 00:00:00 2001 From: John Schwarz Date: Mon, 21 Nov 2016 15:52:36 +0200 Subject: [PATCH 0236/1936] Fix devstack with linuxbridge without l3 agent The linuxbridge agent for Neutron expects that the public bridge will already be created by the time it starts. On devstack, this only occurs as part of the l3 agent configuration. If a compute node doesn't have an l3 agent and is using a linuxbridge agent, then br-ex won't be created and the process will not be able to start (causing stack.sh to fail). This causes the gate-grenade-dsvm-neutron-linuxbridge-multinode-nv gate to fail. Closes-Bug: #1643562 Change-Id: I6f441c6febb5070ad885569d9c798634d0272b6c --- lib/neutron_plugins/linuxbridge_agent | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index d0de2f5e5d..194981ee40 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -62,7 +62,9 @@ function neutron_plugin_configure_plugin_agent { LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE fi if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" + if is_service_enabled neutron-l3; then + iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" + fi fi if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS From 471855ecd1b36c744162ec7c6c52768e1a1073e2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 23 Nov 2016 12:24:52 +0100 Subject: [PATCH 0237/1936] Add Fedora 25 support Fedora 25 released, adding the required knobs. 
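For context, an abridged sketch of the gate this unlocks (mirroring the stack.sh hunk below); once "f25" is in the pattern a Fedora 25 host no longer needs the FORCE override:

    if [[ ! ${DISTRO} =~ (trusty|xenial|f23|f24|f25|rhel7) ]]; then
        echo "WARNING: this script has not been tested on $DISTRO"
        if [[ "$FORCE" != "yes" ]]; then
            die $LINENO "If you wish to run this script anyway run with FORCE=yes"
        fi
    fi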
Change-Id: I8ce86aa9f23e9572d69eadcfc81af2e96d5be64f --- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- stack.sh | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index d0ceb56621..77d2fa5f0a 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -7,9 +7,9 @@ gcc-c++ gettext # used for compiling message catalogs git-core graphviz # needed only for docs -iptables-services # NOPRIME f23,f24 +iptables-services # NOPRIME f23,f24,f25 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f23,f24 +java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index a883ec4399..45f1c94f1f 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f23,f24 +kernel-modules # dist:f23,f24,f25 kpartx kvm # NOPRIME libvirt-bin # NOPRIME diff --git a/files/rpms/swift b/files/rpms/swift index bd249ee71b..2f12df0e3b 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f23,f24 +rsync-daemon # dist:f23,f24,f25 sqlite xfsprogs xinetd diff --git a/stack.sh b/stack.sh index f20c9d9ae3..d5dbfb275c 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|rhel7|kvmibm1) ]]; then +if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|f25|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From bcaadd63d8739b528f0121775d6612aebd333a4f Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 23 Nov 2016 12:43:02 +0100 Subject: [PATCH 0238/1936] Apache reload issue when it is stopped Since 4b49e409f853104dae021dfca1a9342ec9ac4709 devstack started to use reload instead of restart. Using reload in devstack for a fresh install, does not makes too much sense unless multiple service plugin touches the same service configs. Systemd rejects to reload something, which was not loaded before. $ sudo /bin/systemctl reload httpd httpd.service is not active, cannot reload. We will switch to `reload-or-restart` action instead of `reload`, it is more likely the action what the previous patch wanted. 
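To make the behaviour difference concrete (an illustration only, using httpd as the example unit):

    sudo systemctl stop httpd
    sudo systemctl reload httpd             # rejected: "httpd.service is not active, cannot reload."
    sudo systemctl reload-or-restart httpd  # reloads a running unit, otherwise starts it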
Change-Id: I70d597fbe4a8923d937ba8432e29edefb27d1058 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index d5014fd80a..951010fdad 100644 --- a/functions-common +++ b/functions-common @@ -2310,11 +2310,12 @@ function stop_service { fi } -# Service wrapper to stop services +# Service wrapper to reload services +# If the service was not in running state it will start it # reload_service service-name function reload_service { if [ -x /bin/systemctl ]; then - sudo /bin/systemctl reload $1 + sudo /bin/systemctl reload-or-restart $1 else sudo service $1 reload fi From 203716646472135e81bc7e8984bb200d87c0bf1c Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 23 Nov 2016 15:51:10 +0100 Subject: [PATCH 0239/1936] stackrc: update code comment now that Neutron is enabled by default No need to explain how to enable Neutron now that it's enabled by default. Keep but reformat the 'how to enable swift' part though. Change-Id: I3f9b7796fad10abf1039e4c68eb2cd5ef6cdbc99 --- stackrc | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/stackrc b/stackrc index 31f07594ad..b5018dea0f 100644 --- a/stackrc +++ b/stackrc @@ -44,20 +44,10 @@ KEYSTONE_REGION_NAME=${KEYSTONE_REGION_NAME:-$REGION_NAME} # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``local.conf``. -# For example, to enable Swift add this to ``local.conf``: -# enable_service s-proxy s-object s-container s-account -# In order to enable Neutron (a single node setup) add the following +# For example, to enable Swift as part of DevStack add the following # settings in ``local.conf``: # [[local|localrc]] -# disable_service n-net -# enable_service q-svc -# enable_service q-agt -# enable_service q-dhcp -# enable_service q-l3 -# enable_service q-meta -# # Optional, to enable tempest configuration as part of DevStack -# enable_service tempest - +# enable_service s-proxy s-object s-container s-account # This allows us to pass ``ENABLED_SERVICES`` if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone From 5c4691af3e9cf9d0fe9a2afa2bbe1fc73796610d Mon Sep 17 00:00:00 2001 From: Dan Kolb Date: Wed, 16 Nov 2016 13:47:49 -0600 Subject: [PATCH 0240/1936] remove listen directive bound to only ipv4 Using devstack on a RHEL based system results in "Listen 0.0.0.0:80" being added to the /etc/httpd/conf/httpd.conf. This configures Apache to only listen to port 80 on an IPv4 interface. This makes it not possible to access Horizon via IPv6 without re-configuring and restarting httpd. Removing this sed leaves the default "Listen 80" from the rpm package, which binds to all interfaces and will allow connection to Horizon via IPv6. 
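A rough way to see the difference on an affected host (paths assume a stock RHEL/Fedora httpd layout):

    grep '^Listen' /etc/httpd/conf/httpd.conf
    # Listen 0.0.0.0:80   <- what the removed sed produced: IPv4 sockets only
    # Listen 80           <- stock package default: binds every interface, IPv6 included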
Change-Id: I9fe8cbebff0ca6a30ceeaae0f7e035c9bb828d44 --- lib/horizon | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/horizon b/lib/horizon index 78cbe8b58d..3271e2a654 100644 --- a/lib/horizon +++ b/lib/horizon @@ -121,9 +121,7 @@ function configure_horizon { if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf - elif is_fedora; then - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - elif is_suse; then + elif is_fedora || is_suse; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" From 868a6631721219bac4042eca88e290c278589e26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Tr=C4=99bski?= Date: Mon, 14 Nov 2016 09:10:57 +0100 Subject: [PATCH 0241/1936] Always set ALLOWED_HOSTS=['*'] for horizon If devstack is deployed in the VM with defined public IP address (like 192.168.10.6) it is not possible to access the Horizon from the browser. This is because DEBUG=True means that ALLOWED_HOSTS, if not set, is equal to ['localhost', '127.0.0.1', '[::1]'] according to Django's documentation. Change-Id: I0ab2b57e459dbfa3b01b3e9388bbcefac076a142 Co-Authored-By: David Lyle Closes-Bug: #1643050 --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index 78cbe8b58d..f5e9c7e0a7 100644 --- a/lib/horizon +++ b/lib/horizon @@ -97,6 +97,11 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" + # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed + # from outside the virtual machine. This fixes is meant primarily for local development + # purpose + _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"] + if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" fi From 0dab8d63b3e5431bd5edb562f466296478285251 Mon Sep 17 00:00:00 2001 From: Peter Stachowski Date: Mon, 21 Nov 2016 20:36:31 +0000 Subject: [PATCH 0242/1936] (doc) Fixed references to FIXED_RANGE_V6 In the 'Private Network Addressing' section of the doc, there are references to FIXED_RANGE when referring to V6 networks. These have been changed to FIXED_RANGE_V6. Also fixed a few typos and grammatical errors when giving the doc a quick read-through looking for more references to FIXED_RANGE. Change-Id: Iaa530c476ce2b36a3f616945ddd2e24fa599a16c --- doc/source/networking.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 2301a2e931..bdbeaaa7a8 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -4,7 +4,7 @@ An important part of the DevStack experience is networking that works by default for created guests. This might not be optimal for your -particular testing environment, so this document tries it's best to +particular testing environment, so this document tries its best to explain what's going on. 
Defaults @@ -17,9 +17,9 @@ If you don't specify any configuration you will get the following: * a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1 * the demo project configured with fixed ips on a subnet allocated from the 10.0.0.0/22 range -* a ``br-ex`` interface controlled by neutron for all it's networking +* a ``br-ex`` interface controlled by neutron for all its networking (this is not connected to any physical interfaces). -* DNS resolution for guests based on the resolv.conf for you host +* DNS resolution for guests based on the resolv.conf for your host * an ip masq rule that allows created guests to route out This creates an environment which is isolated to the single @@ -40,7 +40,7 @@ updates. Tempest tests will work in this environment. Locally Accessible Guests ========================= -If you want to make you guests accessible other machines on your +If you want to make you guests accessible from other machines on your network, we have to connect ``br-ex`` to a physical interface. Dedicated Guest Interface @@ -109,8 +109,8 @@ not subnetpools are in use. For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly. -For IPv6, ``FIXED_RANGE`` will default to the first /64 of the value of +For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of ``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller, -``FIXED_RANGE`` will just use the value of that directly. +``FIXED_RANGE_V6`` will just use the value of that directly. ``SUBNETPOOL_PREFIX_V6`` will just default to the value of ``IPV6_ADDRS_SAFE_TO_USE`` directly. From f0252a9d8147cfbeaedbae4d6c2a8be0e97e72b0 Mon Sep 17 00:00:00 2001 From: Rob Cresswell Date: Fri, 25 Nov 2016 11:29:31 +0000 Subject: [PATCH 0243/1936] Remove Horizon setup scripts usage of run_tests.sh Horizon is removing run_tests in favour of tox during Ocata, as part of https://blueprints.launchpad.net/horizon/+spec/enhance-tox. To complete this move, we need to remove any reliance on run_tests. Change-Id: Ia8ad073aee68d1660d3bb5a68ec07516d8ce0665 --- lib/horizon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 78cbe8b58d..08d6259526 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,7 +81,7 @@ function configure_horizon { # Horizon is installed as develop mode, so we can compile here. # Message catalog compilation is handled by Django admin script, # so compiling them after the installation avoids Django installation twice. - (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages) + (cd $HORIZON_DIR; python manage.py compilemessages) # ``local_settings.py`` is used to override horizon default settings. local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py From 02f3f9a6bbb6c4af989ad6cf504d5c49d7c9b4e2 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Sat, 26 Nov 2016 00:43:07 +0900 Subject: [PATCH 0244/1936] meta-config: Fix consecutive same sections The current coding fails to process local.conf like the following. Note: This example is taken from a real use case. 
[1] [[post-config|$NEUTRON_CONF]] [qos] notification_drivers = midonet [[post-config|$NEUTRON_CONF]] [quotas] # x10 of default quotas (at the time of writing) quota_network=100 quota_subnet=100 quota_port=500 quota_router=100 quota_floatingip=500 quota_security_group=100 quota_security_group_rule=1000 [1] https://review.openstack.org/#/c/400627/ Closes-Bug: #1583214 Change-Id: Ie571b5fa5a33d9ed09f30ba7c7724b958ce17616 --- inc/meta-config | 10 ++++------ tests/test_meta_config.sh | 29 ++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/inc/meta-config b/inc/meta-config index 6eb7a00f69..6252135747 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -40,12 +40,10 @@ function get_meta_section { $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' BEGIN { group = "" } /^\[\[.+\|.*\]\]/ { - if (group == "") { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup && a[2] == configfile) { - group=a[1] - } + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup && a[2] == configfile) { + group=a[1] } else { group="" } diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 327fb56185..92f9c01f69 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -125,6 +125,14 @@ foo=bar [[test10|does-not-exist-dir/test.conf]] foo=bar +[[test11|test-same.conf]] +[DEFAULT] +foo=bar + +[[test11|test-same.conf]] +[some] +random=config + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -147,6 +155,9 @@ cfg_item1 = abcd cfg_item2 = efgh cfg_item2 = \${FOO_BAR_BAZ} +[[test11|test-same.conf]] +[another] +non = sense EOF echo -n "get_meta_section_files: test0 doesn't exist: " @@ -385,8 +396,24 @@ EXPECT_VAL=255 check_result "$VAL" "$EXPECT_VAL" set -e +echo -n "merge_config_file test11 same section: " +rm -f test-same.conf +merge_config_group test.conf test11 +VAL=$(cat test-same.conf) +EXPECT_VAL=' +[DEFAULT] +foo = bar + +[some] +random = config + +[another] +non = sense' +check_result "$VAL" "$EXPECT_VAL" + + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ - test-multi-sections.conf + test-multi-sections.conf test-same.conf rm -rf test-etc From d9ec42028df9ad12aa7760e21c664d6927790d9e Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 21 Jul 2016 16:14:52 +0900 Subject: [PATCH 0245/1936] lib/neutron: Add a utility method to add service plugin Partial-Bug: #1604664 Change-Id: I6d49ac188f7f1cfc8da314a26c9c5fc4b6d65bf4 --- lib/neutron | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/neutron b/lib/neutron index 1bb14f21b2..d30e18562f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -200,7 +200,7 @@ function configure_neutron_new { if is_service_enabled neutron-l3; then cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - iniset $NEUTRON_CONF DEFAULT service_plugins router + neutron_service_plugin_class_add router iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF @@ -249,14 +249,8 @@ function configure_neutron_new { source $TOP_DIR/lib/neutron_plugins/services/metering neutron_agent_metering_configure_common neutron_agent_metering_configure_agent - # TODO(sc68cal) hack because we don't pass around - # 
$Q_SERVICE_PLUGIN_CLASSES like -legacy does - local plugins="" - plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - plugins+=",metering" - iniset $NEUTRON_CONF DEFAULT service_plugins $plugins + neutron_service_plugin_class_add metering fi - } # configure_neutron_rootwrap() - configure Neutron's rootwrap @@ -496,6 +490,16 @@ function _set_config_files { } +# neutron_service_plugin_class_add() - add service plugin class +function neutron_service_plugin_class_add_new { + local service_plugin_class=$1 + local plugins="" + + plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) + plugins+=",${service_plugin_class}" + iniset $NEUTRON_CONF DEFAULT service_plugins $plugins +} + # Dispatch functions # These are needed for compatibility between the old and new implementations # where there are function name overlaps. These will be removed when @@ -555,6 +559,15 @@ function install_neutron { fi } +function neutron_service_plugin_class_add { + if is_neutron_legacy_enabled; then + # Call back to old function + _neutron_service_plugin_class_add "$@" + else + neutron_service_plugin_class_add_new "$@" + fi +} + function start_neutron { if is_neutron_legacy_enabled; then # Call back to old function From 89a855f784644f2c4d88331a0d29f85e5fcd741c Mon Sep 17 00:00:00 2001 From: Sharat Sharma Date: Mon, 3 Oct 2016 12:10:23 +0530 Subject: [PATCH 0246/1936] Changed the order of steps in the devstack install document The order of the steps were a bit confusing for the first timers in the devstack document. So, changed the order of installation steps to make it clear. Change-Id: Ifaa051887dab95719b9ca5d1b2fbe2f5f549d269 Closes-Bug: #1627939 --- doc/source/index.rst | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 435011bb96..b8dd506aab 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -45,12 +45,34 @@ as well as Debian and OpenSUSE. If you do not have a preference, Ubuntu 16.04 is the most tested, and will probably go the smoothest. +Add Stack User +-------------- + +Devstack should be run as a non-root user with sudo enabled +(standard logins to cloud images such as "ubuntu" or "cloud-user" +are usually fine). + +You can quickly create a separate `stack` user to run DevStack with + +:: + + $ adduser stack + +Since this user will be making many changes to your system, it should +have sudo privileges: + +:: + + $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ su stack + Download DevStack ----------------- :: - git clone https://git.openstack.org/openstack-dev/devstack + $ git clone https://git.openstack.org/openstack-dev/devstack + $ cd devstack The ``devstack`` repo contains a script that installs OpenStack and templates for configuration files @@ -58,8 +80,8 @@ templates for configuration files Create a local.conf ------------------- -Create a ``local.conf`` file with 4 passwords preset - +Create a ``local.conf`` file with 4 passwords preset at the root of the +devstack git repo. :: [[local|localrc]] @@ -70,25 +92,12 @@ Create a ``local.conf`` file with 4 passwords preset This is the minimum required config to get started with DevStack. -Add Stack User --------------- - -Devstack should be run as a non-root user with sudo enabled -(standard logins to cloud images such as "ubuntu" or "cloud-user" -are usually fine). 
- -You can quickly create a separate `stack` user to run DevStack with - -:: - - devstack/tools/create-stack-user.sh; su stack - Start the install ----------------- :: - cd devstack; ./stack.sh + ./stack.sh This will take a 15 - 20 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be From 2a5981402ff26973c77ccf534164e4318dd8897a Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 29 Nov 2016 03:48:34 -0800 Subject: [PATCH 0247/1936] Provide configuration file to ovs-cleanup utility There may be cases when the configuration of the OVS is different from the default one. This enables one to make use of the neutron configuration file to contain all of the OVS settings. Change-Id: I728cf8cdc653667c076b07b39c13c1278281c01b Closes-bug: #1645691 --- lib/neutron_plugins/ovs_base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index baf7d7f34b..10043252d6 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -30,7 +30,7 @@ function _neutron_ovs_base_add_bridge { function _neutron_ovs_base_setup_bridge { local bridge=$1 - neutron-ovs-cleanup + neutron-ovs-cleanup --config-file $NEUTRON_CONF _neutron_ovs_base_add_bridge $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } @@ -97,7 +97,7 @@ function _neutron_ovs_base_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE fi - neutron-ovs-cleanup + neutron-ovs-cleanup --config-file $NEUTRON_CONF if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || sudo ip link add $Q_PUBLIC_VETH_INT type veth \ From cfb9f057ea5896687d95cdcc5aa5216ef32b87f8 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 29 Nov 2016 10:43:05 -0800 Subject: [PATCH 0248/1936] Tune apache connections for tls proxy We are seeing connection errors to the proxy occasionally. These errors do not result in a logged http request or error to the backends, resulting in a theory that the proxy itself may just not be able to handle the number of connections. More than double the total number of connections that will be accepted by the proxy in an attempt to fix this. Change-Id: Iefa6c43451dd1f95927528d2ce0003c84248847f Related-bug: 1630664 --- lib/apache | 5 +++++ lib/tls | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/lib/apache b/lib/apache index 8a38cc45e5..2dc626f130 100644 --- a/lib/apache +++ b/lib/apache @@ -29,15 +29,20 @@ APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} # Set up apache name and configuration directory +# Note that APACHE_CONF_DIR is really more accurately apache's vhost +# configuration dir but we can't just change this because public interfaces. 
if is_ubuntu; then APACHE_NAME=apache2 APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled} elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} elif is_suse; then APACHE_NAME=apache2 APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi APACHE_LOG_DIR="/var/log/${APACHE_NAME}" diff --git a/lib/tls b/lib/tls index 14cdf19d8e..57b5e525ac 100644 --- a/lib/tls +++ b/lib/tls @@ -442,6 +442,52 @@ function enable_mod_ssl { # Proxy Functions # =============== +function tune_apache_connections { + local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf + if ! [ -f $tuning_file ] ; then + sudo bash -c "cat > $tuning_file" << EOF +# worker MPM +# StartServers: initial number of server processes to start +# MinSpareThreads: minimum number of worker threads which are kept spare +# MaxSpareThreads: maximum number of worker threads which are kept spare +# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a +# graceful restart. ThreadLimit can only be changed by stopping +# and starting Apache. +# ThreadsPerChild: constant number of worker threads in each server process +# MaxClients: maximum number of simultaneous client connections +# MaxRequestsPerChild: maximum number of requests a server process serves +# +# The apache defaults are too conservative if we want reliable tempest +# testing. Bump these values up from ~400 max clients to 1024 max clients. + +# Note that the next three conf values must be changed together. +# MaxClients = ServerLimit * ThreadsPerChild +ServerLimit 32 +ThreadsPerChild 32 +MaxClients 1024 +StartServers 3 +MinSpareThreads 96 +MaxSpareThreads 192 +ThreadLimit 64 +MaxRequestsPerChild 0 + + +# Note that the next three conf values must be changed together. +# MaxClients = ServerLimit * ThreadsPerChild +ServerLimit 32 +ThreadsPerChild 32 +MaxClients 1024 +StartServers 3 +MinSpareThreads 96 +MaxSpareThreads 192 +ThreadLimit 64 +MaxRequestsPerChild 0 + +EOF + restart_apache_server + fi +} + # Starts the TLS proxy for the given IP/ports # start_tls_proxy front-host front-port back-host back-port function start_tls_proxy { @@ -451,6 +497,8 @@ function start_tls_proxy { local b_host=$4 local b_port=$5 + tune_apache_connections + local config_file config_file=$(apache_site_config_for $b_service) local listen_string From 6653d74fc4cbff1b06888fe53a0712117e2394dc Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 29 Nov 2016 23:30:18 +0000 Subject: [PATCH 0249/1936] Revert "Fix devstack with linuxbridge without l3 agent" I am seeing red all over the linuxbridge gate. This reverts commit 7f95baa5706980a0910741693abee73abea98b89. 
Change-Id: I179f761f991db4b63c0e3445a9a13e43ffe68992 --- lib/neutron_plugins/linuxbridge_agent | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 194981ee40..d0de2f5e5d 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -62,9 +62,7 @@ function neutron_plugin_configure_plugin_agent { LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE fi if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - if is_service_enabled neutron-l3; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" - fi + iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" fi if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS From 2864150940bfb003e86bf103cb18b32bdb4a080b Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Tue, 20 Sep 2016 06:49:50 +0000 Subject: [PATCH 0250/1936] Make neutron ml2 use ovs native interface Neutron has changed to use ovs native interface by default, but when the hypervisor is XenServer, we cannot use ovs native interface without extra configurations in neutron-openvswitch-agent(q-agt) in compute node. This patch is to add the needed configurations automatically during deployment, so user needn't to do it manually and restart q-agt. Change-Id: Ibc69d3cdb4d75833f2ac16840c62bcacf460dd4f --- lib/neutron_plugins/openvswitch_agent | 5 +++++ lib/nova_plugins/hypervisor-xenserver | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index e27b8a634a..8860d7b0f2 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -95,6 +95,11 @@ function neutron_plugin_configure_plugin_agent { # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE + # Set OVS native interface for ovs-agent in compute node + XEN_DOM0_IP=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3) + iniset /$Q_PLUGIN_CONF_FILE ovs ovsdb_connection tcp:$XEN_DOM0_IP:6640 + iniset /$Q_PLUGIN_CONF_FILE ovs of_listen_address $HOST_IP + # Set up domU's L2 agent: # Create a bridge "br-$VLAN_INTERFACE" diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index a63e72e764..b053856348 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -93,6 +93,15 @@ CRONTAB if is_service_enabled neutron; then # Remove restriction on linux bridge in Dom0 when neutron is enabled $ssh_dom0 "rm -f /etc/modprobe.d/blacklist-bridge*" + + count=`$ssh_dom0 "iptables -t filter -L XenServerDevstack |wc -l"` + if [ "$count" = "0" ]; then + { + echo "iptables -t filter --new XenServerDevstack" + echo "iptables -t filter -I INPUT -j XenServerDevstack" + echo "iptables -t filter -I XenServerDevstack -p tcp --dport 6640 -j ACCEPT" + } | $ssh_dom0 + fi fi } From 50bf4fc0758df3dfb0bf9fa6e5f56ecebd1200e4 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Wed, 30 Nov 2016 14:27:36 -0500 Subject: [PATCH 0251/1936] lib/neutron-legacy - Use stevedore alias for ML2 plugin Similar to 30ab23cd9b103470a7d89c4c88bccba789884c36, fix the plugin name to avoid warnings like: WARNING stevedore.named [-] Could not load neutron.plugins.ml2.plugin.Ml2Plugin Change-Id: Ibb45f1305816b255ba2419ba662d9e29eff68f58 --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index e429714f06..eed0fc2da4 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -63,7 +63,7 @@ function populate_ml2_config { function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini - Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" + Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. _neutron_service_plugin_class_add $ML2_L3_PLUGIN From df875c5f16980228e2443c50298f35063f2b54ae Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Wed, 30 Nov 2016 19:47:24 -0800 Subject: [PATCH 0252/1936] Reconfig ml2 ovs configuration for neutron on XenServer XenServer already support OVS native mode and I have a patch for configuring it https://review.openstack.org/#/c/372952/ which is fine. But we have another patch which revert the usage of ml2_confi.ini and ml2_conf.ini.domU https://review.openstack.org/#/c/396573/. Both patches work well separately. But the two should have some dependent relationship. Once one merged, the other should change accordingly. Sorry that we missed the dependency. This patch is to fix the ovs config based on reverted ml2_conf.ini and ml2_conf.ini.domU to make sure we configure the correct IP for ovs agent Change-Id: Ib53e37e210cc849f161dd6630f81e5b2331a91d5 --- lib/neutron_plugins/openvswitch_agent | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index f009966a01..76a1a4f3a1 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -97,8 +97,8 @@ function neutron_plugin_configure_plugin_agent { # Set OVS native interface for ovs-agent in compute node XEN_DOM0_IP=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3) - iniset /$Q_PLUGIN_CONF_FILE ovs ovsdb_connection tcp:$XEN_DOM0_IP:6640 - iniset /$Q_PLUGIN_CONF_FILE ovs of_listen_address $HOST_IP + iniset /$Q_PLUGIN_CONF_FILE.domU ovs ovsdb_connection tcp:$XEN_DOM0_IP:6640 + iniset /$Q_PLUGIN_CONF_FILE.domU ovs of_listen_address $HOST_IP # Set up domU's L2 agent: From 1d83a08a385ad079c546d8a0ac9558dc72eaa9fd Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Wed, 30 Nov 2016 14:22:24 -0500 Subject: [PATCH 0253/1936] lib/neutron: Remove type_driver configuration Instead, rely on what Neutron ships by default. Change-Id: I8bdc646d24b3edf10f421e4472a790caeb8e4887 --- lib/neutron | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index d30e18562f..4915dcce31 100644 --- a/lib/neutron +++ b/lib/neutron @@ -159,7 +159,6 @@ function configure_neutron_new { # Configure VXLAN # TODO(sc68cal) not hardcode? 
iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then From 50ce0a8032bf315df20c594df1dd3d0822168b4d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 1 Dec 2016 08:45:43 +0000 Subject: [PATCH 0254/1936] Revert "Set cinder coordination backend url" gate-tempest-dsvm-layer4 job is failing due to this change as no module found on zake[1]. Heat not able to create cinder stack. This is blocking tempest now so we will revert revert this once find the real cause. This reverts commit a283526c88d98f7668d369f4c20db44d3d06425a. ..1 http://logs.openstack.org/64/405064/2/check/gate-tempest-dsvm-layer4/feb21fe/logs/screen-c-vol.txt.gz#_2016-12-01_05_43_40_369 Change-Id: I50981db7760688a25a2bf4de9a41153ede76f4aa --- lib/cinder | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 4ed944cfcb..0fe950b6c2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -358,13 +358,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - # Set the backend url according to the configured dlm backend - if is_dlm_enabled; then - if [[ "$(dlm_backend)" == "zookeeper" ]]; then - iniset $CINDER_CONF coordination backend_url "zake://" - fi - fi } # create_cinder_accounts() - Set up common required cinder accounts From c74315e055a770ef3508276442816fffde07cfc6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 21 Jul 2016 17:49:43 +0900 Subject: [PATCH 0255/1936] Load neutron-legacy only if enabled To avoid using legacy functions accidentially. Depends-On: Ida1f83b6b3ef9b76be13c063c7e35a8703214078 Change-Id: I3ff136fc8330c92007cdfe91b77d7f9865eabd8d --- lib/neutron | 13 +++++++++++++ lib/neutron-legacy | 2 +- stack.sh | 1 - unstack.sh | 1 - 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index d30e18562f..852787db36 100644 --- a/lib/neutron +++ b/lib/neutron @@ -90,6 +90,10 @@ function is_neutron_legacy_enabled { return 1 } +if is_neutron_legacy_enabled; then + source $TOP_DIR/lib/neutron-legacy +fi + # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron_new { @@ -568,6 +572,15 @@ function neutron_service_plugin_class_add { fi } +function install_neutron_agent_packages { + if is_neutron_legacy_enabled; then + # Call back to old function + install_neutron_agent_packages_mutnauq "$@" + else + : + fi +} + function start_neutron { if is_neutron_legacy_enabled; then # Call back to old function diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0b0caf18b5..37d278344e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -423,7 +423,7 @@ function install_mutnauq { } # install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages { +function install_neutron_agent_packages_mutnauq { # radvd doesn't come with the OS. Install it if the l3 service is enabled. 
if is_service_enabled q-l3; then install_package radvd diff --git a/stack.sh b/stack.sh index 74edb10afc..17cc248a59 100755 --- a/stack.sh +++ b/stack.sh @@ -574,7 +574,6 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/dlm diff --git a/unstack.sh b/unstack.sh index c05d1f0952..fe2bb8bfea 100755 --- a/unstack.sh +++ b/unstack.sh @@ -68,7 +68,6 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/dlm From 8b1bbd690c3aea36dea804542c1bc56274b2ad31 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 1 Dec 2016 22:29:12 +0900 Subject: [PATCH 0256/1936] Make deprecated() output to stderr So that it can be used by functions like _determine_config_server, which is used like RESULT=$(_determine_config_server). Change-Id: Ia4e641c5529b95ada30ae662221f370bc7fa88a7 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index cc1d42b0d6..f07b382ce6 100644 --- a/functions-common +++ b/functions-common @@ -216,7 +216,7 @@ function die_if_not_set { function deprecated { local text=$1 DEPRECATED_TEXT+="\n$text" - echo "WARNING: $text" + echo "WARNING: $text" >&2 } # Prints line number and "message" in error format From eede9ddb1d0f0970672f02bcdfb33923603e65e7 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 15 Jul 2016 10:27:53 +0900 Subject: [PATCH 0257/1936] Deprecate Q_PLUGIN_EXTRA_CONF_PATH This single global variable is no longer useful as we have multiple repositories and devstack plugins nowadays. Also, add a utility function, neutron_server_config_add, for devstack plugins to add an extra config file. 
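As a usage sketch for plugin authors (the config path here is purely illustrative), a devstack plugin calls the new helper instead of appending to the deprecated globals:

    # hypothetical devstack plugin fragment
    neutron_server_config_add /etc/neutron/plugins/acme/acme_plugin.conf
    # neutron-server is then launched with an additional
    #   --config-file /etc/neutron/plugins/acme/acme_plugin.conf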
Related-Bug: #1599936 Change-Id: I90112823ef96ae2fba97d7b09b00bec8cb816d8d --- lib/neutron | 26 ++++++++++++++++++++- lib/neutron-legacy | 56 +++++++++++++++++++++++++++++----------------- 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/lib/neutron b/lib/neutron index 852787db36..590e03818d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -73,6 +73,9 @@ NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CON # Add all enabled config files to a single config arg NEUTRON_CONFIG_ARG=${NEUTRON_CONFIG_ARG:-""} +# Additional neutron api config files +declare -a _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS + # Functions # --------- @@ -393,9 +396,17 @@ function start_neutron_api { service_protocol="http" fi + local opts = "" + opts+="--config-file $NEUTRON_CONF" + opts+="--config-file $NEUTRON_CORE_PLUGIN_CONF" + local cfg_file + for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + # Start the Neutron service # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" + run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $ops" if is_ssl_enabled_service "neutron"; then ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" @@ -504,6 +515,10 @@ function neutron_service_plugin_class_add_new { iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } +function neutron_server_config_add_new { + _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) +} + # Dispatch functions # These are needed for compatibility between the old and new implementations # where there are function name overlaps. These will be removed when @@ -581,6 +596,15 @@ function install_neutron_agent_packages { fi } +function neutron_server_config_add { + if is_neutron_legacy_enabled; then + # Call back to old function + mutnauq_server_config_add "$@" + else + neutron_server_config_add_new "$@" + fi +} + function start_neutron { if is_neutron_legacy_enabled; then # Call back to old function diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 37d278344e..29c187e1e1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -128,10 +128,24 @@ Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + # List of config file names in addition to the main plugin config file -# See _configure_neutron_common() for details about setting it up +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. declare -a Q_PLUGIN_EXTRA_CONF_FILES +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
+declare -a _Q_PLUGIN_EXTRA_CONF_FILES_ABS + Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then @@ -270,9 +284,23 @@ set +o xtrace # --------- function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi + fi + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." + fi + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) + done + local cfg_file local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do opts+=" --config-file $cfg_file" done echo "$opts" @@ -668,11 +696,6 @@ function _configure_neutron_common { # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - # For additional plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH`` and - # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: - # - # ``Q_PLUGIN_EXTRA_CONF_PATH=/path/to/plugins`` - # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1 file2)`` neutron_plugin_configure_common if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then @@ -699,20 +722,6 @@ function _configure_neutron_common { # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation iniset $NEUTRON_CONF nova region_name $REGION_NAME - # If addition config files are set, make sure their path name is set as well - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then - die $LINENO "Neutron additional plugin config not set.. exiting" - fi - - # If additional config files exist, copy them over to neutron configuration - # directory - if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - local f - for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do - Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - done - fi - if [ "$VIRT_DRIVER" = 'fake' ]; then # Disable arbitrary limits iniset $NEUTRON_CONF quotas quota_network -1 @@ -863,6 +872,11 @@ function _neutron_service_plugin_class_add { fi } +# mutnauq_server_config_add() - add server config file +function mutnauq_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) +} + # _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). function _neutron_deploy_rootwrap_filters { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then From 8bf8c8f3810766d19b92f9cb21231095beca30c6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 1 Dec 2016 10:24:06 -0500 Subject: [PATCH 0258/1936] Add test-config local.conf processing We need a local.conf processing phase after every run phase which lets us override config options after that point. We didn't explicitly support this for test-config before, which broke some CI systems when we moved tempest to use this later phase. 
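For example (section and values are illustrative), a local.conf can now carry an override that is merged only after the test-config phase has run:

    [[test-config|$TEMPEST_CONFIG]]
    [compute]
    build_timeout = 900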
Closes-Bug: #1646391 Change-Id: I7d693afa19acf3e8231e84e45b7a868628ebdbc0 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 9aa770f2b9..4b8552c30a 100755 --- a/stack.sh +++ b/stack.sh @@ -1407,6 +1407,9 @@ fi # Phase: test-config run_phase stack test-config +# Apply late configuration from ``local.conf`` if it exists for layer 2 services +# Phase: test-config +merge_config_group $TOP_DIR/local.conf test-config # Fin # === From 1c442eebc8fe005af453bd610e750a1919a2b3ed Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 30 Nov 2016 20:44:44 +0000 Subject: [PATCH 0259/1936] Fix libguestfs on Ubuntu libguestfs does not work on ubuntu because the kernel is not world readable. This breaks file injection with libvirt. See https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725 for more details. The workaround proposed by Ubuntu is to relax the kernel ACL if needed, so we need to do that in case file injection is enabled on an Ubuntu host running libvirt. Partial-bug: #1646002 Change-Id: I405793b9e145308e51a08710d8e5df720aec6fde --- lib/nova_plugins/hypervisor-libvirt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 167ab6f2e7..f3c8add696 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -105,6 +105,16 @@ function install_nova_hypervisor { if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then if is_ubuntu; then install_package python-guestfs + # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs: + # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725) + INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)" + for kernel in $INSTALLED_KERNELS; do + STAT_OVERRIDE="root root 644 ${kernel}" + # unstack won't remove the statoverride, so make this idempotent + if [[ ! $(dpkg-statoverride --list | grep "$STAT_OVERRIDE") ]]; then + sudo dpkg-statoverride --add --update $STAT_OVERRIDE + fi + done elif is_fedora || is_suse; then install_package python-libguestfs fi From 1c08b191fd04e028a8d908c9ccb4fdc536248c96 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 29 Nov 2016 15:58:53 -0800 Subject: [PATCH 0260/1936] Fix devstack with linuxbridge without l3 agent The linuxbridge agent for Neutron expects that the public bridge will already be created by the time it starts. On devstack, this only occurs as part of the l3 agent configuration. If a compute node doesn't have an l3 agent and is using a linuxbridge agent, then br-ex won't be created and the process will not be able to start (causing stack.sh to fail). This causes the gate-grenade-dsvm-neutron-linuxbridge-multinode-nv gate to fail. To avoid the issue, skip the bridge mappings setup unless L3 is configured. This is done in a backward compatible fashion: if localrc uses the old q-l3 tags, the is_service_enabled neutron-l3 would not be able to succeed. 
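Concretely, the guard accepts either spelling of the service name, so existing local.conf entries keep working:

    # legacy naming (lib/neutron-legacy)
    enable_service q-l3
    # new naming (lib/neutron)
    enable_service neutron-l3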
Closes-Bug: #1643562 Change-Id: I292ff0dc080fb84b5f879ba2f00f03eff295b55b --- lib/neutron_plugins/linuxbridge_agent | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index d0de2f5e5d..0c8ccb8718 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -62,7 +62,9 @@ function neutron_plugin_configure_plugin_agent { LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE fi if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" + if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" + fi fi if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS From 6b8a115b95c7a637e01b8145aa961892ca3b9cd1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 3 Dec 2016 06:56:32 +0000 Subject: [PATCH 0261/1936] Updated from generate-devstack-plugins-list Change-Id: I21c31b0778486971719f1281428c7a089f583656 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 6ece9978ac..89f74c3afc 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -76,6 +76,7 @@ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr- magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ manila `git://git.openstack.org/openstack/manila `__ +manila-ui `git://git.openstack.org/openstack/manila-ui `__ masakari `git://git.openstack.org/openstack/masakari `__ mistral `git://git.openstack.org/openstack/mistral `__ mixmatch `git://git.openstack.org/openstack/mixmatch `__ From 8a834940b893b0535887d8cf08504dab4b9b710e Mon Sep 17 00:00:00 2001 From: jeckxie Date: Mon, 5 Dec 2016 15:32:25 +0800 Subject: [PATCH 0262/1936] Missing parameter in comment Change-Id: Iba55013b3be00974321ad58a276bb37ff2cac4da --- lib/lvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/lvm b/lib/lvm index 99c7ba9b9f..0cebd92f77 100644 --- a/lib/lvm +++ b/lib/lvm @@ -101,7 +101,7 @@ function _create_lvm_volume_group { # init_lvm_volume_group() initializes the volume group creating the backing # file if necessary # -# Usage: init_lvm_volume_group() $vg +# Usage: init_lvm_volume_group() $vg $size function init_lvm_volume_group { local vg=$1 local size=$2 From ae61e6f3d25df98f4ebeca9695a3c73239ce82b2 Mon Sep 17 00:00:00 2001 From: Anton Merzlyakov Date: Wed, 30 Nov 2016 14:29:18 +0300 Subject: [PATCH 0263/1936] Postgres: fix detection of existing roles Role "root" it is hardcode. 
In the general case the role name comes from local.conf via the "DATABASE_USER="
setting rather than being hardcoded as "root".

Change-Id: Iedfca48e04d23c313851f48d68ac40ba29340805
---
 lib/databases/postgresql | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 14425a53b7..1f347f5548 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -47,7 +47,7 @@ function recreate_database_postgresql {
 }

 function configure_database_postgresql {
-    local pg_conf pg_dir pg_hba root_roles version
+    local pg_conf pg_dir pg_hba check_role version
     echo_summary "Configuring and starting PostgreSQL"
     if is_fedora; then
         pg_hba=/var/lib/pgsql/data/pg_hba.conf
@@ -85,8 +85,8 @@ function configure_database_postgresql {
     restart_service postgresql

     # Create the role if it's not here or else alter it.
-    root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'")
-    if [[ ${root_roles} == *HERE ]];then
+    check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'")
+    if [[ ${check_role} == *HERE ]];then
         sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
     else
         sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"

From 6d66e647ca35910cbca4d940b5203d7307efa1db Mon Sep 17 00:00:00 2001
From: Sean Dague
Date: Mon, 5 Dec 2016 06:28:26 -0500
Subject: [PATCH 0264/1936] don't set up cells if n-cpu isn't also running

create_cell requires n-api and at least one n-cpu up and running. If we have a
configuration where it is not guaranteed that there is an n-cpu at the end of a
devstack run we have to skip this step and make the user run it manually later.

Change-Id: I2287ab29f3c1a7252271dcce81673ef365615296
---
 stack.sh | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/stack.sh b/stack.sh
index 9c63f5feac..05a7666ce5 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1386,7 +1386,15 @@ check_libs_from_git

 # Do this late because it requires compute hosts to have started
 if is_service_enabled n-api; then
-    create_cell
+    if is_service_enabled n-cpu; then
+        create_cell
+    else
+        # Some CI systems like Hyper-V build the control plane on
+        # Linux, and join in non Linux Computes after setup. This
+        # allows them to delay the processing until after their whole
+        # environment is up.
+        echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment."
+    fi
 fi

 # Bash completion

From 8e0fc9dc8e89994cedfa152e4ff5d2895d19c8a8 Mon Sep 17 00:00:00 2001
From: Vasyl Saienko
Date: Tue, 6 Dec 2016 09:35:02 +0200
Subject: [PATCH 0265/1936] Fix doc formatting

Fix a formatting issue in doc/source/plugins.rst

Trivial fix.

Change-Id: Ifc9a43aebdab0c15c83485cf0b6d4133d7194a9d
---
 doc/source/plugins.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 31987bc62b..5b3c6cf714 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -99,7 +99,7 @@ The current full list of ``mode`` and ``phase`` are:
     should exist at this point.
   - **extra** - Called near the end after layer 1 and 2 services have
     been started.
- - **test-config** - Called at the end of devstack used to configure tempest + - **test-config** - Called at the end of devstack used to configure tempest or any other test environments - **unstack** - Called by ``unstack.sh`` before other services are shut From 9abb26d69666e836d310ad769d65cb811e2708e7 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Wed, 7 Dec 2016 21:12:55 +0100 Subject: [PATCH 0266/1936] Try to set initial mysql root password also on Ubuntu On Ubuntu nodes, devstack tries to predefine the initial mysql root password by doing some debconf-set-selections, but these will not take effect if the corresponding package has been installed earlier. So just try to set it every time, like we do on other distros. Change-Id: I2c167051fc5e53dd0ccf82a60ab085cd9cdea28d --- lib/databases/mysql | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index f6cc9224af..89ae082c81 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -82,10 +82,9 @@ function configure_database_mysql { fi # Set the root password - only works the first time. For Ubuntu, we already - # did that with debconf before installing the package. - if ! is_ubuntu; then - sudo mysqladmin -u root password $DATABASE_PASSWORD || true - fi + # did that with debconf before installing the package, but we still try, + # because the package might have been installed already. + sudo mysqladmin -u root password $DATABASE_PASSWORD || true # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" From 470580bba68e5dc727ceb4d575649cb143eef7d5 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Thu, 8 Dec 2016 13:10:59 +0900 Subject: [PATCH 0267/1936] Remove baremetal tempest config setting from devstack Now all configuration are present in Ironic tempest plugin and those are going to be removed from tempest in Id518a6d87d0949737cd1c50cb6a83149b85e5f85 Patch- I73c649625d106fc7f068e12e21eaacba8f43cbbb set those in ironic devstack plugin. We can remove all baremetal config setting from devstack. Along with moved one this patch deletes other unused baremetal config setting. 
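As a rough sketch of where such settings can live instead (this only illustrates the
plugin interface and is not the actual ironic plugin code), a devstack plugin can apply
its own tempest options during its test-config phase:

    # devstack/plugin.sh of a hypothetical plugin
    if [[ "$1" == "stack" && "$2" == "test-config" ]]; then
        if is_service_enabled tempest; then
            iniset $TEMPEST_CONFIG baremetal driver_enabled True
            iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT
        fi
    fi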
Change-Id: If826321ebc0c20ea372d206d49383f3826c9b547 Depends-On: Id518a6d87d0949737cd1c50cb6a83149b85e5f85 Depends-On: I73c649625d106fc7f068e12e21eaacba8f43cbbb --- lib/tempest | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..39b565f62f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -494,12 +494,6 @@ function configure_tempest { # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then - iniset $TEMPEST_CONFIG baremetal driver_enabled True - iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal deploywait_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES - iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled console_output False iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False From 8040232e05a68ce18e4ddfa233140d604c94cc77 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 8 Dec 2016 06:54:23 +0000 Subject: [PATCH 0268/1936] Updated from generate-devstack-plugins-list Change-Id: I857aea09db183a03393acdaa9bfcde8f24cec4f8 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 89f74c3afc..9a75090713 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -58,6 +58,7 @@ ec2-api `git://git.openstack.org/openstack/ec2-ap freezer `git://git.openstack.org/openstack/freezer `__ freezer-api `git://git.openstack.org/openstack/freezer-api `__ freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__ +fuxi `git://git.openstack.org/openstack/fuxi `__ gce-api `git://git.openstack.org/openstack/gce-api `__ glare `git://git.openstack.org/openstack/glare `__ gnocchi `git://git.openstack.org/openstack/gnocchi `__ From 30b58bfa277820e0d41a7dc5edd06d8b4ff53500 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 9 Dec 2016 00:58:54 +0100 Subject: [PATCH 0269/1936] Update openvswitch restart for suse Starting with SLE12 SP2 and with openSUSE Leap the distro-shipped openvswitch is the normal systemd openvswitch.service service file and no longer the older openvswitch-switch Sysv5 init script. Add a special case for that. Change-Id: I5152f2585c3d4d18853988d6290039d6b1713b99 --- lib/neutron_plugins/ovs_base | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 10043252d6..62a4d00bcd 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -69,7 +69,11 @@ function _neutron_ovs_base_install_agent_packages { restart_service openvswitch sudo systemctl enable openvswitch elif is_suse; then - restart_service openvswitch-switch + if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then + restart_service openvswitch-switch + else + restart_service openvswitch + fi fi } From ea774b684d5116c626eb7b64eed9de4ecd5821a4 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 9 Dec 2016 00:57:57 +0100 Subject: [PATCH 0270/1936] [opensuse] add python-xml to general deps list the python-xml is a subpackage from the standard cpython package that that contains elementtree and other bits that are needed almost everywhere in OpenStack but isn't installed on a absolutely minimal openSUSE Leap installation. 
This package doesn't exist on pip but is a SUSE only invention, so just treat it similar to a bindep. Change-Id: I82887c2e6895740d1b16d1269574519450ca783e --- files/rpms-suse/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 3b19071770..1044c25288 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -21,6 +21,7 @@ postgresql-devel # psycopg2 psmisc python-cmd2 # dist:opensuse-12.3 python-devel # pyOpenSSL +python-xml screen tar tcpdump From 16fb9bddf2dfc4ecdda5aeed5d09d10e78558f16 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 9 Dec 2016 06:53:39 +0000 Subject: [PATCH 0271/1936] Updated from generate-devstack-plugins-list Change-Id: Ia08c86b2adff782b42a83509359c38d7d531e481 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9a75090713..358e1e4ba5 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -79,6 +79,7 @@ magnum-ui `git://git.openstack.org/openstack/magnum manila `git://git.openstack.org/openstack/manila `__ manila-ui `git://git.openstack.org/openstack/manila-ui `__ masakari `git://git.openstack.org/openstack/masakari `__ +meteos `git://git.openstack.org/openstack/meteos `__ mistral `git://git.openstack.org/openstack/mistral `__ mixmatch `git://git.openstack.org/openstack/mixmatch `__ monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ From f575aefdfa2db204f975b3b4b178da1e292f4372 Mon Sep 17 00:00:00 2001 From: Roman Podoliaka Date: Tue, 11 Oct 2016 13:15:55 +0300 Subject: [PATCH 0272/1936] lib/nova: create api db first Since I21ae13a6c029e8ac89484faa212434911160fd51 nova-manage db sync may try to make a request to api db in order to get cell mapping and will fail, as the db is not created yet. While this is non fatal, we could avoid the error anyway. Change-Id: I19483e9420071d484f029779bcc8c6d623c210ce Related-Bug: #1631033 --- lib/nova | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index d5db5eaeb7..9f0cddeaf3 100644 --- a/lib/nova +++ b/lib/nova @@ -676,6 +676,9 @@ function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + recreate_database $NOVA_API_DB + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + # (Re)create nova databases recreate_database nova recreate_database nova_api_cell0 @@ -689,9 +692,6 @@ function init_nova { recreate_database $NOVA_CELLS_DB fi - recreate_database $NOVA_API_DB - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations From 22b63666de41bf62b008cff709168c5e84a0bf3b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 9 Dec 2016 07:33:01 -0500 Subject: [PATCH 0273/1936] Warn instead of die on undefined config names When using local.conf in multinode envs not everything is going to be defined in all places. Eventually we probably want to make it so we have a host role for these sections or something. But for now warn instead of die when we can't find a config var. 
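To illustrate the failure mode (the variable and option below are examples only), a
single local.conf shared between a controller and its subnodes may carry a post-config
group for a service that is only deployed on the controller:

    # Shared local.conf fragment (example only)
    [[post-config|$GLANCE_API_CONF]]
    [DEFAULT]
    show_multiple_locations = True

On a compute-only subnode glance is never configured, so $GLANCE_API_CONF expands to
nothing; previously merge_config_group aborted the entire run at that point, whereas
now it only logs a warning and skips the group.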
Change-Id: I6959099373f035fbfe9e540a44e4c52b8e7c95c0 Closes-Bug: #2000824 --- inc/meta-config | 3 ++- tests/test_meta_config.sh | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/inc/meta-config b/inc/meta-config index 6252135747..be73b60800 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -181,7 +181,8 @@ function merge_config_group { realconfigfile=$(eval "echo $configfile") if [[ -z $realconfigfile ]]; then - die $LINENO "bogus config file specification: $configfile is undefined" + warn $LINENO "unknown config file specification: $configfile is undefined" + break fi dir=$(dirname $realconfigfile) if [[ -d $dir ]]; then diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 92f9c01f69..087aaf468b 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -29,6 +29,10 @@ function die { exit -1 } +function warn { + return 0 +} + TEST_1C_ADD="[eee] type=new multi = foo2" @@ -92,7 +96,7 @@ $TEST_1C_ADD [[test3|test-space.conf]] [DEFAULT] attribute=value - + # the above line has a single space [[test4|\$TEST4_DIR/\$TEST4_FILE]] @@ -378,11 +382,10 @@ set -e echo -n "merge_config_group test9 undefined conf file: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed +# function is expected to trigger warn and continue (merge_config_group test.conf test9) VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e From 7f68548538a5855cff8fe1debf4e65ff74074e09 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 13 Dec 2016 15:47:11 +1100 Subject: [PATCH 0274/1936] Always add the service role to service users When creating a service user we allow the user to be created with a different role. Currently in auth_token middleware we want to check that the service token is specified with the service role so we should always add the service role and optionally add additional roles. Change-Id: Ie954a679674b4795079b539ebc8d4d2dcbd7dacc --- lib/keystone | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/keystone b/lib/keystone index fd1d1d4a61..a8de3efa51 100644 --- a/lib/keystone +++ b/lib/keystone @@ -445,14 +445,16 @@ function create_keystone_accounts { # # create_service_user [role] # -# The role defaults to the service role. It is allowed to be provided as optional as historically +# We always add the service role, other roles are also allowed to be added as historically # a lot of projects have configured themselves with the admin or other role here if they are # using this user for other purposes beyond simply auth_token middleware. function create_service_user { - local role=${2:-service} - get_or_create_user "$1" "$SERVICE_PASSWORD" "$SERVICE_DOMAIN_NAME" - get_or_add_user_project_role "$role" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + get_or_add_user_project_role service "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + + if [[ -n "$2" ]]; then + get_or_add_user_project_role "$2" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + fi } # Configure the service to use the auth token middleware. From edcb7e5bb321d247c9667933f3ff7c72973caa6f Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Thu, 15 Dec 2016 11:29:28 -0500 Subject: [PATCH 0275/1936] lib/neutron: Fix some settings * Set ml2_type_flat setting so that the public flat network is created correctly * Set securitygroup driver correctly It should be set as: [securitygroup] firewall_driver = iptables Change-Id: I7369b45fbc5a47ce958693c67a1902a8cb24f367 --- lib/neutron | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 4915dcce31..5d03543180 100644 --- a/lib/neutron +++ b/lib/neutron @@ -161,6 +161,7 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 + iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security fi @@ -173,10 +174,10 @@ function configure_neutron_new { # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables + iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP else - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid + iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables_hybrid iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP fi From a09cb5acf908e04b6509484fdff43016ebebf2cf Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 14 Dec 2016 07:57:26 -0800 Subject: [PATCH 0276/1936] Use java version independent package on Ubuntu Different versions of Ubuntu ship with different versions of Java. Trusty had 7, Xenial has 8, and so on. This causes problems when we hardcode a versioned package name into our dep lists as that version may not exist everywhere. Thankfully Ubuntu provides a default-jre-headless package that we can use instead that maps properly onto whatever java version is correct. 
Change-Id: I4e5da215c8f7aa426494686d5043995ce5d3c3af --- files/debs/general | 2 +- pkg/elasticsearch.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/debs/general b/files/debs/general index a1f2a4b159..c121770fa2 100644 --- a/files/debs/general +++ b/files/debs/general @@ -2,6 +2,7 @@ bc bridge-utils bsdmainutils curl +default-jre-headless # NOPRIME g++ gcc gettext # used for compiling message catalogs @@ -17,7 +18,6 @@ libxml2-dev # lxml libxslt1-dev # lxml libyaml-dev lsof # useful when debugging -openjdk-7-jre-headless # NOPRIME openssh-server openssl pkg-config diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index 856eaff36f..fefd454312 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -83,7 +83,7 @@ function install_elasticsearch { return fi if is_ubuntu; then - is_package_installed openjdk-7-jre-headless || install_package openjdk-7-jre-headless + is_package_installed default-jre-headless || install_package default-jre-headless sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb sudo update-rc.d elasticsearch defaults 95 10 From 8e14240d232366b8b1dd7de59800595211698fc1 Mon Sep 17 00:00:00 2001 From: Eric Berglund Date: Tue, 29 Nov 2016 17:33:28 -0600 Subject: [PATCH 0277/1936] Confirm network is created before setting public_network_id The current code assumes that there exists a public openstack network and uses that assumption to set the public_network_id variable in tempest lib. If NEUTRON_CREATE_INITIAL_NETWORKS is set to false this step will fail as there is no public network to be found. This change adds a check for NEUTRON_CREATE_INITIAL_NETWORKS before attempting to set this variable. Change-Id: I62e74d350d6533fa842d64c15b01b1a3d42c71c2 Closes-Bug: #1645900 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..a1b84b6bfc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -241,7 +241,9 @@ function configure_tempest { # the public network (for floating ip access) is only available # if the extension is enabled. - if is_networking_extension_supported 'external-net'; then + # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created + # and the public_network_id should not be set. + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) fi From 51a225c5d7f7d8655be62cab0e4d677b6839f35a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 15 Dec 2016 16:32:08 -0500 Subject: [PATCH 0278/1936] Create mechanism for enabling placement-client on subnodes When doing multinode devstack we need a way to specify that we've enabled for the placement service. We use a pseudo service of placement-client for this. 
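For example, a compute-only subnode could opt in roughly like this (a sketch; the
address is a placeholder):

    [[local|localrc]]
    # No placement-api runs here, but n-cpu still needs [placement]
    # credentials in nova.conf, so enable the pseudo service:
    ENABLED_SERVICES=n-cpu,q-agt,placement-client
    SERVICE_HOST=192.168.42.1   # controller running placement-api (placeholder)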
Change-Id: I04a655fbc58913b3d607400a7f677be299499142 --- lib/placement | 15 ++++++++------- stack.sh | 10 ++++++++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/placement b/lib/placement index 165c670206..ff630e29d8 100644 --- a/lib/placement +++ b/lib/placement @@ -55,7 +55,7 @@ PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778} # Test if any placement services are enabled # is_placement_enabled function is_placement_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"placement-" ]] && return 0 + [[ ,${ENABLED_SERVICES} =~ ,"placement-api" ]] && return 0 return 1 } @@ -101,12 +101,7 @@ function _config_placement_apache_wsgi { " -i $placement_api_apache_conf } -# configure_placement() - Set config files, create data dirs, etc -function configure_placement { - if [ "$PLACEMENT_DB_ENABLED" != False ]; then - iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` - fi - +function configure_placement_nova_compute { iniset $NOVA_CONF placement auth_type "password" iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" iniset $NOVA_CONF placement username placement @@ -121,7 +116,13 @@ function configure_placement { # established by the nova api. This avoids, for the time, being, # creating redundant configuration items that are just used for # testing. +} +# configure_placement() - Set config files, create data dirs, etc +function configure_placement { + if [ "$PLACEMENT_DB_ENABLED" != False ]; then + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + fi _config_placement_apache_wsgi } diff --git a/stack.sh b/stack.sh index f4bac30dac..1817f61704 100755 --- a/stack.sh +++ b/stack.sh @@ -869,6 +869,16 @@ if is_service_enabled placement; then configure_placement fi +# create a placement-client fake service to know we need to configure +# placement connectivity. We configure the placement service for nova +# if placement-api or placement-client is active, and n-cpu on the +# same box. +if is_service_enabled placement placement-client; then + if is_service_enabled n-cpu; then + configure_placement_nova_compute + fi +fi + if is_service_enabled horizon; then # django openstack_auth install_django_openstack_auth From ea3e87d4e5a7a23870e86bbe133064a99e9519cf Mon Sep 17 00:00:00 2001 From: Julia Varlamova Date: Fri, 16 Dec 2016 14:39:31 +0400 Subject: [PATCH 0279/1936] Add new parameter RECREATE_KEYSTONE_DB If 'RECREATE_KEYSTONE_DB=False' database won't be recreated. It would be useful for multinode Grenade tests for Keystone. This parameter will help us to deploy multiple services on different machines talking to the same DB. Devstack recreates Keystone DB each time during Keystone service installation. If our controller node is already deployed, Keystone DB already contains important information about OpenStack services and their endpoints. When the second Keystone node is being deployed, we don't want to delete records about controllers' services endpoints. 
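A second keystone node pointed at the controller's database could then be stacked with
something like the following (sketch only; the address is a placeholder):

    [[local|localrc]]
    ENABLED_SERVICES=key
    # Reuse the existing keystone database instead of dropping it:
    RECREATE_KEYSTONE_DB=False
    DATABASE_TYPE=mysql
    DATABASE_HOST=192.168.42.1  # controller's database (placeholder)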
Partially-Implements: bp rolling-upgrade-testing Change-Id: Ia8d07b4295ca165be01e44466c95d5275f596e83 --- lib/keystone | 6 ++++-- stackrc | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index fd1d1d4a61..0d54dee2db 100644 --- a/lib/keystone +++ b/lib/keystone @@ -488,8 +488,10 @@ function init_keystone { init_ldap fi - # (Re)create keystone database - recreate_database keystone + if [[ "$RECREATE_KEYSTONE_DB" == True ]]; then + # (Re)create keystone database + recreate_database keystone + fi # Initialize keystone database $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync diff --git a/stackrc b/stackrc index e7771cfab5..ae87b22e13 100644 --- a/stackrc +++ b/stackrc @@ -819,6 +819,10 @@ GIT_DEPTH=${GIT_DEPTH:-0} # Use native SSL for servers in ``SSL_ENABLED_SERVICES`` USE_SSL=$(trueorfalse False USE_SSL) +# We may not need to recreate database in case 2 Keystone services +# sharing the same database. It would be useful for multinode Grenade tests. +RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) + # ebtables is inherently racey. If you run it by two or more processes # simultaneously it will collide, badly, in the kernel and produce # failures or corruption of ebtables. The only way around it is for From 3f771b7bcb04a22c056f87e9e9bae76209fd3555 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Sat, 17 Dec 2016 04:12:24 +0000 Subject: [PATCH 0280/1936] iptables: don't enable arptables firewall Neutron doesn't use any arptables based firewall rules. This should somewhat optimize kernel packet processing performance. I think the setting came from: http://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf but does not apply to the way we use iptables. Change-Id: I41796c76172f5243e4f9c4902363abb1f19d0d12 Closes-Bug: #1651765 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 6a0ac67b69..0be9794d9a 100644 --- a/functions +++ b/functions @@ -658,7 +658,7 @@ function enable_kernel_bridge_firewall { # Enable bridge firewalling in case it's disabled in kernel (upstream # default is enabled, but some distributions may decide to change it). # This is at least needed for RHEL 7.2 and earlier releases. - for proto in arp ip ip6; do + for proto in ip ip6; do sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1 done } From 91070d7e408de28ae6971fb480f499aa5ba41919 Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Mon, 19 Dec 2016 18:07:55 +0100 Subject: [PATCH 0281/1936] Add placement services to default devstack Now that the placement service is mandatory for running Nova in Ocata, we want to enable it by default when running devstack by default. In the past, we added a placement-client service with I04a655fbc58913b3d607400a7f677be299499142 Devstack-gate will also be able to run a multinode devstack with the help of Ibd760c642e3c1ffff2dd61be48e30530b0d24720 Change-Id: I273c3c8299ee329bed425f3e7cd4b583ed1187a4 --- stackrc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackrc b/stackrc index e7771cfab5..f6cf0ce171 100644 --- a/stackrc +++ b/stackrc @@ -54,6 +54,8 @@ if ! 
isset ENABLED_SERVICES ; then ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth + # Placement service needed for Nova + ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova ENABLED_SERVICES+=,g-api,g-reg # Cinder From 7cdefd4e08e52e246582d5a91a71207e3b0a7bf9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 20 Dec 2016 07:02:50 +0000 Subject: [PATCH 0282/1936] Updated from generate-devstack-plugins-list Change-Id: Ica922f54ffd53e81b3ec034c2ce9b91715304954 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 358e1e4ba5..a6f509e238 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -95,6 +95,7 @@ networking-bgpvpn `git://git.openstack.org/openstack/networ networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ networking-calico `git://git.openstack.org/openstack/networking-calico `__ networking-cisco `git://git.openstack.org/openstack/networking-cisco `__ +networking-dpm `git://git.openstack.org/openstack/networking-dpm `__ networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ @@ -121,6 +122,7 @@ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutro neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ nimble `git://git.openstack.org/openstack/nimble `__ nova-docker `git://git.openstack.org/openstack/nova-docker `__ +nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ From 95ed7c6f0513debdc0c692e78b898c08b84eab99 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 20 Dec 2016 17:29:14 -0500 Subject: [PATCH 0283/1936] Don't buffer log output in sed Services that run inside Apache use tail -f on the corresponding log file to display output in the screen session. However, they also use sed to replace some control characters, and this means that the output is buffered. This results in debugging experiences that range from "impossible" (the log you want isn't shown) to "Kafkaesque nightmare" (the log you want isn't shown, except that sometimes it is, and sometimes it isn't even though you double-checked and you're completely sure that you must have output a log, but when you check back later you realise it actually is and you wonder if history is actually not mutable after all and begin to question what is real and what is not). This adds the --unbuffered option to ensure streaming output. 
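The underlying behaviour is easy to demonstrate outside of devstack (a minimal
standalone example, not devstack code):

    # Without -u, GNU sed block-buffers when stdout is not a terminal,
    # so new log lines show up late or not at all:
    tail -f /var/log/syslog | sed 's/error/ERROR/' | cat
    # With -u (--unbuffered) every processed line is flushed immediately:
    tail -f /var/log/syslog | sed -u 's/error/ERROR/' | cat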
Change-Id: I665ff5f047156401d8152f478d834ac40ff31658 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index cc1d42b0d6..9423f9ed90 100644 --- a/functions-common +++ b/functions-common @@ -1678,7 +1678,7 @@ function tail_log { local logfile=$2 if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$name" "sudo tail -f $logfile | sed 's/\\\\\\\\x1b/\o033/g'" + screen_process "$name" "sudo tail -f $logfile | sed -u 's/\\\\\\\\x1b/\o033/g'" fi } From 20eb274b753ed7cfeabd4eca8865774fe4bed182 Mon Sep 17 00:00:00 2001 From: Pawel Koniszewski Date: Wed, 21 Dec 2016 13:27:09 +0100 Subject: [PATCH 0284/1936] Move RABBIT_HOST to lib/rpc_backend This moves setting of RABBIT_HOST from stack.sh to lib/rpc_backend so it may be used in grenade runs, which don't have the defaulted value from stack.sh. The RABBIT_HOST is needed in order to call get_transport_url in lib/rpc_backend. Change-Id: I504f7fac7bb9a8c158e20046dbd1dd2d507db02b Closes-Bug: #1649586 Depends-On: I3d4d7b309e50f4e2970cda55aada02d68c4fa705 --- lib/rpc_backend | 3 +++ stack.sh | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 97b1aa409a..a21f781b4e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -25,6 +25,9 @@ _XTRACE_RPC_BACKEND=$(set +o | grep xtrace) set +o xtrace RABBIT_USERID=${RABBIT_USERID:-stackrabbit} +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} +fi # Functions # --------- diff --git a/stack.sh b/stack.sh index 1817f61704..5de2b6366f 100755 --- a/stack.sh +++ b/stack.sh @@ -664,7 +664,6 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || # In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit # isn't enabled. if is_service_enabled rabbit; then - RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi From 70a620846483c3f65500dbfe26aef7f3dc1ce50c Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Tue, 20 Dec 2016 15:34:29 +0100 Subject: [PATCH 0285/1936] Placement service only using default HTTPd ports The placement API configuration was binding a specific port *and* was supporting to be called by the default HTTPd ports using a Location directive. Given that the corresponding service catalog entry for the placement service type doesn't mention the specific application port but is rather using the default port 80, we can remove that specific port and just use the default config. Note that we still need to use a VirtualHost directive for the specific placement config because ErrorLog is only scoped for either server or virtualhost but can't be set for a Location (or a Directory) context. Change-Id: I9a26dcff4b879cf9e82e43a3d1aca2e4fe6aa3e6 --- files/apache-placement-api.template | 8 +++++--- lib/placement | 7 ------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template index b89ef96776..011abb95fc 100644 --- a/files/apache-placement-api.template +++ b/files/apache-placement-api.template @@ -1,6 +1,8 @@ -Listen %PUBLICPORT% - - +# NOTE(sbauza): This virtualhost is only here because some directives can +# only be set by a virtualhost or server context, so that's why the port is not bound. +# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing +# vhost. 
+ WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup placement-api WSGIScriptAlias / %PUBLICWSGI% diff --git a/lib/placement b/lib/placement index ff630e29d8..93b72eb5e0 100644 --- a/lib/placement +++ b/lib/placement @@ -47,7 +47,6 @@ fi # Public facing bits PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} -PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778} # Functions # --------- @@ -68,7 +67,6 @@ function cleanup_placement { # _config_placement_apache_wsgi() - Set WSGI config files function _config_placement_apache_wsgi { local placement_api_apache_conf - local placement_api_port=$PLACEMENT_SERVICE_PORT local venv_path="" local nova_bin_dir="" nova_bin_dir=$(get_python_exec_prefix) @@ -89,7 +87,6 @@ function _config_placement_apache_wsgi { sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf sudo sed -e " - s|%PUBLICPORT%|$placement_api_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g; s|%SSLENGINE%|$placement_ssl|g; @@ -161,10 +158,6 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - # Get right service port for testing - local service_port=$PLACEMENT_SERVICE_PORT - local placement_api_port=$PLACEMENT_SERVICE_PORT - enable_apache_site placement-api restart_apache_server tail_log placement-api /var/log/$APACHE_NAME/placement-api.log From 8a92b7f1deeb1b9e99b41b7d7b8e22ca9636b6af Mon Sep 17 00:00:00 2001 From: Pawel Koniszewski Date: Wed, 5 Oct 2016 16:14:19 +0200 Subject: [PATCH 0286/1936] Add new configuration option for LM+grenade job In order to test whether live migration is backward compatible we need to live migrate VM back and forth between two versions of nova. 
This configuration option will allow to reuse existing tests just by adding if condition in a method that invokes live migration and validates outcome: * If set to False, it will use existing behaviour * If set to True, it will live migrate VM, validate whether it succeded, then live migrate the same VM once again and again validate the result Depends-On: Icaeca404ec3e4b8f3cd489789fdac6117740ec43 Change-Id: I8da2b3bd0c08d9a3111d3531c346d06bd52cae7b --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..efd698db1c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -354,6 +354,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} if is_service_enabled n-cell; then # Cells doesn't support shelving/unshelving From 36f81ff6d568395bd2fd7f48bc983b9756924e3d Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 2 Nov 2016 17:50:56 -0700 Subject: [PATCH 0287/1936] Switch to using openstack client for setting the router's gateway Depends-on: Ifb5a4d1965cd7e75c0c8cf2cfb677e0628b699dc Change-Id: Iba58f5275cacc7bc82fa2f2d2b96315c2350ab70 --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 569a366c79..ae086ce9a3 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -337,7 +337,7 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3 || is_service_enabled neutron-l3; then @@ -385,7 +385,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using the l3-agent for layer 3 From afa8a00c14f100cf610ac076f07db2379cfcee20 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 19 Dec 2016 09:51:01 -0500 Subject: [PATCH 0288/1936] Switch to python 3.5 Use trueorfalse to normalize the values for USE_PYTHON3 Install 3.5 instead of 3.4 When USE_PYTHON3 is specified. Also, since not many packages are classified correctly, fallback to looking for just "Programming Language :: Python :: 3" and log a message for the package to highlight the problem. 
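For reference, both classifier checks can be reproduced by hand, which is essentially
what the helpers added below do (the package names are only examples):

    # Local checkout: list the trove classifiers the package declares
    cd /opt/stack/nova && python setup.py --classifiers | grep ':: Python :: 3'
    # PyPI: the JSON metadata exposes the same classifier list
    curl -s https://pypi.python.org/pypi/oslo.config/json | \
        grep '"Programming Language :: Python :: 3"'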
Also special case some services that are *almost* ready Depends-On: Id48e1b328230fcdf97ed1cb4b97f4c3f9cf6eb8a Depends-On: Ib7d9aa0e0b74a936002e0eea0b3af05102b06a62 Change-Id: I243ea4b76f0d5ef57a03b5b0798a05468ee6de9b --- inc/python | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/apache | 10 +++++++++- lib/horizon | 12 ++++++++++-- stackrc | 4 ++-- 4 files changed, 73 insertions(+), 6 deletions(-) diff --git a/inc/python b/inc/python index e4cfab803c..54fd90533a 100644 --- a/inc/python +++ b/inc/python @@ -76,6 +76,27 @@ function get_python_versions_for_package { | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' ' } +# Check for python3 classifier in local directory +function check_python3_support_for_package_local { + local name=$1 + cd $name + set +e + classifier=$(python setup.py --classifiers \ + | grep 'Programming Language :: Python :: 3$') + set -e + echo $classifier +} + +# Check for python3 classifier on pypi +function check_python3_support_for_package_remote { + local name=$1 + set +e + classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \ + | grep '"Programming Language :: Python :: 3"') + set -e + echo $classifier +} + # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, @@ -123,9 +144,39 @@ function pip_install { # default pip local package_dir=${!#} local python_versions - if [[ -d "$package_dir" ]]; then + + # Special case some services that have experimental + # support for python3 in progress, but don't claim support + # in their classifier + echo "Check python version for : $package_dir" + if [[ ${package_dir##*/} == "nova" || ${package_dir##*/} == "glance" || ${package_dir##*/} == "cinder" ]]; then + echo "Using $PYTHON3_VERSION version to install $package_dir" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) + elif [[ -d "$package_dir" ]]; then python_versions=$(get_python_versions_for_package $package_dir) if [[ $python_versions =~ $PYTHON3_VERSION ]]; then + echo "Using $PYTHON3_VERSION version to install $package_dir" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) + else + # The package may not have yet advertised python3.5 + # support so check for just python3 classifier and log + # a warning. + python3_classifier=$(check_python3_support_for_package_local $package_dir) + if [[ ! -z "$python3_classifier" ]]; then + echo "Using $PYTHON3_VERSION version to install $package_dir" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) + fi + fi + else + # Check pypi as we don't have the package on disk + package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*') + python3_classifier=$(check_python3_support_for_package_remote $package) + if [[ ! 
-z "$python3_classifier" ]]; then + echo "Using $PYTHON3_VERSION version to install $package" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) fi fi diff --git a/lib/apache b/lib/apache index 2dc626f130..d1a11ae18b 100644 --- a/lib/apache +++ b/lib/apache @@ -71,7 +71,15 @@ function install_apache_wsgi { # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi + install_package apache2 + if python3_enabled; then + if is_package_installed libapache2-mod-wsgi; then + uninstall_package libapache2-mod-wsgi + fi + install_package libapache2-mod-wsgi-py3 + else + install_package libapache2-mod-wsgi + fi elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi diff --git a/lib/horizon b/lib/horizon index 830da095f6..4cabbe483c 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,7 +81,11 @@ function configure_horizon { # Horizon is installed as develop mode, so we can compile here. # Message catalog compilation is handled by Django admin script, # so compiling them after the installation avoids Django installation twice. - (cd $HORIZON_DIR; python manage.py compilemessages) + if python3_enabled; then + (cd $HORIZON_DIR; python${PYTHON3_VERSION} manage.py compilemessages) + else + (cd $HORIZON_DIR; python manage.py compilemessages) + fi # ``local_settings.py`` is used to override horizon default settings. local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py @@ -162,7 +166,11 @@ function install_django_openstack_auth { git_clone_by_name "django_openstack_auth" # Compile message catalogs before installation _prepare_message_catalog_compilation - (cd $dir; python setup.py compile_catalog) + if python3_enabled; then + (cd $dir; python${PYTHON3_VERSION} setup.py compile_catalog) + else + (cd $dir; python setup.py compile_catalog) + fi setup_dev_lib "django_openstack_auth" fi # if we aren't using this library from git, then we just let it diff --git a/stackrc b/stackrc index ae87b22e13..cb9b8176ca 100644 --- a/stackrc +++ b/stackrc @@ -101,12 +101,12 @@ if [[ -r $RC_DIR/.localrc.password ]]; then fi # Control whether Python 3 should be used. -export USE_PYTHON3=${USE_PYTHON3:-False} +export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. -export PYTHON3_VERSION=${PYTHON3_VERSION:-3.4} +export PYTHON3_VERSION=${PYTHON3_VERSION:-3.5} # Just to be more explicit on the Python 2 version to use. export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7} From 09cef5a2330031ce4e3dd28dff02ad926e83be19 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 28 Dec 2016 23:03:28 +0100 Subject: [PATCH 0289/1936] lib/tempest: Liberty EOL: remove an useless feature flag The snapshot_backup feature flag was introduced in Ib695e60c2ed7edf30c8baef9e00f0307b1156551 to enable Tempest tests introduced in I1964ce6e1298041f8238d76fa4b7029d2d23bbfb But I1964ce6e1298041f8238d76fa4b7029d2d23bbfb was never merged so that feature flag was never really useful. 
Change-Id: I4e0bc786d2320907cb101fc788ad51444628537d --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..7d68092893 100644 --- a/lib/tempest +++ b/lib/tempest @@ -430,8 +430,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - # TODO(obutenko): Remove snapshot_backup when liberty-eol happens. - iniset $TEMPEST_CONFIG volume-feature-enabled snapshot_backup True # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. From 2bf5b416a748ff6c1489b870bab3956aa1b24aee Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 28 Dec 2016 23:22:17 +0100 Subject: [PATCH 0290/1936] lib/tempest: don't set cli_dir and dashboard_url config options CLI tests have been removed from Tempest in I4f8638f1c048bbdb598dd181f4af272ef9923806 Dashboard tests have been removed from Tempest in I2a69ebed2947a5ab5e5ca79557130bd093e168dd Change-Id: I6df74a07e209b07fd3feae762c9cdab16e09414f --- lib/tempest | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..c3cfae3c7f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -486,12 +486,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi - # Dashboard - iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" - - # CLI - iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR - # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG baremetal driver_enabled True From 42e73db840213b581c2848aef1ae87b5f6e61faa Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 28 Dec 2016 23:34:06 +0100 Subject: [PATCH 0291/1936] lib/tempest: remove the allow_port_security_disabled option Now that Liberty is EOLed, the feature flag is not needed anymore. Change-Id: I5206535761773d4bcb02ebb8f25d1b0c1b59110c Depends-On: If0b2168080a0b0ecdc6682ef69856a0879f4f6d3 --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6dc83b558e..7e4c7788b3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -347,8 +347,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion fi - # TODO(mriedem): Remove allow_port_security_disabled after liberty-eol. 
- iniset $TEMPEST_CONFIG compute-feature-enabled allow_port_security_disabled True iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} From ee3383f7198fbe4563425a3d4fdc1dc2529f2950 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 3 Jan 2017 07:02:56 +0000 Subject: [PATCH 0292/1936] Updated from generate-devstack-plugins-list Change-Id: I8ea9f059757ef9b40dc38b93a9ae8072c5be9e4c --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index a6f509e238..cb9c437458 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -95,6 +95,7 @@ networking-bgpvpn `git://git.openstack.org/openstack/networ networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ networking-calico `git://git.openstack.org/openstack/networking-calico `__ networking-cisco `git://git.openstack.org/openstack/networking-cisco `__ +networking-cumulus `git://git.openstack.org/openstack/networking-cumulus `__ networking-dpm `git://git.openstack.org/openstack/networking-dpm `__ networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ @@ -130,6 +131,7 @@ oaktree `git://git.openstack.org/openstack/oaktre octavia `git://git.openstack.org/openstack/octavia `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ +picasso `git://git.openstack.org/openstack/picasso `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ From 0c0d848b8ce151c72b21114ac741508351a0bf37 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 3 Jan 2017 08:52:25 -0500 Subject: [PATCH 0293/1936] Add swift and uwsgi to py35 whitelist Really close to getting swift and keystone under uwsgi working, so let's white list them. Won't affect any existing jobs, so we should be good. 
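The whitelist matches on the basename of the directory being installed via bash
parameter expansion; roughly (a standalone illustration, not code from this change):

    package_dir=/opt/stack/swift
    echo "${package_dir##*/}"   # strips up to the last '/': prints "swift"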
Change-Id: I51d56d16a5b175bd45dee09edc0b2748d72a5d06 --- inc/python | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 54fd90533a..5a9a9ed588 100644 --- a/inc/python +++ b/inc/python @@ -149,7 +149,9 @@ function pip_install { # support for python3 in progress, but don't claim support # in their classifier echo "Check python version for : $package_dir" - if [[ ${package_dir##*/} == "nova" || ${package_dir##*/} == "glance" || ${package_dir##*/} == "cinder" ]]; then + if [[ ${package_dir##*/} == "nova" || ${package_dir##*/} == "glance" || \ + ${package_dir##*/} == "cinder" || ${package_dir##*/} == "swift" || \ + ${package_dir##*/} == "uwsgi" ]]; then echo "Using $PYTHON3_VERSION version to install $package_dir" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) From 2a7e909b3f9480d3a61bd9ec497a954c833d14c8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 3 Jan 2017 21:11:55 -0500 Subject: [PATCH 0294/1936] Explicitly set use_neutron=False in nova.conf when running nova-net nova-network has been deprecated since Netwon and Nova change I8388c29ad310cd8800084b4d5c026013158bfbed is switching the default value of use_neutron to True, so we need devstack to explicitly set use_neutron=False when running and configuring nova-network. Part of blueprint use-neutron-by-default Change-Id: I82721b5d10711401b9b0ebc2b0ed07cc8287bbf7 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index d5db5eaeb7..50c0d4708e 100644 --- a/lib/nova +++ b/lib/nova @@ -663,6 +663,7 @@ function create_nova_conf_nova_network { if [ -n "$FLAT_INTERFACE" ]; then iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" fi + iniset $NOVA_CONF DEFAULT use_neutron False } # create_nova_keys_dir() - Part of the init_nova() process From 52b10746154d57c067b152009bbc13024c2951d9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Dec 2016 16:11:17 +0100 Subject: [PATCH 0295/1936] cinder: configure dlm when this one is zookeeper Previous this was set the zake, but that was revert to missing dependencies issue and because zake is a test fixture and not somthing to deploy. This change configures the Cinder dlm with this one is zookeeper. And it installs tooz and the extra dependencies needed for the zookeeper driver. To do it, this commit have to introduce a new method for package installation: 'pip_install_gr_extras package extra1,extra2'. Change-Id: Idca310c08e345db59840eb31434c6cb1f849fa70 --- inc/python | 14 ++++++++++++++ lib/cinder | 7 +++++++ lib/dlm | 1 + 3 files changed, 22 insertions(+) diff --git a/inc/python b/inc/python index 5a9a9ed588..04cde34fe1 100644 --- a/inc/python +++ b/inc/python @@ -69,6 +69,20 @@ function pip_install_gr { pip_install $clean_name } +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification with extras. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr_extras packagename extra1,extra2,... 
+function pip_install_gr_extras { + local name=$1 + local extras=$2 + local clean_name + clean_name=$(get_from_global_requirements $name) + pip_install $clean_name[$extras] +} + # Determine the python versions supported by a package function get_python_versions_for_package { local name=$1 diff --git a/lib/cinder b/lib/cinder index f6ad780bb9..40f0f16d6b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -373,6 +373,13 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + # Set the backend url according to the configured dlm backend + if is_dlm_enabled; then + if [[ "$(dlm_backend)" == "zookeeper" ]]; then + iniset $CINDER_CONF coordination backend_url "zookeeper://${SERVICE_HOST}:2181" + fi + fi } # create_cinder_accounts() - Set up common required cinder accounts diff --git a/lib/dlm b/lib/dlm index e391535910..b5ac0f5d33 100644 --- a/lib/dlm +++ b/lib/dlm @@ -91,6 +91,7 @@ function configure_dlm { # install_dlm() - Collect source and prepare function install_dlm { if is_dlm_enabled; then + pip_install_gr_extras tooz zookeeper if is_ubuntu; then install_package zookeeperd elif is_fedora; then From 80b1d0ae7db263dada7fdc4d9d8190d0518b8f6c Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Wed, 4 Jan 2017 16:58:04 +0100 Subject: [PATCH 0296/1936] Fix placement service for identity-v3 The domain_name to be used needs to be $SERVICE_DOMAIN_NAME, as this is changed in devstack from "Default" to "service". Change-Id: I6351c1b2ca7ea4448e13eb87455bff4058df4fa7 --- lib/placement | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/placement b/lib/placement index 93b72eb5e0..871e282f32 100644 --- a/lib/placement +++ b/lib/placement @@ -103,9 +103,9 @@ function configure_placement_nova_compute { iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" iniset $NOVA_CONF placement username placement iniset $NOVA_CONF placement password "$SERVICE_PASSWORD" - iniset $NOVA_CONF placement user_domain_name "Default" + iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF placement project_domain_name "Default" + iniset $NOVA_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF placement os_region_name "$REGION_NAME" # TODO(cdent): auth_strategy, which is common to see in these # blocks is not currently used here. For the time being the From 53a49d104a59678ec36c5b8f4406ad51d69cf5f7 Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Fri, 23 Dec 2016 16:16:50 -0600 Subject: [PATCH 0297/1936] Add virt driver 'zun' to devstack According to the feedback in the TC meeting [1], we renamed the Nova virt driver from "docker" to "zun" [2] to avoid name collision to nova-docker. This rename also help to clarify the difference between these two drivers. 
[1] http://eavesdrop.openstack.org/meetings/tc/2016/ tc.2016-11-29-20.01.log.html [2] https://review.openstack.org/#/c/414651/ Change-Id: I747080953ae4d1d35ed334831100413b6e4466c4 --- lib/nova | 2 +- stackrc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 50c0d4708e..8f11e0f311 100644 --- a/lib/nova +++ b/lib/nova @@ -823,7 +823,7 @@ function start_nova_compute { run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP - elif [[ "$VIRT_DRIVER" = 'docker' ]]; then + elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i diff --git a/stackrc b/stackrc index 7ce6c513b0..19f5b53372 100644 --- a/stackrc +++ b/stackrc @@ -573,7 +573,7 @@ case "$VIRT_DRIVER" in lxd) LXD_GROUP=${LXD_GROUP:-"lxd"} ;; - docker) + docker|zun) DOCKER_GROUP=${DOCKER_GROUP:-"docker"} ;; fake) From 85879f1f8157a60fc190eb2731e4edd0deb1dae3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 4 Jan 2017 19:51:50 -0500 Subject: [PATCH 0298/1936] Use the installed swift scripts This commit switches how scripts we use to launch the installed version in the path. Previously the scripts were manually executed in the source repo, but this has issues if you're trying to run with py3 in a system where python == py2. Setuptools already does the shebang magic for us at install time, so we just need to use the installed version of the script. Change-Id: Iaa4d80ec607a2aa200400330e16cad3a4ca782ac --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b175f2e5c1..761ae74b6b 100644 --- a/lib/swift +++ b/lib/swift @@ -809,10 +809,10 @@ function start_swift { local proxy_port=${SWIFT_DEFAULT_BIND_PORT} start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT fi - run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + run_process s-proxy "swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" if [[ ${SWIFT_REPLICAS} == 1 ]]; then for type in object container account; do - run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + run_process s-${type} "swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" done fi From 0f97841dfa0187a34f3345ba5f6e58d398e82832 Mon Sep 17 00:00:00 2001 From: yatin Date: Thu, 5 Jan 2017 12:32:57 +0530 Subject: [PATCH 0299/1936] Remove duplicate entry for flat_networks Currently if PHYSICAL_NETWORK and PUBLIC_PHYSICAL_NETWORK are same then duplicate entry is created in ml2_conf.ini like below: flat_networks = public,public, With this patch, if PHYSICAL_NETWORK and PUBLIC_PHYSICAL_NETWORK are same then add only PHYSICAL_NETWORK to flat_networks in ml2_conf.ini Change-Id: Iae4d1ee3882f6d96b4e4abd52ecc673a620563b5 Closes-Bug: #1654148 --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index e429714f06..d798929d36 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -105,7 +105,7 @@ function neutron_plugin_configure_service { if [[ -n "$PHYSICAL_NETWORK" ]]; then Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK}," fi - if [[ -n 
"$PUBLIC_PHYSICAL_NETWORK" ]]; then + if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]] && [[ "${PHYSICAL_NETWORK}" != "$PUBLIC_PHYSICAL_NETWORK" ]]; then Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK}," fi fi From 80f3635521c23235b75c6a9cbb8e8edcaf667ae6 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 2 Jan 2017 09:30:16 -0500 Subject: [PATCH 0300/1936] Run Swift services under py35 * iniuncomment followed by iniset for reseller_prefix just adds a duplicate line in the config file that configparser does not like so just remove the uncomment * fall back to http:// url for glance->swift keystone authentication * insecure flag to talk to swift Depends-On: I51d56d16a5b175bd45dee09edc0b2748d72a5d06 Change-Id: I02ed01e20f8dce195c51273e8384130af53384ce --- lib/glance | 11 ++++++++++- lib/swift | 1 - 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index da9cd43536..4ba1d20bd7 100644 --- a/lib/glance +++ b/lib/glance @@ -161,6 +161,9 @@ function configure_glance { if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + if python3_enabled; then + iniset $GLANCE_API_CONF glance_store swift_store_auth_insecure True + fi iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 @@ -186,7 +189,13 @@ function configure_glance { fi iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + if python3_enabled; then + # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag + # or ability to specify the CACERT. So fallback to http:// url + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address ${KEYSTONE_SERVICE_URI/https/http}/v3 + else + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + fi iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 # commenting is not strictly necessary but it's confusing to have bad values in conf diff --git a/lib/swift b/lib/swift index b175f2e5c1..aafe09d310 100644 --- a/lib/swift +++ b/lib/swift @@ -454,7 +454,6 @@ function configure_swift { # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth # otherwise, this code also sets the reseller_prefix for Keystoneauth. iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" if is_service_enabled swift3; then From b51a8862b1f80a947815094148bc229ba3d58ae1 Mon Sep 17 00:00:00 2001 From: Rodrigo Duarte Date: Mon, 26 Sep 2016 15:22:35 -0300 Subject: [PATCH 0301/1936] Adds keystone security compliance settings The PCI-DSS feature has been introduced during the Newton release and its settings are disabled by default. This patch adds the possibility to enable some of them during DevStack setup. 
Change-Id: If6b5eb3e3cbc43eb241c94d18af80ad50be08772 Depends-On: Id97ca26f93b742cc3d8d49e98afc581f22360504 --- lib/keystone | 12 ++++++++++++ lib/tempest | 6 ++++++ 2 files changed, 18 insertions(+) diff --git a/lib/keystone b/lib/keystone index 825fe44fca..34730b892a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -131,6 +131,12 @@ fi KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3 KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 +# Security compliance +KEYSTONE_SECURITY_COMPLIANCE_ENABLED=${KEYSTONE_SECURITY_COMPLIANCE_ENABLED:-True} +KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2} +KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-5} +KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} + # Functions # --------- @@ -339,6 +345,12 @@ function configure_keystone { # allows policy changes in order to clarify the adminess scope. #iniset $KEYSTONE_CONF resource admin_project_domain_name Default #iniset $KEYSTONE_CONF resource admin_project_name admin + + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT + fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index 3915c570ee..4b8fbb7a46 100644 --- a/lib/tempest +++ b/lib/tempest @@ -261,6 +261,9 @@ function configure_tempest { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" + iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation iniset $TEMPEST_CONFIG identity admin_domain_scope True if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then @@ -285,6 +288,9 @@ function configure_tempest { fi # Identity Features + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True + fi # TODO(rodrigods): Remove the reseller flag when Kilo and Liberty are end of life. 
iniset $TEMPEST_CONFIG identity-feature-enabled reseller True From 51ecf0a869720ae5e8845b95fd2973b2760dcecf Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 5 Jan 2017 16:11:17 -0500 Subject: [PATCH 0302/1936] Introduce a PYTHON env var * $PYTHON will have the path to python runtime to be used * Use $PYTHON to run all the scripts Change-Id: Ib5ab7820fc18cae5e50ea47302b610494197ad47 --- functions-common | 6 +++--- lib/horizon | 12 ++---------- tools/install_prereqs.sh | 3 +++ 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/functions-common b/functions-common index 8d03b88d24..8d32bb4148 100644 --- a/functions-common +++ b/functions-common @@ -87,7 +87,7 @@ function write_clouds_yaml { CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" fi # demo -> devstack - $TOP_DIR/tools/update_clouds_yaml.py \ + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack \ --os-region-name $REGION_NAME \ @@ -99,7 +99,7 @@ function write_clouds_yaml { --os-project-name demo # alt_demo -> devstack-alt - $TOP_DIR/tools/update_clouds_yaml.py \ + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-alt \ --os-region-name $REGION_NAME \ @@ -111,7 +111,7 @@ function write_clouds_yaml { --os-project-name alt_demo # admin -> devstack-admin - $TOP_DIR/tools/update_clouds_yaml.py \ + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-admin \ --os-region-name $REGION_NAME \ diff --git a/lib/horizon b/lib/horizon index 4cabbe483c..9c7ec005a2 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,11 +81,7 @@ function configure_horizon { # Horizon is installed as develop mode, so we can compile here. # Message catalog compilation is handled by Django admin script, # so compiling them after the installation avoids Django installation twice. - if python3_enabled; then - (cd $HORIZON_DIR; python${PYTHON3_VERSION} manage.py compilemessages) - else - (cd $HORIZON_DIR; python manage.py compilemessages) - fi + (cd $HORIZON_DIR; $PYTHON manage.py compilemessages) # ``local_settings.py`` is used to override horizon default settings. 
local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py @@ -166,11 +162,7 @@ function install_django_openstack_auth { git_clone_by_name "django_openstack_auth" # Compile message catalogs before installation _prepare_message_catalog_compilation - if python3_enabled; then - (cd $dir; python${PYTHON3_VERSION} setup.py compile_catalog) - else - (cd $dir; python setup.py compile_catalog) - fi + (cd $dir; $PYTHON setup.py compile_catalog) setup_dev_lib "django_openstack_auth" fi # if we aren't using this library from git, then we just let it diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 8895e1e77c..da59093581 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -83,6 +83,9 @@ fi if python3_enabled; then install_python3 + export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) +else + export PYTHON=$(which python 2>/dev/null) fi # Mark end of run From 09698d0e0e21f2cb401418db94d725d305acdc7d Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Fri, 6 Jan 2017 11:25:46 +0100 Subject: [PATCH 0303/1936] Fix typo in commands to enable nested KVM with kvm-amd Change-Id: Ie3c6df2409385d9c6bbc50b3b1f8b20689478466 --- doc/source/guides/devstack-with-nested-kvm.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 85a5656198..3732f06fd8 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -73,7 +73,7 @@ back: :: sudo rmmod kvm-amd - sudo sh -c "echo 'options amd nested=1' >> /etc/modprobe.d/dist.conf" + sudo sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf" sudo modprobe kvm-amd Ensure the Nested KVM Kernel module parameter for AMD is enabled on the From 0c6956862e6ac1cdb51b674c872183074df98c50 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 23 Dec 2016 14:35:45 +0000 Subject: [PATCH 0304/1936] Remove default image logic for Ironic from DevStack The logic to set the default image for Ironic has been moved into the Ironic tree. This patch is just removing it from DevStack. 
Change-Id: Iaeb177f194adc83e40d86696e5553f9f72bbd1f9 Depends-On: Id828b41dc44113ce1cd094ce5fc245989699d4ff --- stackrc | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/stackrc b/stackrc index 19f5b53372..d8d0ee4b3e 100644 --- a/stackrc +++ b/stackrc @@ -647,14 +647,9 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; ironic) - # Ironic can do both partition and full disk images, depending on the driver - if [[ -z "${IRONIC_DEPLOY_DRIVER%%agent*}" ]]; then - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk} - else - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} - fi - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz" - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";; + # NOTE(lucasagomes): The logic setting the default image + # now lives in the Ironic tree + ;; *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; From 78c26504e28291c6e5b7d04b6fb985cc82c53bb8 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 28 Dec 2016 22:53:33 +0100 Subject: [PATCH 0305/1936] lib/tempest: remove the identity_feature_enabled.reseller flag. Now that Liberty is EOLed, all supported versions of OpenStack have the 'reseller' [1] feature. [1]: http://specs.openstack.org/openstack/keystone-specs/specs/kilo/reseller.html Change-Id: Id823f1969fbd2cf28542a0ef0f905ddae4a0318c Depends-On: Ia86c6f351919bddf2611524bf0b143aa09dbddee --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 4b8fbb7a46..aa3877b4ab 100644 --- a/lib/tempest +++ b/lib/tempest @@ -291,8 +291,6 @@ function configure_tempest { if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True fi - # TODO(rodrigods): Remove the reseller flag when Kilo and Liberty are end of life. - iniset $TEMPEST_CONFIG identity-feature-enabled reseller True # Image # We want to be able to override this variable in the gate to avoid From c60818421152e5711fc93cab79122379c4e243aa Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Wed, 16 Nov 2016 00:40:32 -0800 Subject: [PATCH 0306/1936] XenAPI: Use XenServer DevStack plugins Hypervisor XenServer will change to use os-xenapi in the future, this will need DevStack changes, this patch is to remove install Dom0 plugins part to our own DevStack plugins. 
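As a sketch (the repository URL is the one listed in the plugin registry elsewhere in this series, not something this patch adds), the plugin the new check looks for would be pulled in from local.conf roughly like so:

    [[local|localrc]]
    # configure_nova_hypervisor now dies unless "os-xenapi" appears in
    # DEVSTACK_PLUGINS, so the XenServer setup has to enable the plugin:
    enable_plugin os-xenapi git://git.openstack.org/openstack/os-xenapi
    VIRT_DRIVER=xenserver
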
Change-Id: Ic327135b893a77672fd42af919f47f181e932773 --- lib/nova_plugins/hypervisor-xenserver | 27 ++++++++++++++++++--------- tools/xen/xenrc | 1 + 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index b053856348..0046a366c9 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -48,6 +48,21 @@ function configure_nova_hypervisor { if [ -z "$XENAPI_CONNECTION_URL" ]; then die $LINENO "XENAPI_CONNECTION_URL is not specified" fi + + # Check os-xenapi plugin is enabled + local plugins="${DEVSTACK_PLUGINS}" + local plugin + local found=0 + for plugin in ${plugins//,/ }; do + if [[ "$plugin" = "os-xenapi" ]]; then + found=1 + break + fi + done + if [[ $found -ne 1 ]]; then + die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf" + fi + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL" @@ -64,14 +79,6 @@ function configure_nova_hypervisor { local ssh_dom0 ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - # Find where the plugins should go in dom0 - xen_functions=`cat $TOP_DIR/tools/xen/functions` - PLUGIN_DIR=`$ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location"` - - # install nova plugins to dom0 - tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ | - $ssh_dom0 "tar -xzf - -C $PLUGIN_DIR && chmod a+x $PLUGIN_DIR/*" - # install console logrotate script tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' @@ -107,7 +114,9 @@ CRONTAB # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - pip_install_gr xenapi + # xenapi functionality is now included in os-xenapi library which houses the plugin + # so this function intentionally left blank + : } # start_nova_hypervisor - Start any required external services diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bb27454e30..2161247b76 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -101,6 +101,7 @@ set +u ## Note that the lines below are coming from stackrc to support ## new-style config files +source $RC_DIR/functions-common # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then From 94129c7d02902e0f000c09c8245be341df1c5965 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 9 Jan 2017 21:24:24 +0000 Subject: [PATCH 0307/1936] allow config to manage python3 use explicitly Add variables ENABLED_PYTHON3_PACKAGES and DISABLED_PYTHON3_PACKAGES to work like ENABLED_SERVICES and DISABLED_SERVICES and to manage which packages are installed using Python 3. Move the list of whitelisted packages in pip_install to the default for ENABLED_PYTHON3_PACKAGES, except swift which is not enabled by default for now. Add enable_python3_package and disable_python3_package functions to make editing the variables from local.conf easier. Add python3_enabled_for and python3_disabled_for functions to check the settings against packages being installed by pip. Update pip_install to check if python3 is disabled for a service, then see if it is explicitly enabled, and only then fall back to looking at the classifiers in the packaging metadata. 
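To make the intended usage concrete (a sketch only; the package names are examples drawn from the defaults described above), the new helpers are meant to be driven from local.conf much like enable_service/disable_service:

    [[local|localrc]]
    USE_PYTHON3=True
    # swift is deliberately left out of the default list, so opt it in explicitly:
    enable_python3_package swift
    # keep glance on python 2 even though it is in the default list:
    disable_python3_package glance
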
Update pip_install messages to give more detail about why the choice between python 2 and 3 is being made for a given package. Change-Id: I69857d4e11f4767928614a3b637c894bcd03491f Signed-off-by: Doug Hellmann --- inc/python | 119 ++++++++++++++++++++++++++++++++++++++++--- stackrc | 12 ++++- tests/test_python.sh | 30 +++++++++++ 3 files changed, 153 insertions(+), 8 deletions(-) create mode 100755 tests/test_python.sh diff --git a/inc/python b/inc/python index 5a9a9ed588..1c581ba22b 100644 --- a/inc/python +++ b/inc/python @@ -97,6 +97,111 @@ function check_python3_support_for_package_remote { echo $classifier } +# python3_enabled_for() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_PYTHON3_PACKAGES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# Uses global ``ENABLED_PYTHON3_PACKAGES`` +# python3_enabled_for dir [dir ...] +function python3_enabled_for { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local enabled=1 + local dirs=$@ + local dir + for dir in ${dirs}; do + [[ ,${ENABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0 + done + + $xtrace + return $enabled +} + +# python3_disabled_for() checks if the service(s) specified as arguments are +# disabled by the user in ``DISABLED_PYTHON3_PACKAGES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# Uses global ``DISABLED_PYTHON3_PACKAGES`` +# python3_disabled_for dir [dir ...] +function python3_disabled_for { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local enabled=1 + local dirs=$@ + local dir + for dir in ${dirs}; do + [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0 + done + + $xtrace + return $enabled +} + +# enable_python3_package() adds the repositories passed as argument to the +# ``ENABLED_PYTHON3_PACKAGES`` list, if they are not already present. +# +# For example: +# enable_python3_package nova +# +# Uses global ``ENABLED_PYTHON3_PACKAGES`` +# enable_python3_package dir [dir ...] +function enable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local tmpsvcs="${ENABLED_PYTHON3_PACKAGES}" + local python3 + for dir in $@; do + if [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]]; then + warn $LINENO "Attempt to enable_python3_package ${dir} when it has been disabled" + continue + fi + if ! python3_enabled_for $dir; then + tmpsvcs+=",$dir" + fi + done + ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$tmpsvcs") + + $xtrace +} + +# disable_python3_package() prepares the services passed as argument to be +# removed from the ``ENABLED_PYTHON3_PACKAGES`` list, if they are present. +# +# For example: +# disable_python3_package swift +# +# Uses globals ``ENABLED_PYTHON3_PACKAGES`` and ``DISABLED_PYTHON3_PACKAGES`` +# disable_python3_package dir [dir ...] 
+function disable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}" + local enabled_svcs=",${ENABLED_PYTHON3_PACKAGES}," + local dir + for dir in $@; do + disabled_svcs+=",$dir" + if python3_enabled_for $dir; then + enabled_svcs=${enabled_svcs//,$dir,/,} + fi + done + DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs") + ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$enabled_svcs") + + $xtrace +} + # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, @@ -149,16 +254,16 @@ function pip_install { # support for python3 in progress, but don't claim support # in their classifier echo "Check python version for : $package_dir" - if [[ ${package_dir##*/} == "nova" || ${package_dir##*/} == "glance" || \ - ${package_dir##*/} == "cinder" || ${package_dir##*/} == "swift" || \ - ${package_dir##*/} == "uwsgi" ]]; then - echo "Using $PYTHON3_VERSION version to install $package_dir" + if python3_disabled_for ${package_dir##*/}; then + echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" + elif python3_enabled_for ${package_dir##*/}; then + echo "Explicitly using $PYTHON3_VERSION version to install $package_dir based on ENABLED_PYTHON3_PACKAGES" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) elif [[ -d "$package_dir" ]]; then python_versions=$(get_python_versions_for_package $package_dir) if [[ $python_versions =~ $PYTHON3_VERSION ]]; then - echo "Using $PYTHON3_VERSION version to install $package_dir" + echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on classifiers" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) else @@ -167,7 +272,7 @@ function pip_install { # a warning. python3_classifier=$(check_python3_support_for_package_local $package_dir) if [[ ! -z "$python3_classifier" ]]; then - echo "Using $PYTHON3_VERSION version to install $package_dir" + echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) fi @@ -177,7 +282,7 @@ function pip_install { package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*') python3_classifier=$(check_python3_support_for_package_remote $package) if [[ ! -z "$python3_classifier" ]]; then - echo "Using $PYTHON3_VERSION version to install $package" + echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) fi diff --git a/stackrc b/stackrc index 19f5b53372..ae7177214f 100644 --- a/stackrc +++ b/stackrc @@ -102,9 +102,19 @@ if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used. +# Control whether Python 3 should be used at all. export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) +# Control whether Python 3 is enabled for specific services by the +# base name of the directory from which they are installed. See +# enable_python3_package to edit this variable and use_python3_for to +# test membership. +export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi" + +# Explicitly list services not to run under Python 3. 
See +# disable_python3_package to edit this variable. +export DISABLED_PYTHON3_PACKAGES="" + # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. diff --git a/tests/test_python.sh b/tests/test_python.sh new file mode 100755 index 0000000000..8652798778 --- /dev/null +++ b/tests/test_python.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +source $TOP/functions-common +source $TOP/inc/python + +source $TOP/tests/unittest.sh + +echo "Testing Python 3 functions" + +# Initialize variables manipulated by functions under test. +export ENABLED_PYTHON3_PACKAGES="" +export DISABLED_PYTHON3_PACKAGES="" + +assert_false "should not be enabled yet" python3_enabled_for testpackage1 + +enable_python3_package testpackage1 +assert_equal "$ENABLED_PYTHON3_PACKAGES" "testpackage1" "unexpected result" +assert_true "should be enabled" python3_enabled_for testpackage1 + +assert_false "should not be disabled yet" python3_disabled_for testpackage2 + +disable_python3_package testpackage2 +assert_equal "$DISABLED_PYTHON3_PACKAGES" "testpackage2" "unexpected result" +assert_true "should be disabled" python3_disabled_for testpackage2 + +report_results From a2eb89417fbb6d61526b1819cbe3d0a60537eedd Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 9 Jan 2017 22:11:49 +0000 Subject: [PATCH 0308/1936] install LIBS_FROM_GIT using python 2 and 3 where appropriate When installing a library from source and python 3 is enabled, first run the installation process with python 2 enabled to ensure the library is also installed under python 2 for any services not yet running under 3. The python 3 version is installed second so that command line tools installed with libraries are installed under python 3 when python 3 is enabled. Change-Id: Ibb0f7a68d21081bf7652a0c1515080c0c54888ca Signed-off-by: Doug Hellmann --- inc/python | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/inc/python b/inc/python index 1c581ba22b..086056350f 100644 --- a/inc/python +++ b/inc/python @@ -399,6 +399,16 @@ function setup_lib { function setup_dev_lib { local name=$1 local dir=${GITDIR[$name]} + if python3_enabled; then + # Turn off Python 3 mode and install the package again, + # forcing a Python 2 installation. This ensures that all libs + # being used for development are installed under both versions + # of Python. + echo "Installing $name again without Python 3 enabled" + USE_PYTHON3=False + setup_develop $dir + USE_PYTHON3=True + fi setup_develop $dir } From 14e16e42f95cd02aaee1db0d5357027ea81a50e2 Mon Sep 17 00:00:00 2001 From: Pushkar Umaranikar Date: Fri, 9 Dec 2016 20:20:42 +0000 Subject: [PATCH 0309/1936] Setup service user configuration in nova.conf In Nova, service token will be passed along with user token to communicate with services when dealing with long running tasks like live migration. This change addresses adding service user configuration for nova in devstack. 
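A minimal way to exercise this from local.conf (illustrative; the toggle is the one introduced by this patch and it defaults to False):

    [[local|localrc]]
    # When set, create_nova_conf fills in a [service_user] section in
    # nova.conf (send_service_user_token, auth_url, credentials, etc.).
    NOVA_USE_SERVICE_TOKEN=True
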
Part of Nova blueprint use-service-tokens Depends-On: I51eb0a8937fa39a2e5dafb1ad915e7113ea61f72 Co-Authored-By: Sarafraj Singh Change-Id: I2d7348c4a72af96c0ed2ef6c0ab75d16e9aec8fc --- lib/nova | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/nova b/lib/nova index 8f11e0f311..bdfab3dea9 100644 --- a/lib/nova +++ b/lib/nova @@ -161,6 +161,14 @@ NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Other Nova configurations +# ---------------------------- + +# ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external RESP API's like Neutron, Cinder +# and Glance. +NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) + # Functions # --------- @@ -619,6 +627,22 @@ function create_nova_conf { fi iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf" + + if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then + init_nova_service_user_conf + fi +} + +function init_nova_service_user_conf { + iniset $NOVA_CONF service_user send_service_user_token True + iniset $NOVA_CONF service_user auth_type password + iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT" + iniset $NOVA_CONF service_user username nova + iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" + iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" + iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF service_user auth_strategy keystone } function init_nova_cells { From afac732d5d9b03da7abbd61f814c2f0d1a9039da Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 11 Jan 2017 18:45:27 +0100 Subject: [PATCH 0310/1936] lib/tempest: allow tweaking volume-feature-enabled/manage_snapshot Only a few Cinder backends support the 'manage snapshot' feature. So we need a feature flag here. Luckily the LVM driver does support this feature so default the feature flag to True in devstack(/Gate) but introduce a variable to tweak the config. Change-Id: Ifcb9f91059f08bdf2faf2a8d65229aba5742ee1c Depends-On: I77be1cf85a946bf72e852f6378f0d7b43af8023a --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index aa3877b4ab..60ba542894 100644 --- a/lib/tempest +++ b/lib/tempest @@ -434,6 +434,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume + iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse True TEMPEST_VOLUME_MANAGE_SNAPSHOT) # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. From 92575baa6b010ec09fea3e715030da8be61c0c64 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 21 Sep 2016 16:15:31 -0400 Subject: [PATCH 0311/1936] tempest: configure compute-feature-enabled.swap_volume if libvirt The only virt driver in nova that supports the swap volume API is libvirt so enable testing that in Tempest only if using libvirt. Depends on two changes: 1. The Tempest change that adds the new config option and test. Depends-On: I2d4779de8d21aa84533f4f92d347e932db2de58e 2. 
A nova fix for correctly waiting for the block copy job in the guest to complete. Depends-On: I0c52917a5555a70c4973f37dea1aebf878dd73b4 Change-Id: Ibb6b309574d2c6a06fcecb0626ea21527fb7f412 --- lib/tempest | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/tempest b/lib/tempest index aa3877b4ab..2f2721311e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -508,13 +508,17 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi - # Libvirt-LXC - if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then - iniset $TEMPEST_CONFIG compute-feature-enabled rescue False - iniset $TEMPEST_CONFIG compute-feature-enabled resize False - iniset $TEMPEST_CONFIG compute-feature-enabled shelve False - iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False - iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + # Libvirt + if [ "$VIRT_DRIVER" = "libvirt" ]; then + # Libvirt-LXC + if [ "$LIBVIRT_TYPE" = "lxc" ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled rescue False + iniset $TEMPEST_CONFIG compute-feature-enabled resize False + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False + iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + fi + iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi # ``service_available`` From e194330f2753c8cacbcb18668ee32b4722cba2ec Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 4 Jan 2017 07:26:58 -0800 Subject: [PATCH 0312/1936] Don't skip cellsv2 setup for cellsv1 Since cellsv2 setup is no longer optional, we can't even exclude cellsv1 from this step. Since cellsv1 users can't use the simple command, this does the individual steps as needed. Depends-On: Icfbb17cce8ce8b03dc8b7b4ffb202db01e5218a6 Change-Id: I3c9101a34b2bb0804fc4deda62dbb8637e7b8f94 --- lib/nova | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5db5eaeb7..cb3ec51d6c 100644 --- a/lib/nova +++ b/lib/nova @@ -948,7 +948,11 @@ function create_cell { if ! is_service_enabled n-cell; then nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) else - echo 'Skipping cellsv2 setup for this cellsv1 configuration' + # NOTE(danms): map_cell0 always returns 1 right now; remove this when that is fixed + (nova-manage cell_v2 map_cell0 || true) + nova-manage --config-file $NOVA_CELLS_CONF --verbose cell_v2 map_cell_and_hosts \ + --transport-url $(get_transport_url child_cell) --name 'cell1' + nova-manage db sync fi } From 1ec93a8fc21850c98fbfd4b292d329b4349e5cff Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 12 Jan 2017 16:11:11 -0500 Subject: [PATCH 0313/1936] Create private IPv6 subnet specifying mode flags $ipv6_modes should always be passed when creating the default IPv6 subnet, not just when fixed_range_v6 is set. Without it the default was DHCPv6, which cirros doesn't support out of the box. Was broken in change-over from neutron to openstack cli. 
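Roughly, the subnet create call now always carries the mode flags; a hand-written equivalent (a sketch only -- the subnet range and names are example values, and the default DevStack modes are slaac) would look like:

    # Not the literal DevStack invocation; example values throughout.
    openstack subnet create ipv6-private-subnet \
        --network private --ip-version 6 \
        --subnet-range fd12:3456:789a::/64 \
        --ipv6-ra-mode slaac --ipv6-address-mode slaac
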
Change-Id: Iadd39b1ce02fe0b3781bd3ae04adfd20d7e12d9f Closes-bug: #1656098 --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index cd0c1ed59a..cead46ff3b 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -292,8 +292,8 @@ function _neutron_create_private_subnet_v6 { subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " fi subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} " - subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6 $ipv6_modes} " - subnet_params+="--network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " + subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " + subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" From dd07151366175e8f9c61c4a1eabde4ed5d38dd62 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 13 Jan 2017 07:11:15 +0000 Subject: [PATCH 0314/1936] Updated from generate-devstack-plugins-list Change-Id: I65fd56546af13453274601ec2b923c5d03b8a8ab --- doc/source/plugin-registry.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index cb9c437458..4cfbcb16ba 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -82,6 +82,7 @@ masakari `git://git.openstack.org/openstack/masaka meteos `git://git.openstack.org/openstack/meteos `__ mistral `git://git.openstack.org/openstack/mistral `__ mixmatch `git://git.openstack.org/openstack/mixmatch `__ +mogan `git://git.openstack.org/openstack/mogan `__ monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ @@ -121,7 +122,6 @@ neutron-fwaas `git://git.openstack.org/openstack/neutro neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ -nimble `git://git.openstack.org/openstack/nimble `__ nova-docker `git://git.openstack.org/openstack/nova-docker `__ nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ @@ -129,6 +129,7 @@ nova-mksproxy `git://git.openstack.org/openstack/nova-m nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ oaktree `git://git.openstack.org/openstack/oaktree `__ octavia `git://git.openstack.org/openstack/octavia `__ +os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ picasso `git://git.openstack.org/openstack/picasso `__ From b612b6281a424556f6ed30d421214d0aa32ded55 Mon Sep 17 00:00:00 2001 From: Victor Morales Date: Thu, 12 Jan 2017 19:53:07 -0600 Subject: [PATCH 0315/1936] Use delete action for clean up *.pyc files Findutils added in release 4.2.3 a new --delete action for deleting matching files. This action performs better than -exec rm {} \; because it doesn't have to spawn an external process. 
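Side by side, the two forms are (the version check in the diff below picks between them):

    # Pre-4.2.3 findutils: pipe matches to an external rm via xargs
    sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm
    # findutils >= 4.2.3: let find delete matches itself, no extra process
    sudo find $DEST -name "*.pyc" -delete
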
This change uses a new action whenever is possible. Change-Id: Iff16a86b18e924cfe78ac7c6107910940ce51e03 --- clean.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index e369eda26c..90b21eb353 100755 --- a/clean.sh +++ b/clean.sh @@ -149,5 +149,10 @@ rm -rf ~/.config/openstack # Clean up all *.pyc files if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then - sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm + find_version=`find --version | awk '{ print $NF; exit}'` + if vercmp "$find_version" "<" "4.2.3" ; then + sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm + else + sudo find $DEST -name "*.pyc" -delete + fi fi From 29bb53fd3e7762c1fea023842ba09fcbfab60252 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Sun, 15 Jan 2017 09:50:40 -0800 Subject: [PATCH 0316/1936] Fix cellsv2 cell0 database name For some reason we were defaulting the name of the cell0 database to nova_api_cell0 instead of nova_cell0. Devstack inherited that to make things work, but we don't really want that. This patch makes us use the proper name and create the cell0 mapping accordingly. As a side effect, it also starts the process of unifying the cellsv1 and cellsv2 paths by creating the cell0 mapping the same for both. Change-Id: I4e7f6c5eaa068c98e5c4ef3feaee50d8e4f5d484 --- lib/nova | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index cb3ec51d6c..d8bc6c9593 100644 --- a/lib/nova +++ b/lib/nova @@ -678,10 +678,10 @@ function init_nova { if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova databases recreate_database nova - recreate_database nova_api_cell0 + recreate_database nova_cell0 # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has - # been run this migrates the "nova" and "nova_api_cell0" database. + # been run this migrates the "nova" and "nova_cell0" database. # Otherwise it just migrates the "nova" database. $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync @@ -945,11 +945,12 @@ function create_flavors { # create_cell(): Group the available hosts into a cell function create_cell { + # NOTE(danms): map_cell0 always returns 1 right now; remove this when that is fixed + (nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`|| true) + if ! is_service_enabled n-cell; then nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) else - # NOTE(danms): map_cell0 always returns 1 right now; remove this when that is fixed - (nova-manage cell_v2 map_cell0 || true) nova-manage --config-file $NOVA_CELLS_CONF --verbose cell_v2 map_cell_and_hosts \ --transport-url $(get_transport_url child_cell) --name 'cell1' nova-manage db sync From ba3c8f48bb07707e421f1c5aac5acedd1e63d3fd Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Sun, 15 Jan 2017 20:07:04 -0800 Subject: [PATCH 0317/1936] Change the way to get conntrack-tools version In the incoming XenServer, it failed to install conntrack-tools in Dom0 due to the bash script which is trying to find the correct CentOS release version to be used in yum command. 
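For comparison, both parsing pipelines appear in the diff below (the description of what each step discards is an interpretation, not something the patch documents):

    # Old parsing -- kept the first two dot-separated fields and translated
    # '-' to '.', which no longer yields a usable release number on newer Dom0:
    yum version nogroups | grep Installed | cut -d' ' -f 2 | cut -d'.' -f1-2 | tr '-' '.'
    # New parsing -- keep only what precedes the first '/' and first '-',
    # and pass that to yum as --releasever:
    yum version nogroups | grep Installed | cut -d' ' -f 2 | cut -d'/' -f 1 | cut -d'-' -f 1
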
This patch is to fix the problem Change-Id: If7f169e118ccb7c29fc479c361417a916dc40b40 --- tools/xen/functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/functions b/tools/xen/functions index e1864eb4bb..93f3413d6d 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -317,7 +317,7 @@ function install_conntrack_tools { # Only support conntrack-tools in Dom0 with XS7.0 and above if [ ! -f /usr/sbin/conntrackd ]; then sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo - centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'.' -f1-2 | tr '-' '.') + centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'/' -f 1 | cut -d'-' -f 1) yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools # Backup conntrackd.conf after install conntrack-tools, use the one with statistic mode mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back From 3eb7c97a6ddc4780ebcb833c1b86cf4b86b5e14d Mon Sep 17 00:00:00 2001 From: Patrick East Date: Fri, 13 Jan 2017 11:44:54 -0800 Subject: [PATCH 0318/1936] Restrict enabling the manage_snapshot volume feature The test that is in tempest for this feature is specific to LVM and will *not* work for other backends regardless of them supporting the feature. It shouldn't default to enabled for everyone, only for LVM. If others want to opt-in they can, but its definitely the minority that would. Change-Id: I21347f2a5069059e6413208b254d5acd246faaea --- lib/tempest | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 5a072291f1..deffcfae61 100644 --- a/lib/tempest +++ b/lib/tempest @@ -434,7 +434,11 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse True TEMPEST_VOLUME_MANAGE_SNAPSHOT) + # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. From 1b457c9acfe7c0bcf7ba06dd4029d1e4ef9cdded Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 18 Jan 2017 07:53:33 -0500 Subject: [PATCH 0319/1936] remove db setting when no nova service need it We shouldn't have the db connection laying around if services don't need it. 
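For example (an illustrative, deliberately incomplete service list for a multinode compute-only subnode; not taken from this patch), a node that enables none of n-api/n-cond/n-sched now gets a nova.conf with no [database] or [api_database] connection entries:

    [[local|localrc]]
    # compute-only subnode sketch -- only the pieces relevant here are shown
    ENABLED_SERVICES=n-cpu,q-agt
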
Change-Id: I9290e80c499c0c4644094e3c0666fd0ab002a23c --- lib/nova | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 450242b579..3e29c1320d 100644 --- a/lib/nova +++ b/lib/nova @@ -465,8 +465,6 @@ function create_nova_conf { else iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" fi - iniset $NOVA_CONF database connection `database_connection_url nova` - iniset $NOVA_CONF api_database connection `database_connection_url nova_api` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" @@ -478,6 +476,14 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT bindir "/usr/bin" fi + # only setup database connections if there are services that + # require them running on the host. The ensures that n-cpu doesn't + # leak a need to use the db in a multinode scenario. + if is_service_enabled n-api n-cond n-sched; then + iniset $NOVA_CONF database connection `database_connection_url nova` + iniset $NOVA_CONF api_database connection `database_connection_url nova_api` + fi + if is_service_enabled n-api; then if is_service_enabled n-api-meta; then # If running n-api-meta as a separate service From f80e2cfee85a200ba204ac00bada74695abcd964 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 18 Jan 2017 15:42:32 -0500 Subject: [PATCH 0320/1936] add install_devstack_tools An initial install for devstack-tools, this will need to use all the fun pip extra variables for installation, however the current pip_install always prefers python2, and we only want to do python3 here. Change-Id: I3dcdb35130f76fad81cb7b0d4001b7e96efbbd84 --- inc/python | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/inc/python b/inc/python index 04cde34fe1..5afc07f636 100644 --- a/inc/python +++ b/inc/python @@ -441,6 +441,13 @@ function install_python3 { fi } +function install_devstack_tools { + # intentionally old to ensure devstack-gate has control + local dstools_version=${DSTOOLS_VERSION:-0.1.2} + install_python3 + sudo pip3 install -U devstack-tools==${dstools_version} +} + # Restore xtrace $INC_PY_TRACE From b6753cea8cd0e07d982048d2f856c6168fc74a92 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 5 Apr 2016 11:52:44 -0400 Subject: [PATCH 0321/1936] simplify colorized logging setup This makes setup_colorized_logging be a thing which takes a single parameter and doesn't let projects do things differently. It also changes the order of values from user / project to project / user to represent the hierachy more clearly. 
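In practice the call sites collapse to the one-argument form; a before/after sketch (the four-argument call shown is the style cinder used):

    # old, per-project variants (no longer honoured):
    #   setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id"
    # new, uniform call:
    setup_colorized_logging $NOVA_CONF
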
Change-Id: I8c0ba7da54be588e3e068734feb4f78ed7c5a14a --- functions | 8 ++++---- lib/cinder | 2 +- lib/glance | 6 +++--- lib/keystone | 2 +- lib/neutron-legacy | 2 +- lib/nova | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/functions b/functions index 0be9794d9a..3e0e0d5559 100644 --- a/functions +++ b/functions @@ -578,11 +578,11 @@ function vercmp { # setup_colorized_logging something.conf SOMESECTION function setup_colorized_logging { local conf_file=$1 - local conf_section=$2 - local project_var=${3:-"project_name"} - local user_var=${4:-"user_name"} + local conf_section="DEFAULT" + local project_var="project_name" + local user_var="user_name" # Add color to logging output - iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$user_var")s %("$project_var")s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$project_var")s %("$user_var")s%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" diff --git a/lib/cinder b/lib/cinder index 40f0f16d6b..cf5bb25cfe 100644 --- a/lib/cinder +++ b/lib/cinder @@ -334,7 +334,7 @@ function configure_cinder { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" + setup_colorized_logging $CINDER_CONF else # Set req-id, project-name and resource in log format iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s" diff --git a/lib/glance b/lib/glance index 4ba1d20bd7..17361143df 100644 --- a/lib/glance +++ b/lib/glance @@ -230,8 +230,8 @@ function configure_glance { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $GLANCE_API_CONF DEFAULT tenant user - setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT tenant user + setup_colorized_logging $GLANCE_API_CONF + setup_colorized_logging $GLANCE_REGISTRY_CONF fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -273,7 +273,7 @@ function configure_glance { if is_service_enabled g-glare; then local dburl dburl=`database_connection_url glance` - setup_colorized_logging $GLANCE_GLARE_CONF DEFAULT tenant user + setup_colorized_logging $GLANCE_GLARE_CONF iniset $GLANCE_GLARE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_GLARE_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS iniset $GLANCE_GLARE_CONF DEFAULT bind_port $GLANCE_GLARE_PORT diff --git a/lib/keystone b/lib/keystone index 34730b892a..474af8be1d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -284,7 +284,7 @@ function configure_keystone { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$KEYSTONE_DEPLOY" != "mod_wsgi" ] ; then - setup_colorized_logging $KEYSTONE_CONF DEFAULT + setup_colorized_logging $KEYSTONE_CONF fi iniset $KEYSTONE_CONF DEFAULT 
debug $ENABLE_DEBUG_LOG_LEVEL diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 29c187e1e1..cf59b2d6dd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -733,7 +733,7 @@ function _configure_neutron_common { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id + setup_colorized_logging $NEUTRON_CONF else # Show user_name and project_name by default like in nova iniset $NEUTRON_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s" diff --git a/lib/nova b/lib/nova index 450242b579..617cf5c3aa 100644 --- a/lib/nova +++ b/lib/nova @@ -520,7 +520,7 @@ function create_nova_conf { fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$NOVA_USE_MOD_WSGI" == "False" ] ; then - setup_colorized_logging $NOVA_CONF DEFAULT + setup_colorized_logging $NOVA_CONF else # Show user_name and project_name instead of user_id and project_id iniset $NOVA_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s" From 9751be66fa45681d069af200632515c3693833d0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 5 Apr 2016 12:08:57 -0400 Subject: [PATCH 0322/1936] unify logging setup on all services This provides a single setup_logging function which builds consistent colorization if the config supports it, otherwise builds the identity strings that we need to actually keep track of requests. Change-Id: Iffe30326a5b974ad141aed6288f61e0d6fd18ca9 --- functions | 18 ++++++++++++++++++ lib/cinder | 7 +------ lib/glance | 8 +++----- lib/neutron-legacy | 7 +------ lib/nova | 8 ++------ 5 files changed, 25 insertions(+), 23 deletions(-) diff --git a/functions b/functions index 3e0e0d5559..89ee3672d3 100644 --- a/functions +++ b/functions @@ -569,6 +569,19 @@ function vercmp { esac } +# This sets up defaults we like in devstack for logging for tracking +# down issues, and makes sure everything is done the same between +# projects. +function setup_logging { + local conf_file=$1 + local other_cond=${2:-"False"} + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then + setup_colorized_logging $conf_file + else + setup_standard_logging_identity $conf_file + fi +} + # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. # The last two parameters are optional and can be used to specify @@ -588,6 +601,11 @@ function setup_colorized_logging { iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } +function setup_standard_logging_identity { + local conf_file=$1 + iniset $conf_file DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s" +} + # These functions are provided for basic fall-back functionality for # projects that include parts of DevStack (Grenade). 
stack.sh will # override these with more specific versions for DevStack (with fancy diff --git a/lib/cinder b/lib/cinder index cf5bb25cfe..870ee0b905 100644 --- a/lib/cinder +++ b/lib/cinder @@ -333,12 +333,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - setup_colorized_logging $CINDER_CONF - else - # Set req-id, project-name and resource in log format - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s" - fi + setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then _cinder_config_apache_wsgi diff --git a/lib/glance b/lib/glance index 17361143df..26c41507ed 100644 --- a/lib/glance +++ b/lib/glance @@ -229,10 +229,8 @@ function configure_glance { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $GLANCE_API_CONF - setup_colorized_logging $GLANCE_REGISTRY_CONF - fi + setup_logging $GLANCE_API_CONF + setup_logging $GLANCE_REGISTRY_CONF cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -273,7 +271,7 @@ function configure_glance { if is_service_enabled g-glare; then local dburl dburl=`database_connection_url glance` - setup_colorized_logging $GLANCE_GLARE_CONF + setup_logging $GLANCE_GLARE_CONF iniset $GLANCE_GLARE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_GLARE_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS iniset $GLANCE_GLARE_CONF DEFAULT bind_port $GLANCE_GLARE_PORT diff --git a/lib/neutron-legacy b/lib/neutron-legacy index cf59b2d6dd..5ec61f9645 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -732,12 +732,7 @@ function _configure_neutron_common { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF - else - # Show user_name and project_name by default like in nova - iniset $NEUTRON_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s" - fi + setup_logging $NEUTRON_CONF if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original diff --git a/lib/nova b/lib/nova index 617cf5c3aa..c47f81a530 100644 --- a/lib/nova +++ b/lib/nova @@ -519,12 +519,8 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$NOVA_USE_MOD_WSGI" == "False" ] ; then - setup_colorized_logging $NOVA_CONF - else - # Show user_name and project_name instead of user_id and project_id - iniset $NOVA_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s" - fi + setup_logging $NOVA_CONF $NOVA_USE_MOD_WSGI + if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then _config_nova_apache_wsgi fi From fb1c7b51b57b11d263c8b08d4f12992fb58748cb Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 17 Jan 2017 12:17:48 -0500 Subject: [PATCH 0323/1936] remove obsolete s3 conf from nova.conf Change-Id: If0b42c647382b739ee4f1d98ea0394ec7b171ebf --- lib/nova | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova b/lib/nova index 3e29c1320d..85561ca570 100644 --- a/lib/nova +++ b/lib/nova @@ -457,8 +457,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER" iniset $NOVA_CONF DEFAULT 
scheduler_default_filters "$FILTERS" iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" - iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" - iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" iniset $NOVA_CONF DEFAULT use_ipv6 "True" @@ -468,7 +466,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS" if is_fedora || is_suse; then # nova defaults to /usr/local/bin, but fedora and suse pip like to From 465ee459867bc972fdd6568839db800a12289e05 Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Tue, 8 Nov 2016 17:49:45 +0800 Subject: [PATCH 0324/1936] XenAPI: enable root_helper_daemon for XenServer The daemon mode of root helper for XenAPI has been implemented by this change which has been merged to neutron: https://review.openstack.org/#/c/390931/ It will help to import the performance. Let's enable this mode by default in devstack. Change-Id: I52246bef3e4434dfc49446535b122580bc475ac3 --- lib/neutron_plugins/openvswitch_agent | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 76a1a4f3a1..c5c616a7df 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -81,8 +81,11 @@ function neutron_plugin_configure_plugin_agent { # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. # XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_DOM0_COMMAND" - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper_daemon "" + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "" + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper_daemon "xenapi_root_helper" + iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_url "$XENAPI_CONNECTION_URL" + iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_username "$XENAPI_USER" + iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_password "$XENAPI_PASSWORD" # Disable minimize polling, so that it can always detect OVS and Port changes # This is a problem of xenserver + neutron, bug has been reported From 10db2b8e6559724673a3efc691c595f307eb12d6 Mon Sep 17 00:00:00 2001 From: Michelle Mandel Date: Wed, 20 Jul 2016 11:39:42 -0400 Subject: [PATCH 0325/1936] Enable VNC console in tempest.conf If the NoVNC service is enabled, enable vnc_console in tempest.conf. This will allow tempest tests that interact with VNC to be executed. 
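The net effect when n-novnc is enabled is a single flag in the generated tempest.conf; the equivalent manual step (exactly what the new iniset call in the diff below produces) is:

    # Resulting fragment:
    #   [compute-feature-enabled]
    #   vnc_console = True
    iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True
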
Change-Id: Idb38a3b11e2f61f23adf1ec23c04ddccd72e7539 Depends-On: I09aed8de28f1ba2637382e870134ced38808df29 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 331252267c..9f89dbe120 100644 --- a/lib/tempest +++ b/lib/tempest @@ -377,6 +377,10 @@ function configure_tempest { fi fi + if is_service_enabled n-novnc; then + iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True + fi + # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network project_networks_reachable false From 5713497aa5819f240050fee05d047c480845a8cd Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 20 Jan 2017 09:01:49 -0500 Subject: [PATCH 0326/1936] Add discover_hosts.sh script This adds a simple script to run the 'nova-manage cell_v2 discover_hosts' command which will be used by devstack-gate to discover the compute hosts after devstack is fully setup. This allows us to manage the branches where this can run from devstack rather than require branch logic in devstack-gate. Change-Id: Icc595d60de373471aa7ee8fb9f3a81fc12d80438 Depends-On: I4823737246a8e9cc4eaebf67ff6bdba8bf42ab29 --- tools/discover_hosts.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100755 tools/discover_hosts.sh diff --git a/tools/discover_hosts.sh b/tools/discover_hosts.sh new file mode 100755 index 0000000000..65966c3d04 --- /dev/null +++ b/tools/discover_hosts.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# **discover_hosts.sh** + +# This is just a very simple script to run the +# "nova-manage cell_v2 discover_hosts" command +# which is needed to discover compute nodes and +# register them with a parent cell in Nova. +# This assumes that /etc/nova/nova.conf exists +# and has the following entries filled in: +# +# [api_database] +# connection = This is the URL to the nova_api database +# +# In other words this should be run on the primary +# (API) node in a multi-node setup. + +nova-manage cell_v2 discover_hosts --verbose From 5c6aa56e11b2dc79adc70154187d3a029e8e0976 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 26 Jan 2017 11:31:58 +0100 Subject: [PATCH 0327/1936] Don't assume nova-manage is present tools/discover_hosts.sh is run by devstack-gate, and breaks all dsvm job that doesn't use nova. nova-manage is perhaps not installed if nova services are not enabled. This change checks the presence of nova-. Change-Id: Ic555d241f98d0fa027897c69a7115d1be88f6c96 --- tools/discover_hosts.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/discover_hosts.sh b/tools/discover_hosts.sh index 65966c3d04..4ec6a40511 100755 --- a/tools/discover_hosts.sh +++ b/tools/discover_hosts.sh @@ -15,4 +15,6 @@ # In other words this should be run on the primary # (API) node in a multi-node setup. -nova-manage cell_v2 discover_hosts --verbose +if [[ -x $(which nova-manage) ]]; then + nova-manage cell_v2 discover_hosts --verbose +fi From e9a9fbef8fc29437d1f69727e2ab73c31e957488 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 30 Jan 2017 22:22:43 -0500 Subject: [PATCH 0328/1936] Don't run swap_volume tests in Tempest if cells v1 is enabled Cells v1 apparently doesn't support the swap volume API which was recently enabled for testing in change: 92575baa6b010ec09fea3e715030da8be61c0c64 Rather than revert that change, we should just handle the cells v1 case and not enable that test in that environment. 
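For illustration (an annotation, not part of the original patch): after this change the flag is only written when cells v1 (the n-cell service) is not enabled, i.e. in the normal case tempest.conf gains roughly

    [compute-feature-enabled]
    swap_volume = True

while under cells v1 the option is left unset and Tempest falls back to its own default.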
Change-Id: I80f52e8299641098d90d3c374a80770fc45b8122 Closes-Bug: #1660511 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 050ac382c5..7cafadbf3f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -516,8 +516,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled shelve False iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + elif ! is_service_enabled n-cell; then + # cells v1 does not support swapping volumes + iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi - iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi # ``service_available`` From e1644ac1d86d4836ca26e89258b5aa6e93b9f770 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 31 Jan 2017 11:59:09 -0500 Subject: [PATCH 0329/1936] Remove OS_NO_CACHE from openrc Nothing uses this variable either in devstack or libraries, so it's dead code (at least on master), and we can remove it. Change-Id: I5975c476ae5b26402c209d6e5746e7a5a5a91507 --- openrc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/openrc b/openrc index d1c61297a1..483b5af387 100644 --- a/openrc +++ b/openrc @@ -53,10 +53,6 @@ export OS_USERNAME=${OS_USERNAME:-demo} # or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} -# Don't put the key into a keyring by default. Testing for development is much -# easier with this off. -export OS_NO_CACHE=${OS_NO_CACHE:-1} - # Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} From ac5fdb4c4090efd682cc5c55aa30ec433da29fc7 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 31 Jan 2017 15:20:18 -0500 Subject: [PATCH 0330/1936] nova: call map_cell0 much earlier in the setup The map_cell0 command creates a cell mapping record in the nova_api database, and the nova-manage db sync command will migrate the db schema for the nova_cell0 database. This patch takes advantage of that by moving the map_cell0 call much earlier in the setup process so we get the nova_cell0 db schema migrated at the same time as the main nova db. This also removes the || true condition around map_cell0 since it's idempotent now due to fix: aa7b6ebbb254f00fcb548832941ca9dbd3996d9f Change-Id: Ice4fbb1771270c618b2acbc933d4fbfb6805df81 --- lib/nova | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index 4d336f6271..ba9cac5161 100644 --- a/lib/nova +++ b/lib/nova @@ -683,9 +683,13 @@ function init_nova { recreate_database nova recreate_database nova_cell0 - # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has - # been run this migrates the "nova" and "nova_cell0" database. - # Otherwise it just migrates the "nova" database. + # map_cell0 will create the cell mapping record in the nova_api DB so + # this needs to come after the api_db sync happens. We also want to run + # this before the db sync below since that will migrate both the nova + # and nova_cell0 databases. + nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` + + # Migrate nova and nova_cell0 databases. 
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync if is_service_enabled n-cell; then @@ -945,9 +949,6 @@ function create_flavors { # create_cell(): Group the available hosts into a cell function create_cell { - # NOTE(danms): map_cell0 always returns 1 right now; remove this when that is fixed - (nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`|| true) - if ! is_service_enabled n-cell; then nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) else From 705b3785ca5383179c970c7d707bde9f69d96635 Mon Sep 17 00:00:00 2001 From: PranaliD Date: Wed, 1 Feb 2017 11:32:09 +0530 Subject: [PATCH 0331/1936] Corrected router gateway set command While configuring the external network as the default router gateway for IPV6 in lib/neutron_plugins/services/l3, "router" keyword is missing in the command. Corrected the command. Change-Id: I055bea5137a841f709d4865ec9a43d6b53f8f4c9 Closes-Bug: 1660712 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index cd0c1ed59a..00b5a63756 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -385,7 +385,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using the l3-agent for layer 3 From debc695ddfc8b7b2aeb53c01c624e15f69ed9fa2 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 1 Feb 2017 07:27:18 +0000 Subject: [PATCH 0332/1936] Updated from generate-devstack-plugins-list Change-Id: Ia858ddf2e688903af2c76d532ddf7780bd591ef9 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4cfbcb16ba..2721eda776 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -80,6 +80,7 @@ manila `git://git.openstack.org/openstack/manila manila-ui `git://git.openstack.org/openstack/manila-ui `__ masakari `git://git.openstack.org/openstack/masakari `__ meteos `git://git.openstack.org/openstack/meteos `__ +meteos-ui `git://git.openstack.org/openstack/meteos-ui `__ mistral `git://git.openstack.org/openstack/mistral `__ mixmatch `git://git.openstack.org/openstack/mixmatch `__ mogan `git://git.openstack.org/openstack/mogan `__ From 6d79ebcae10dddb804856e7838aa391749266ebb Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 2 Feb 2017 10:52:53 -0500 Subject: [PATCH 0333/1936] Add top memory consuming process to dstat output Right now we under pressure because of increasing memory consumption in dsvm jobs. So it'll be good to see which process is eating the most ram at a given time. It may not end up being useful, but it doesn't hurt to at least display just in case. Change-Id: I096bf4b425db51358240335e41f6238d1ec1bb40 --- tools/dstat.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/dstat.sh b/tools/dstat.sh index 3c0b3be089..1c80fb70f8 100755 --- a/tools/dstat.sh +++ b/tools/dstat.sh @@ -13,7 +13,7 @@ LOGDIR=$1 # Command line arguments for primary DStat process. 
-DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --swap" +DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap" # Command-line arguments for secondary background DStat process. DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log" From 06f2ea2b962ffafeb415770d7eec5ee7886ee3b5 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Feb 2017 16:47:00 -0800 Subject: [PATCH 0334/1936] Cut back on the number of workers ran for neutron The dedicated RPC worker is overkill in single or multinode devstack deployments. Also metadata API workers was left default, which meant they were as many as the CPU cores. Related-bug: 1656386 Change-Id: Ibbf7787dfa48e13a51f961f3e0ee2b8f49964759 --- lib/neutron | 1 + lib/neutron-legacy | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/lib/neutron b/lib/neutron index f6c705c2b0..9b032b72e2 100644 --- a/lib/neutron +++ b/lib/neutron @@ -219,6 +219,7 @@ function configure_neutron_new { iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST + iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" # TODO(dtroyer): remove the v2.0 hard code below diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5ec61f9645..af91470e65 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -359,6 +359,10 @@ function configure_mutnauq { fi iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 } function create_nova_conf_neutron { @@ -788,6 +792,7 @@ function _configure_neutron_metadata_agent { iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" From 18682324856285743c7f2b54f1dc0523ea74a70f Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 28 Dec 2016 23:12:42 +0100 Subject: [PATCH 0335/1936] lib/tempest: Liberty EOL: remove the volume_services feature flag Now that Liberty is EOLed, the feature flag is not needed anymore. Change-Id: Ib82cb21edbda383d17f8cf69fedc884f2357fead Depends-On: I7073106988a79aad19c6b95bb050d2eaf00c36c0 --- lib/tempest | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 2efaebc694..5c1113130c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -443,8 +443,7 @@ function configure_tempest { TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) - # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. - iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True + # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. 
iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} From 9d49ed983435cd0ff3791224dff47a4d51f0bf01 Mon Sep 17 00:00:00 2001 From: Joanna Taryma Date: Mon, 6 Feb 2017 11:30:15 -0800 Subject: [PATCH 0336/1936] Added printing exit code of component process if it failed to start When command failed and component failed to start, original exit code was overwritten due to original command being executed in background. This commit adds information about command's exit code to echoed message about component's start up failure. Change-Id: I8a3dd485b1b1f2d70d42c5610baac7c0c713f53a Signed-off-by: Joanna Taryma --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 8d32bb4148..be8b07b0f7 100644 --- a/functions-common +++ b/functions-common @@ -1552,7 +1552,7 @@ function screen_process { # Append the process to the screen rc file screen_rc "$name" "$command" - screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL" + screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start. Exit code: \$?\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL" } # Screen rc file builder From 09949e0dc61d9ef420b1528d0549092f88bc422a Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Mon, 6 Feb 2017 13:46:32 -0800 Subject: [PATCH 0337/1936] worlddump: Use __future__ print_function Commit e7361775c112e32ea517eddc344641897d273d25 changed the code to use Python 3 style print function, but when doing 'print()' in Python 2.7 it would print '()': >>> print() () Import the __future__ print function so that a blank line will be printed as expected. This will now work the same in Python 2 & 3. Change-Id: I61742e107278f2327c18c9ab0de52d1914f16c97 --- tools/worlddump.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index 1ce931efd5..1244dfbec3 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -17,6 +17,8 @@ """Dump the state of the world for post mortem.""" +from __future__ import print_function + import argparse import datetime from distutils import spawn From fd406772975a4a16c8c510d8bb11b2097017347c Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 7 Feb 2017 14:43:32 +0100 Subject: [PATCH 0338/1936] lib/tempest: remove an unused network option Tempest doesn't support the `api_version` config option for networking anymore. I can't track which Tempest patch removed it, but it's been more than 2 years. 
Change-Id: I4012f470e8c317803203b6fa1e265600dbc49b3d --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 2efaebc694..937436dc5c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -382,7 +382,6 @@ function configure_tempest { fi # Network - iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network project_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" From 40aae6adbfce1bd896d5f7b0e281e798b56d1ca8 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Wed, 8 Feb 2017 00:08:53 +0900 Subject: [PATCH 0339/1936] Down PUBLIC_BRIDGE before trying to delete it When cleanup devstack with linuxbridge, PUBLIC_BRIDGE should be DOWN before trying to delete it. Change-Id: I2d205cbe4d92a03ee5c376a23282d9880dd9a1df Closes-Bug: #1662543 --- lib/neutron_plugins/linuxbridge_agent | 1 + 1 file changed, 1 insertion(+) mode change 100644 => 100755 lib/neutron_plugins/linuxbridge_agent diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent old mode 100644 new mode 100755 index 0c8ccb8718..5885616951 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -8,6 +8,7 @@ _XTRACE_NEUTRON_LB=$(set +o | grep xtrace) set +o xtrace function neutron_lb_cleanup { + sudo ip link set $PUBLIC_BRIDGE down sudo brctl delbr $PUBLIC_BRIDGE if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then From 88b84094ec1e2a18b6edba91f510cae53bc9c821 Mon Sep 17 00:00:00 2001 From: Roman Podoliaka Date: Tue, 7 Feb 2017 13:34:12 +0200 Subject: [PATCH 0340/1936] mysql: set default sql_mode to TRADITIONAL We currently use a more permisive STRICT_ALL_TABLES mode, but that's not what modern MySQL versions default to (i.e. TRADITIONAL): https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-changes (non-Devstack deployments will most likely use TRADITIONAL as well) Due to the fact that we default to TRADITIONAL in oslo.db, this produces annoying warnings on MySQL 5.7 versions we use in the gate: Warning: (3090, u"Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. 
It will be removed in a future release.") https://git.openstack.org/cgit/openstack/oslo.db/tree/oslo_db/options.py#n49 Unlike STRICT_ALL_TABLES, TRADITIONAL mode includes NO_AUTO_CREATE_USER, and MySQL emits this warning on switching it on: https://dev.mysql.com/worklog/task/?id=8326 So we have two options here: 1) make oslo.db default to STRICT_ALL_TABLES 2) make Devstack default to TRADITIONAL The latter seems to be more appropriate as: 1) it's what modern MySQL versions default to 2) it's what people are actually using, if they do not override the oslo.db default 3) it's more strict Closes-Bug: #1652452 Change-Id: Ie6d823c9f8465ac9f2ce4825929d1a50438fab45 --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 89ae082c81..7bbcace399 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -94,7 +94,7 @@ function configure_database_mysql { # Change bind-address from localhost (127.0.0.1) to any (::) and # set default db type to InnoDB iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" - iniset -sudo $my_conf mysqld sql_mode STRICT_ALL_TABLES + iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 iniset -sudo $my_conf mysqld query_cache_type OFF From cfc3edc97c3075c800e8366e3ff4e1d21578caca Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Wed, 8 Feb 2017 10:54:29 +0800 Subject: [PATCH 0341/1936] Add "--nvram" to virsh undefine domain when clean_nova For the instance which has boot by uefi, we should use virsh undefine --nvram to undefine it. Check the libvirt version for whether it supports nvram and use new undefine parameters since this parameters is compatible with those instance which don't use uefi. Closes-bug: #1612613 Change-Id: Ibca1450e965df1481e6cd6b0d597b4323d667e60 Signed-off-by: Kevin Zhao --- lib/nova | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 4d336f6271..5f7a658e6a 100644 --- a/lib/nova +++ b/lib/nova @@ -202,7 +202,10 @@ function cleanup_nova { instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 sudo virsh destroy || true - echo $instances | xargs -n1 sudo virsh undefine --managed-save || true + if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then + # Can't delete with nvram flags, then just try without this flag + xargs -n1 sudo virsh undefine --managed-save <<< $instances + fi fi # Logout and delete iscsi sessions From d9aaae95f2b84170bf35e037715e4963d89f940c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 8 Feb 2017 07:49:26 -0500 Subject: [PATCH 0342/1936] Generate deprecation warning for postgresql Change-Id: I599e6d84b70bb6a7718ae48dd0cfc91796af189e --- lib/databases/postgresql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 1f347f5548..618834b550 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -95,6 +95,7 @@ function configure_database_postgresql { function install_database_postgresql { echo_summary "Installing postgresql" + deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" local pgpass=$HOME/.pgpass if [[ ! 
-e $pgpass ]]; then cat < $pgpass From 26e5a00957768df61ed81bba7ba3011c97d1290b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 8 Feb 2017 14:19:00 +0000 Subject: [PATCH 0343/1936] Updated from generate-devstack-plugins-list Change-Id: I4cf11a25b1d107cc5ab9664cae47a5a13f7a2450 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2721eda776..17da67b816 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -123,7 +123,6 @@ neutron-fwaas `git://git.openstack.org/openstack/neutro neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ -nova-docker `git://git.openstack.org/openstack/nova-docker `__ nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ From 999dd7e989ae850bec7158a0058c0d38893ecdae Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 9 Feb 2017 17:56:40 -0500 Subject: [PATCH 0344/1936] only apply ebtables race fix on trusty Change-Id: Ifc83e7301d9d921ce9ceed349f116584ce03842b --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 5e7695a2b2..47b054bc58 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -26,7 +26,7 @@ function install_libvirt { install_package qemu-system install_package libvirt-bin libvirt-dev pip_install_gr libvirt-python - if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then + if [[ ${DISTRO} == "trusty" && ${EBTABLES_RACE_FIX} == "True" ]]; then # Work around for bug #1501558. We can remove this once we # get to a version of Ubuntu that has new enough libvirt. TOP_DIR=$TOP_DIR $TOP_DIR/tools/install_ebtables_workaround.sh From 47bcf4fbcb6572cb72a8f4e268a09bf3edff23d8 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 13 Jan 2017 18:27:38 +0000 Subject: [PATCH 0345/1936] Removed neutron_plugin_configure_debug_command functions Those are not called by devstack anymore. This cleanup also gets rid of code that attempts to set external_network_bridge to an empty value, which triggers a deprecation warning for the option since it's going to be removed in a next Neutron release. 
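For illustration (an annotation, not part of the original patch): the removed hooks were part of the per-plugin interface listed in lib/neutron_plugins/README.md, and a minimal sketch of a plugin file stubbing out a few of the hooks that remain after this change could look like the following (placeholder bodies only):

    function neutron_plugin_configure_common {
        :
    }
    function neutron_plugin_configure_dhcp_agent {
        :
    }
    function neutron_plugin_configure_l3_agent {
        :
    }
    function neutron_plugin_configure_plugin_agent {
        :
    }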
Change-Id: I5adcbab877b4e8742522de81b1a85acfc33160d7 --- lib/neutron_plugins/README.md | 1 - lib/neutron_plugins/bigswitch_floodlight | 4 ---- lib/neutron_plugins/brocade | 4 ---- lib/neutron_plugins/cisco | 5 ----- lib/neutron_plugins/linuxbridge_agent | 4 ---- lib/neutron_plugins/nuage | 4 ---- lib/neutron_plugins/openvswitch_agent | 4 ---- lib/neutron_plugins/ovs_base | 8 -------- 8 files changed, 34 deletions(-) diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index f03000e7cb..ed40886fda 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -24,7 +24,6 @@ functions * ``neutron_plugin_configure_common`` : set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, ``Q_PLUGIN_CLASS`` -* ``neutron_plugin_configure_debug_command`` * ``neutron_plugin_configure_dhcp_agent`` * ``neutron_plugin_configure_l3_agent`` * ``neutron_plugin_configure_plugin_agent`` diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 586ded79b4..52c6ad58b5 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -26,10 +26,6 @@ function neutron_plugin_configure_common { BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - function neutron_plugin_configure_dhcp_agent { : } diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 6ba0a66c3f..1b42e7794e 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -49,10 +49,6 @@ function neutron_plugin_configure_service { } -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index fc2cb8ad17..b397169b59 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -45,7 +45,6 @@ source $TOP_DIR/lib/neutron_plugins/openvswitch _prefix_function neutron_plugin_create_nova_conf ovs _prefix_function neutron_plugin_install_agent_packages ovs _prefix_function neutron_plugin_configure_common ovs -_prefix_function neutron_plugin_configure_debug_command ovs _prefix_function neutron_plugin_configure_dhcp_agent ovs _prefix_function neutron_plugin_configure_l3_agent ovs _prefix_function neutron_plugin_configure_plugin_agent ovs @@ -83,10 +82,6 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2" } -function neutron_plugin_configure_debug_command { - : -} - function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 5885616951..f1216377fa 100755 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -39,10 +39,6 @@ function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - function neutron_plugin_configure_dhcp_agent { local conf_file=$1 : diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 61e634e453..1c04aaac9a 100644 --- 
a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -33,10 +33,6 @@ function neutron_plugin_configure_common { NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} } -function neutron_plugin_configure_debug_command { - : -} - function neutron_plugin_configure_dhcp_agent { : } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 76a1a4f3a1..61305e1779 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -23,10 +23,6 @@ function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - function neutron_plugin_configure_dhcp_agent { local conf_file=$1 : diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 62a4d00bcd..795d2bb5b2 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -77,14 +77,6 @@ function _neutron_ovs_base_install_agent_packages { fi } -function _neutron_ovs_base_configure_debug_command { - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "" - else - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - fi -} - function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid From 952ecb6fec87e98bf7677cd4b481a20a8ffe36aa Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 13 Jan 2017 18:30:19 +0000 Subject: [PATCH 0346/1936] Don't set external_network_bridge by default Since the empty value is the default for the option, and when explcitly set in config file, it triggers a deprecation warning for the option, avoid setting it unless we actually need to override the new default value. 
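For illustration (an annotation, not part of the original patch): when the provider-network case does not apply, the only thing devstack still writes for this option is roughly the following l3 agent configuration fragment (br-ex as the value of PUBLIC_BRIDGE is an assumption, not something this patch sets):

    [DEFAULT]
    external_network_bridge = br-ex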
Change-Id: If423114d7a52da29b97d1fb473a955d9d69a1a3e --- lib/neutron_plugins/brocade | 1 - lib/neutron_plugins/linuxbridge_agent | 1 - lib/neutron_plugins/ovs_base | 4 +--- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 1b42e7794e..310b72e5ad 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -54,7 +54,6 @@ function neutron_plugin_configure_dhcp_agent { } function neutron_plugin_configure_l3_agent { - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index f1216377fa..dfed49b5e4 100755 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -48,7 +48,6 @@ function neutron_plugin_configure_l3_agent { local conf_file=$1 sudo brctl addbr $PUBLIC_BRIDGE set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU - iniset $conf_file DEFAULT external_network_bridge } function neutron_plugin_configure_plugin_agent { diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 795d2bb5b2..1a97001824 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -87,9 +87,7 @@ function _neutron_ovs_base_configure_firewall_driver { } function _neutron_ovs_base_configure_l3_agent { - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge "" - else + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE fi From e0a37cf21e43fbb4ba3f9f8fa5321a0a0e1bedf1 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Fri, 10 Feb 2017 15:01:37 +0100 Subject: [PATCH 0347/1936] tls proxy: immediately close a connection to the backend Force mod_proxy to immediately close a connection to the backend after being used, and thus, disable its persistent connection and pool for that backend. Let's see if that helps fixing bug #1630664 (the Connection aborted/ BadStatusLine thing). We already have an ER query (in queries/1630664.yaml) that should show whether this is effective. Change-Id: I03b09f7df5c6e134ec4091a2f8dfe8ef614d1951 --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index 57b5e525ac..d96ca4f197 100644 --- a/lib/tls +++ b/lib/tls @@ -520,7 +520,7 @@ $listen_string SSLCertificateFile $DEVSTACK_CERT - ProxyPass http://$b_host:$b_port/ retry=5 nocanon + ProxyPass http://$b_host:$b_port/ retry=5 disablereuse=on keepalive=off nocanon ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log From 9f2dcd333103553626db1924a019e151e3e7252e Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Sun, 12 Feb 2017 22:14:15 +0000 Subject: [PATCH 0348/1936] Use the latest 0.3.5 version of CirrOS This new version of CirrOS is built on top of 0.3 branch, so wrt version 0.3.4 it includes only two commits: - Cherry-pick of the fix for https://launchpad.net/bugs/1564948 - One extra fix to make the above working on 0.3 Cherry-picked commit is http://bazaar.launchpad.net/~cirros-dev/cirros/trunk/revision/366 A Tempest test for hard reboot in some cases hits the case where host key are empty. This triggers bugs/1564948, i.e. the ssh daemon does not start at all, and the Tempest test fails with "connection refused", which is misleading. 
The new version of CirrOS solves this problem as it ensure host keys are generated if missing, and the sshd deamon started. I tested the scenario of missing host keys in Iea74c63925be17a1df894c1a2c23f5ba2793e0c6 using a private build of what then became 0.3.5. Change-Id: I5c154ec25555e768954538fc22b4f5d5975b2deb --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index d8d0ee4b3e..52a0ff9cb9 100644 --- a/stackrc +++ b/stackrc @@ -614,7 +614,7 @@ esac #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.4"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.3.5"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From ee37d20f80d3a4871edd17a16c12e8ea5f0afadf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 8 Feb 2017 11:24:31 -0500 Subject: [PATCH 0349/1936] pass role by name not id Change-Id: Ie67758bed3563c9a46a5180eaa9c8d47721fffd8 --- lib/keystone | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/keystone b/lib/keystone index 474af8be1d..c1a6d43410 100644 --- a/lib/keystone +++ b/lib/keystone @@ -384,8 +384,7 @@ function create_keystone_accounts { admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) - local admin_role - admin_role=$(openstack role show "admin" -f value -c id) + local admin_role="admin" get_or_add_user_domain_role $admin_role $admin_user default @@ -403,13 +402,20 @@ function create_keystone_accounts { get_or_create_role ResellerAdmin # The Member role is used by Horizon and Swift so we need to keep it: - local member_role - member_role=$(get_or_create_role "Member") + local member_role="member" + + # Captial Member role is legacy hard coded in Horizon / Swift + # configs. Keep it around. + get_or_create_role "Member" + + # The reality is that the rest of the roles listed below honestly + # should work by symbolic names. + get_or_create_role $member_role # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! - local another_role - another_role=$(get_or_create_role "anotherrole") + local another_role="anotherrole" + get_or_create_role $another_role # invisible project - admin can't see this one local invis_project From 9ef346f59da92416b0e27ead8a846bc8dbd68c0b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 13 Feb 2017 15:09:35 +0100 Subject: [PATCH 0350/1936] Using sudo in the Quick start section Adding sudo to the example commands in the quick start section. Also adding '-' as su argument in order to use the stack user's env (home). 
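For illustration (an annotation, not part of the original patch): tee without -a truncates the file it writes to, so an equivalent form of the sudoers step shown in the diff below that appends the rule instead would be:

    $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers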
Change-Id: I23ab38104d05c3f4c8d48b55e66cf19dc4e4f90d --- doc/source/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index b8dd506aab..edd6595da2 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -56,15 +56,15 @@ You can quickly create a separate `stack` user to run DevStack with :: - $ adduser stack + $ sudo adduser stack Since this user will be making many changes to your system, it should have sudo privileges: :: - $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - $ su stack + $ sudo tee <<<"stack ALL=(ALL) NOPASSWD: ALL" /etc/sudoers + $ sudo su - stack Download DevStack ----------------- From 11eb2017ef70a758c54cc984e90e445205886e7e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 13 Feb 2017 16:16:59 -0500 Subject: [PATCH 0351/1936] simplify endpoints used in devstack The proliferation of internal/admin endpoints is mostly legacy and based on some specific deployment patterns. These are not used by everyone, and for the devstack case aren't really that useful. We should simplify our service catalog down to the minimum we need for development. Change-Id: Ided7a65c81b3a0b56f0184847fc82e17c29a771e --- functions-common | 12 ++++++++---- lib/cinder | 6 ------ lib/glance | 4 ---- lib/neutron | 2 -- lib/neutron-legacy | 2 -- lib/nova | 4 ---- lib/placement | 2 -- lib/swift | 3 +-- 8 files changed, 9 insertions(+), 26 deletions(-) diff --git a/functions-common b/functions-common index 8d32bb4148..f0940e57cf 100644 --- a/functions-common +++ b/functions-common @@ -992,7 +992,7 @@ function _get_or_create_endpoint_with_interface { } # Gets or creates endpoint -# Usage: get_or_create_endpoint +# Usage: get_or_create_endpoint [adminurl] [internalurl] function get_or_create_endpoint { # NOTE(jamielennnox): when converting to v3 endpoint creation we go from # creating one endpoint with multiple urls to multiple endpoints each with @@ -1004,9 +1004,13 @@ function get_or_create_endpoint { # endpoints they need. 
local public_id public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) - _get_or_create_endpoint_with_interface $1 admin $4 $2 - _get_or_create_endpoint_with_interface $1 internal $5 $2 - + # only create admin/internal urls if provided content for them + if [[ -n "$4" ]]; then + _get_or_create_endpoint_with_interface $1 admin $4 $2 + fi + if [[ -n "$5" ]]; then + _get_or_create_endpoint_with_interface $1 internal $5 $2 + fi # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } diff --git a/lib/cinder b/lib/cinder index 870ee0b905..24967d4151 100644 --- a/lib/cinder +++ b/lib/cinder @@ -395,24 +395,18 @@ function create_cinder_accounts { get_or_create_endpoint \ "volume" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" get_or_create_endpoint \ "volumev2" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" get_or_create_endpoint \ "volumev3" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" configure_cinder_internal_tenant diff --git a/lib/glance b/lib/glance index 26c41507ed..58f1deff6f 100644 --- a/lib/glance +++ b/lib/glance @@ -314,8 +314,6 @@ function create_glance_accounts { get_or_create_endpoint \ "image" \ "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 @@ -331,8 +329,6 @@ function create_glance_accounts { get_or_create_endpoint "artifact" \ "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" fi } diff --git a/lib/neutron b/lib/neutron index 9b032b72e2..19568eaf25 100644 --- a/lib/neutron +++ b/lib/neutron @@ -330,8 +330,6 @@ function create_neutron_accounts_new { "network" "Neutron Service") get_or_create_endpoint $neutron_service \ "$REGION_NAME" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" \ "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index af91470e65..b381b642c6 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -410,8 +410,6 @@ function create_mutnauq_accounts { get_or_create_endpoint \ "network" \ "$REGION_NAME" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" fi } diff --git a/lib/nova b/lib/nova index 4d336f6271..7c2fddbc75 100644 
--- a/lib/nova +++ b/lib/nova @@ -407,16 +407,12 @@ function create_nova_accounts { get_or_create_endpoint \ "compute_legacy" \ "$REGION_NAME" \ - "$nova_api_url/v2/\$(project_id)s" \ - "$nova_api_url/v2/\$(project_id)s" \ "$nova_api_url/v2/\$(project_id)s" get_or_create_service "nova" "compute" "Nova Compute Service" get_or_create_endpoint \ "compute" \ "$REGION_NAME" \ - "$nova_api_url/v2.1" \ - "$nova_api_url/v2.1" \ "$nova_api_url/v2.1" fi diff --git a/lib/placement b/lib/placement index 871e282f32..e7ffe3330b 100644 --- a/lib/placement +++ b/lib/placement @@ -132,8 +132,6 @@ function create_placement_accounts { get_or_create_endpoint \ "placement" \ "$REGION_NAME" \ - "$placement_api_url" \ - "$placement_api_url" \ "$placement_api_url" } diff --git a/lib/swift b/lib/swift index 03fd454dc6..5b510e5930 100644 --- a/lib/swift +++ b/lib/swift @@ -636,8 +636,7 @@ function create_swift_accounts { "object-store" \ "$REGION_NAME" \ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" local swift_project_test1 swift_project_test1=$(get_or_create_project swiftprojecttest1 default) From bfff93e1568a236be26a8e6c67e758b877f55a2a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 13 Feb 2017 16:18:08 -0500 Subject: [PATCH 0352/1936] remove keystone internal uri There is really no reason to have an internal uri referenced for keystone in devstack. Change-Id: If0588a0ab512d94451992e7131dd3c8a33c5797c --- lib/keystone | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 474af8be1d..45f0fa4fff 100644 --- a/lib/keystone +++ b/lib/keystone @@ -654,8 +654,7 @@ function bootstrap_keystone { --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \ - --bootstrap-public-url "$KEYSTONE_SERVICE_URI" \ - --bootstrap-internal-url "$KEYSTONE_SERVICE_URI" + --bootstrap-public-url "$KEYSTONE_SERVICE_URI" } # Restore xtrace From ff10ac318ce4be49d8bceafb7fea92ae1655b497 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 13 Feb 2017 12:44:24 -0500 Subject: [PATCH 0353/1936] Remove distro support based on new libvirt minimum Nova is going to increase the minimum required libvirt in Pike to 1.2.9 in change: I9a972e3fde2e4e552f6fc98350820c07873c3de3 Based on the libvirt distro support matrix wiki [1] that drops support for Ubuntu Trusty and Debian 7.0/Wheezy. Trusty has libvirt 1.2.2 and Wheezy has 0.9.12 (the Wheezy support should have been removed long ago apparently). The 7.0 removed here is for Wheezy also based on commit b2ef890db3d78b24f9da2f4dd80502165c669ad0. This does not undo the check for "trusty" with the EBTABLES_RACE_FIX in lib/nova_plugins/function-libvirt since you can still force devstack to run on Trusty if you specify the FORCE=yes variable. Note that RHEL 7.1 has libvirt 1.2.8 so it won't technically work with devstack and nova + pike + libvirt, but with the way os_RELEASE is calculated the minor version is dropped for RHEL distros so we just get "rhel7". 
Also note that this doesn't attempt to continue supporting Trusty or Wheezy if nova is not configured to use libvirt, simply in order to start moving forward on devstack distro support in general and to keep some sanity and closeness to what we test with in the CI system. While we're in here, we also drop Fedora 23 and add Ubuntu Zesty. [1] https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix Depends-On: I9a972e3fde2e4e552f6fc98350820c07873c3de3 Depends-On: If69f99bd789e646b0261e27a8a061efde32436f7 Change-Id: I6617283afd798af37e64913b7865cea3c8a62aba --- functions-common | 4 ++-- stack.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index 8d32bb4148..f4171049c0 100644 --- a/functions-common +++ b/functions-common @@ -302,9 +302,9 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora) +# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``trusty`` +# ``os_CODENAME`` - vendor's codename for release: ``xenial`` declare os_VENDOR os_RELEASE os_PACKAGE os_CODENAME diff --git a/stack.sh b/stack.sh index 7d440a7c20..02bcc58c0a 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL** +# (16.04 Xenial or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|f25|rhel7|kvmibm1) ]]; then +if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|sid|testing|jessie|f24|f25|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 6fc332d85279865c32f50b081efb25ba7b671a9a Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 6 Jul 2016 13:44:55 -0400 Subject: [PATCH 0354/1936] Switch devstack to use qcow cirros img This commit switches devstack to use the published qcow2 cirros image instead of the AMI version. Using AMI was mostly a historical artifact dating pretty far back, but in the real world no one really uses AMI images with openstack clouds. 
This change reflects that and also enables tempest ro remove its deprecated config options for using AMI as a fallback on misconfiguration (which was just there to support devstack's defaults) Change-Id: Id65ebae73b28da7185cb349b714b659af51ef77f --- lib/tempest | 7 ++----- stackrc | 12 ++++++------ 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/lib/tempest b/lib/tempest index 937436dc5c..108256ef81 100644 --- a/lib/tempest +++ b/lib/tempest @@ -415,14 +415,11 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario img_disk_format vhd iniset $TEMPEST_CONFIG scenario img_container_format ovf else - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_NAME fi iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE - iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img" - iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd" - iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME diff --git a/stackrc b/stackrc index d8d0ee4b3e..b1c0825dd6 100644 --- a/stackrc +++ b/stackrc @@ -634,9 +634,9 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz";; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + *) # otherwise, use the qcow image + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img";; esac ;; vsphere) @@ -650,9 +650,9 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then # NOTE(lucasagomes): The logic setting the default image # now lives in the Ironic tree ;; - *) # Default to Cirros with kernel, ramdisk and disk image - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + *) # Default to Cirros qcow2 image file + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img";; esac DOWNLOAD_DEFAULT_IMAGES=False fi From bc3d01c8ec4f79c852b9cd2b0a7d679b2a777aa6 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 14 Feb 2017 15:35:59 +0000 Subject: [PATCH 0355/1936] Revert "tls proxy: immediately close a connection to the backend" This reverts commit e0a37cf21e43fbb4ba3f9f8fa5321a0a0e1bedf1. This didn't help fixing bug #1630664. 
Issue seems to be between client<--->Apache2, not between Apache2<--->eventlet Change-Id: I092c1bbf0c5848b50fc9e491d1e9211451208a89 --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index d96ca4f197..57b5e525ac 100644 --- a/lib/tls +++ b/lib/tls @@ -520,7 +520,7 @@ $listen_string SSLCertificateFile $DEVSTACK_CERT - ProxyPass http://$b_host:$b_port/ retry=5 disablereuse=on keepalive=off nocanon + ProxyPass http://$b_host:$b_port/ retry=5 nocanon ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log From 437092518172770c549dabafaf9f81e3766719ce Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 14 Feb 2017 16:48:20 +0100 Subject: [PATCH 0356/1936] TLS proxy: disable HTTP KeepAlive There's a race condition when a client makes a request "at the same time" the HTTP connection is being closed by Apache because the `KeepAliveTimeout` is expired. This is explained in detail and can be reproduce using https://github.com/mikem23/keepalive-race or https://github.com/JordanP/openstack-snippets/blob/master/keepalive-race/keep-alive-race.py Just disable KeepAlive to fix the ('Connection aborted.', BadStatusLine("''",)) error we are seeing. Change-Id: I46e9f70ee740ec7996c98d386d5289c1491e9436 --- lib/tls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tls b/lib/tls index d96ca4f197..e044066c6a 100644 --- a/lib/tls +++ b/lib/tls @@ -519,6 +519,10 @@ $listen_string SSLEngine On SSLCertificateFile $DEVSTACK_CERT + # Disable KeepAlive to fix bug #1630664 a.k.a the + # ('Connection aborted.', BadStatusLine("''",)) error + KeepAlive Off + ProxyPass http://$b_host:$b_port/ retry=5 disablereuse=on keepalive=off nocanon ProxyPassReverse http://$b_host:$b_port/ From c6e6939e89a44a408065eb4585963175f8d0d6e3 Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Mon, 6 Feb 2017 14:24:42 -0800 Subject: [PATCH 0357/1936] Fix error in 'ip netns' parsing Sometimes when doing worlddump would see a command line like this: sudo ip netns exec (id: ip addr This would cause an error to be seen in console.log: 2017-02-07 00:03:03.659570 | /bin/sh: 1: Syntax error: "(" unexpected This is caused by there sometimes being extra data returned from the 'ip netns' command [1]. For example it might look like: qrouter-0805fd7d-c493-4fa6-82ca-1c6c9b23cd9e (id: 1) qdhcp-bb2cc6ae-2ae8-474f-adda-a94059b872b5 (id: 0) [1] https://lwn.net/Articles/629715/ Change-Id: Icece442023125ef55696b8d92a975d37e358b1b4 Closes-Bug: 1653969 --- tools/worlddump.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 1ce931efd5..8b418ede5a 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -151,7 +151,11 @@ def iptables_dump(): def _netns_list(): process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE) stdout, _ = process.communicate() - return stdout.split() + # NOTE(jlvillal): Sometimes 'ip netns list' can return output like: + # qrouter-0805fd7d-c493-4fa6-82ca-1c6c9b23cd9e (id: 1) + # qdhcp-bb2cc6ae-2ae8-474f-adda-a94059b872b5 (id: 0) + output = [x.split()[0] for x in stdout.splitlines()] + return output def network_dump(): From 88312fa61f889307de85cd04b12250db57210470 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 15 Feb 2017 16:48:04 +0100 Subject: [PATCH 0358/1936] Do not run cinder API V1 tests anymore by default I think now is a good time to stop running Cinder V1 tests. It should save quite some Infra resources and jobs should run faster too. 
Also, remove some useless variables in lib/tempest. Change-Id: I0edf1d88c136c3b910a5773690a603eeacb50266 --- lib/tempest | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 937436dc5c..4cceda6942 100644 --- a/lib/tempest +++ b/lib/tempest @@ -48,10 +48,6 @@ TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} -NOVA_SOURCE_DIR=$DEST/nova - -BUILD_INTERVAL=1 - # This is the timeout that tempest will wait for a VM to change state, # spawn, delete, etc. # The default is set to 196 seconds. @@ -446,6 +442,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True + iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} if [ "$tempest_volume_min_microversion" == "None" ]; then From 0b259c3abdafa99e7194e62c9a47483ddcf6b65a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 17 Feb 2017 11:51:36 -0500 Subject: [PATCH 0359/1936] only set nova catalog if it's not the default This ensures we only set the nova catalog when it's not the default, instead of also putting defaults in devstack. Change-Id: Ibb0dcb8bae2e9223db302d7b19e8fbee4ebbf0e3 --- lib/cinder | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 24967d4151..767fd00d34 100644 --- a/lib/cinder +++ b/lib/cinder @@ -125,12 +125,6 @@ if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then done fi -# Change the default nova_catalog_info and nova_catalog_admin_info values in -# cinder so that the service name cinder is searching for matches that set for -# nova in keystone. -CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL} -CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL} - # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} @@ -268,8 +262,15 @@ function configure_cinder { configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR - iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO - iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO + # Change the default nova_catalog_info and nova_catalog_admin_info values in + # cinder so that the service name cinder is searching for matches that set for + # nova in keystone. + if [[ -n "$CINDER_NOVA_CATALOG_INFO" ]]; then + iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO + fi + if [[ -n "$CINDER_NOVA_CATALOG_ADMIN_INFO" ]]; then + iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO + fi iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL From 6e49cab0913c13cb2fbf2620a9abe20cfc5f7ce3 Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Wed, 22 Feb 2017 11:42:22 +0800 Subject: [PATCH 0360/1936] Xen: support Ubuntu Xenial for xen DevStack Upgrade xen tool to install Ubuntu Xenial; change the upstart task to systemd task to finish the OpenStack installation by using devstack. 
Change-Id: I8129923be3c41e7f60e9d32348a5ea8e07d4845b --- tools/xen/build_xva.sh | 78 ++++++++++---------- tools/xen/install_os_domU.sh | 2 +- tools/xen/scripts/install_ubuntu_template.sh | 2 +- tools/xen/xenrc | 4 +- 4 files changed, 42 insertions(+), 44 deletions(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 25bf58cb8a..34ef719ab9 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -96,48 +96,27 @@ mkdir -p $STAGING_DIR/opt/stack/devstack tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack cd $TOP_DIR -# Create an upstart job (task) for devstack, which can interact with the console -cat >$STAGING_DIR/etc/init/devstack.conf << EOF -start on stopped rc RUNLEVEL=[2345] - -console output -task - -pre-start script - rm -f /opt/stack/runsh.succeeded -end script - -script - initctl stop hvc0 || true - - # Read any leftover characters from standard input - while read -n 1 -s -t 0.1 -r ignored; do - true - done - - clear - - chown -R $STACK_USER /opt/stack - - su -c "/opt/stack/run.sh" $STACK_USER - - # Update /etc/issue - { - echo "OpenStack VM - Installed by DevStack" - IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p') - echo " Management IP: \$IPADDR" - echo -n " Devstack run: " - if [ -e /opt/stack/runsh.succeeded ]; then - echo "SUCCEEDED" - else - echo "FAILED" - fi - echo "" - } > /etc/issue - initctl start hvc0 > /dev/null 2>&1 -end script +# Create an systemd task for devstack +cat >$STAGING_DIR/etc/systemd/system/devstack.service << EOF +[Unit] +Description=Install OpenStack by DevStack + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStartPre=/bin/rm -f /opt/stack/runsh.succeeded +ExecStart=/bin/su -c "/opt/stack/run.sh" stack +StandardOutput=tty +StandardError=tty + +[Install] +WantedBy=multi-user.target + EOF +# enable this service +ln -s $STAGING_DIR/etc/systemd/system/devstack.service $STAGING_DIR/etc/systemd/system/multi-user.target.wants/devstack.service + # Configure the hostname echo $GUEST_NAME > $STAGING_DIR/etc/hostname @@ -178,6 +157,8 @@ set -eux ( flock -n 9 || exit 1 + sudo chown -R stack /opt/stack + [ -e /opt/stack/runsh.succeeded ] && rm /opt/stack/runsh.succeeded echo \$\$ >> /opt/stack/run_sh.pid @@ -187,7 +168,24 @@ set -eux # Got to the end - success touch /opt/stack/runsh.succeeded + + # Update /etc/issue + ( + echo "OpenStack VM - Installed by DevStack" + IPADDR=$(ip -4 address show eth0 | sed -n 's/.*inet \([0-9\.]\+\).*/\1/p') + echo " Management IP: $IPADDR" + echo -n " Devstack run: " + if [ -e /opt/stack/runsh.succeeded ]; then + echo "SUCCEEDED" + else + echo "FAILED" + fi + echo "" + ) > /opt/stack/issue + sudo cp /opt/stack/issue /etc/issue + rm /opt/stack/run_sh.pid ) 9> /opt/stack/.runsh_lock EOF + chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 66b9eda474..d2e2c572c6 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -424,7 +424,7 @@ else echo "looking at the console of your domU / checking the log files." echo "" echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" - echo "and then do: 'sudo service devstack status' to check if devstack is still running." + echo "and then do: 'sudo systemctl status devstack' to check if devstack is still running." 
echo "Check that /opt/stack/runsh.succeeded exists" echo "" echo "When devstack completes, you can visit the OpenStack Dashboard" diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index d80ed095e8..6ea364255e 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -50,7 +50,7 @@ disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) # however these need to be answered before the netinstall # is ready to fetch the preseed file, and as such must be here # to get a fully automated install -pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ +pvargs="quiet console=hvc0 partman/default_filesystem=ext3 \ console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ netcfg/choose_interface=eth0 \ diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 2161247b76..60be02f3fe 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -63,8 +63,8 @@ PUB_IP=${PUB_IP:-172.24.4.10} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # Ubuntu install settings -UBUNTU_INST_RELEASE="trusty" -UBUNTU_INST_TEMPLATE_NAME="Ubuntu 14.04 (64-bit) for DevStack" +UBUNTU_INST_RELEASE="xenial" +UBUNTU_INST_TEMPLATE_NAME="Ubuntu 16.04 (64-bit) for DevStack" # For 12.04 use "precise" and update template name # However, for 12.04, you should be using # XenServer 6.1 and later or XCP 1.6 or later From 447f141d4f0e1b7be7f186714f18236c069b486e Mon Sep 17 00:00:00 2001 From: Ivan Kolodyazhny Date: Thu, 28 Jul 2016 18:42:25 +0300 Subject: [PATCH 0361/1936] Added support for fake drivers as Cinder backend FakeLoggingVolumeDriver will be used for functional Cinder tests to prevent dependencies on any storage. FakeGateDriver is based on LVM and will be used to run Tempest tests for such features like CG's, replication, etc. 
Depends-On: I383bcdb531c7d52c0fdbb6875de73f1274a92854 Change-Id: I2dc8ea416f5eb3fcc9d2e959533497e464220ff5 --- lib/cinder_backends/fake | 47 ++++++++++++++++++++++ lib/cinder_backends/fake_gate | 74 +++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 lib/cinder_backends/fake create mode 100644 lib/cinder_backends/fake_gate diff --git a/lib/cinder_backends/fake b/lib/cinder_backends/fake new file mode 100644 index 0000000000..4749aced69 --- /dev/null +++ b/lib/cinder_backends/fake @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/fake +# Configure the Fake backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake:fake + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF + +# clean_cinder_backend_fake - called from clean_cinder() +# configure_cinder_backend_fake - called from configure_cinder() +# init_cinder_backend_fake - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_FAKE=$(set +o | grep xtrace) +set +o xtrace + + +function cleanup_cinder_backend_fake { + local be_name=$1 +} + +function configure_cinder_backend_fake { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeLoggingVolumeDriver" + +} + +function init_cinder_backend_fake { + local be_name=$1 +} + +# Restore xtrace +$_XTRACE_CINDER_FAKE + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate new file mode 100644 index 0000000000..6b1f848790 --- /dev/null +++ b/lib/cinder_backends/fake_gate @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/lvm +# Configure the LVM backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake_gate:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# clean_cinder_backend_lvm - called from clean_cinder() +# configure_cinder_backend_lvm - called from configure_cinder() +# init_cinder_backend_lvm - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) +set +o xtrace + + +# TODO: resurrect backing device...need to know how to set values +#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_lvm - Delete volume group and remove backing file +# cleanup_cinder_backend_lvm $be_name +function cleanup_cinder_backend_lvm { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_lvm - Set config files, create data dirs, etc +# configure_cinder_backend_lvm $be_name +function configure_cinder_backend_lvm { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + + if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then + iniset $CINDER_CONF $be_name volume_clear none + fi +} + +# init_cinder_backend_lvm - Initialize volume group +# init_cinder_backend_lvm $be_name +function init_cinder_backend_lvm { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name 
$VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_LVM + +# mode: shell-script +# End: From ed887d8b9f91f8c75b45770d5c250b9939f36619 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 22 Feb 2017 14:21:33 -0500 Subject: [PATCH 0362/1936] lib/neutron: Fix conf handling - Remove extra spaces - Fix a missing space in the generated option string - Fix a fatal typo Change-Id: Ieca1c3e3c7e2ff59089ef45435e126ce7ff4f9b5 Closes-Bug: #1667073 --- lib/neutron | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index 19568eaf25..7d427555a4 100644 --- a/lib/neutron +++ b/lib/neutron @@ -395,9 +395,9 @@ function start_neutron_api { service_protocol="http" fi - local opts = "" - opts+="--config-file $NEUTRON_CONF" - opts+="--config-file $NEUTRON_CORE_PLUGIN_CONF" + local opts="" + opts+=" --config-file $NEUTRON_CONF" + opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" local cfg_file for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do opts+=" --config-file $cfg_file" @@ -405,7 +405,7 @@ function start_neutron_api { # Start the Neutron service # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $ops" + run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" if is_ssl_enabled_service "neutron"; then ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" From 84e45c91434c3c7e6796f9a201fd9b0fb8f7adcd Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 22 Feb 2017 14:25:14 -0500 Subject: [PATCH 0363/1936] lib/neutron: Fix an extra comma in service_plugins Closes-Bug: #1667077 Change-Id: Ib63a94a931c38a7b2a5fc91a8339a9cd657f7927 --- lib/neutron | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 7d427555a4..f277062b32 100644 --- a/lib/neutron +++ b/lib/neutron @@ -510,7 +510,10 @@ function neutron_service_plugin_class_add_new { local plugins="" plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - plugins+=",${service_plugin_class}" + if [ $plugins ]; then + plugins+="," + fi + plugins+="${service_plugin_class}" iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } From f58b37356323262d9ce22815a4cf4e129195c679 Mon Sep 17 00:00:00 2001 From: Prashant Shetty Date: Thu, 23 Feb 2017 13:48:12 +0000 Subject: [PATCH 0364/1936] Enable placement section on controller in multinode setup Currently placement api section will be configured on controller only if service n-cpu is running. It breaks multi node setup. Closes-Bug: #1667219 Change-Id: I8b0f60f253859f704bb9831d7dac8f55df353ac7 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 7d440a7c20..9a0cec601f 100755 --- a/stack.sh +++ b/stack.sh @@ -872,7 +872,7 @@ fi # if placement-api or placement-client is active, and n-cpu on the # same box. if is_service_enabled placement placement-client; then - if is_service_enabled n-cpu; then + if is_service_enabled n-cpu || is_service_enabled n-sch; then configure_placement_nova_compute fi fi From 6f0205b03630ecb308877f65ca3d4ab9020bc28d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 22 Feb 2017 05:59:30 -0800 Subject: [PATCH 0365/1936] Refactor rpc backend vhost creation The creation of the cellsv1 rpc vhost was buried in the restart function, which makes it hard to extend. This breaks it out into a helper method and moves the conditional logic into the nova module itself. 
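With the helper split out, any service that wants a partitioned vhost can create it and point its transport at it in two lines; this is exactly the pattern the nova cells hunk below adopts:

    # create the vhost (errors out with a message if the rpc backend is not rabbit)
    rpc_backend_add_vhost child_cell
    # configure the service to talk to that vhost
    iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell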
Change-Id: Ib0e377aabe45c27bb6ce59ca275ce73085e8b9d2 --- lib/nova | 1 + lib/rpc_backend | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index 4c264209d5..f5ab20100e 100644 --- a/lib/nova +++ b/lib/nova @@ -644,6 +644,7 @@ function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB` + rpc_backend_add_vhost child_cell iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF cells enable True diff --git a/lib/rpc_backend b/lib/rpc_backend index a21f781b4e..3c1404e998 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -97,13 +97,20 @@ function restart_rpc_backend { break done - if is_service_enabled n-cell; then - # Add partitioned access for the child cell - if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then - sudo rabbitmqctl add_vhost child_cell - sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*" - fi + fi +} + +# adds a vhost to the rpc backend +function rpc_backend_add_vhost { + local vhost="$1" + if is_service_enabled rabbit; then + if [ -z `sudo rabbitmqctl list_vhosts | grep $vhost` ]; then + sudo rabbitmqctl add_vhost $vhost + sudo rabbitmqctl set_permissions -p $vhost $RABBIT_USERID ".*" ".*" ".*" fi + else + echo 'RPC backend does not support vhosts' + return 1 fi } From 615e115474a570a9d3b7f6edfec365d1bbd31dc3 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 23 Feb 2017 10:41:51 +0000 Subject: [PATCH 0366/1936] lib/neutron: set variables needed for sane l3 agent setup Since for the new devstack library we still rely on some functions from ovs_base, we need to initialize them with sane default values so that setup works as intended and as lib/neutron-legacy behaves by default for external connectivity setup. Change-Id: I412ed4f988b8e03a3e3a08066375b55a6e6aa3e6 --- lib/neutron | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/neutron b/lib/neutron index d80e9d90a1..10f488de90 100644 --- a/lib/neutron +++ b/lib/neutron @@ -70,6 +70,14 @@ NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# This is needed because _neutron_ovs_base_configure_l3_agent will set +# external_network_bridge +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} +# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create +# an external network bridge +PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} +PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} + # Additional neutron api config files declare -a _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS From bf697f50650beecea46b665b706fa5bb5ecb1ede Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 23 Feb 2017 12:09:01 +0000 Subject: [PATCH 0367/1936] lib/neutron: untangle metering configuration from legacy The old code assumed _neutron_service_plugin_class_add presence, as well as used a configuration file path that is not standard (under /etc/neutron/services/ instead of /etc/neutron/metering_agent.ini). The patch untangles metering configuration in the new library from that old and bad code, and reimplements it inline. This should help the effort to switch gate from lib/neutron-legacy to lib/neutron. 
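The net effect is that the metering agent now follows the same pattern as the other lib/neutron agents: copy the sample file into place, register the service plugin, and launch the agent with neutron.conf plus its own file only; roughly:

    cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF
    neutron_service_plugin_class_add metering
    run_process neutron-metering "$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"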
Change-Id: I0d235498af4b6a70bd5dae6ea178d5aa8ba41e80 --- lib/neutron | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index d80e9d90a1..1aabf04b8a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -52,9 +52,13 @@ NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf. NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME +NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini} +NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME + NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} +NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} # Public facing bits if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then @@ -251,9 +255,7 @@ function configure_neutron_new { # Metering if is_service_enabled neutron-metering; then - source $TOP_DIR/lib/neutron_plugins/services/metering - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent + cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF neutron_service_plugin_class_add metering fi } @@ -454,7 +456,7 @@ function start_neutron_new { fi if is_service_enabled neutron-metering; then - run_process neutron-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + run_process neutron-metering "$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" fi } From 1d0841286e8eb4fbb18334cf664d89d78fef8efe Mon Sep 17 00:00:00 2001 From: Daniel Alvarez Date: Thu, 16 Feb 2017 09:38:13 +0000 Subject: [PATCH 0368/1936] Adding haproxy package to Neutron This patch adds haproxy package to devstack as Neutron will rely on it for serving metadata instead of the current Python implementation. haproxy will reduce the memory footprint from ~50MB to ~1.5MB for serving metadata. It will be spawned for every Neutron router so, for large deployments, it will be a significant memory reduction. 
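This change only stages the package; the actual switch happens on the Neutron side. Once that lands, a quick way to spot the per-namespace instances on a devstack node would be something like:

    # one haproxy process (with its own config) per qrouter-/qdhcp- namespace
    pgrep -a haproxy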
Change-Id: I36a5531cacc21c0d4bb7f20d4bec6da65d04c262 --- files/debs/neutron | 1 + files/rpms-suse/neutron | 1 + files/rpms/neutron | 1 + 3 files changed, 3 insertions(+) diff --git a/files/debs/neutron b/files/debs/neutron index 2307fa54d5..e30f678f7a 100644 --- a/files/debs/neutron +++ b/files/debs/neutron @@ -2,6 +2,7 @@ acl dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:precise ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils-arping iputils-ping diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index e9abc6eca6..d1cc73f115 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -2,6 +2,7 @@ acl dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils mariadb # NOPRIME diff --git a/files/rpms/neutron b/files/rpms/neutron index 2e49a0cf93..a4e029a6eb 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -2,6 +2,7 @@ acl dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils mysql-devel From 79b55f51e4a2a44e7e2a7a8c7df435e3451a4e22 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 22 Feb 2017 07:00:59 -0800 Subject: [PATCH 0369/1936] Fix file permissions for lib/neutron_plugins/linuxbridge_agent The permissions changed with commit 40aae6adbfce1bd896d5f7b0e281e798b56d1ca8 TrivialFix Change-Id: I100cb9589309f9289b4581265a5e4206464ddc0e --- lib/neutron_plugins/linuxbridge_agent | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 lib/neutron_plugins/linuxbridge_agent diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent old mode 100755 new mode 100644 From 807de8e5907f4bce8a697c194383d161b0e9c572 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 24 Feb 2017 14:55:33 +0100 Subject: [PATCH 0370/1936] Set OS_AUTH_TYPE to password Devstack configures keystone for auth mechanism but don't tell keystoneauth1 library that it should use keystone too. In simple case, this is not an issue because some application set 'password' by default (like the openstack cli). But applications can have no default or another default. Change-Id: Idd1e1d2e7546fce7531175440788a8c7cb27aec1 --- openrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/openrc b/openrc index 483b5af387..7d5f0bbf3e 100644 --- a/openrc +++ b/openrc @@ -79,6 +79,9 @@ KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} +# Ask keystoneauth1 to use keystone +export OS_AUTH_TYPE=password + # Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/project has access to - including nova, glance, keystone, swift, ... From f069acf9ee4ecb0532d55158c99356faa4ff6fc9 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Fri, 24 Feb 2017 16:25:59 +0100 Subject: [PATCH 0371/1936] Make subnet pool names unique Using the same name for two different subnet pools means that one needs to reference them by their UUID. Choosing unique names will allow us to use the name to reference the pool later on. At the same time simplify the command used for pool creation by instructing OSC to only output the value that we are interested in. 
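Unique names also mean later lookups no longer need a stored UUID; a script can resolve the pool directly, along the lines of:

    # resolve the default v4 pool by its now-unique name
    pool_id=$(openstack subnet pool show shared-default-subnetpool-v4 -f value -c id)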
Change-Id: Idedcb6328925d44cdd0f415450ec4ebbc272401d --- lib/neutron_plugins/services/l3 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index e87a30cadb..381162693d 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -87,7 +87,8 @@ PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} # Subnetpool defaults USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} -SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"} +SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"} +SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"} SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE} SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} @@ -169,10 +170,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2) + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2) + SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) fi fi fi From a1875b1ffcf29a7c1645d8e21402da331c102b6e Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 23 Feb 2017 05:44:22 +0900 Subject: [PATCH 0372/1936] neutron-legacy: Defer service_plugins configuration This allows post-config phase to use neutron_service_plugin_class_add so that devstack plugins can use it in the same way for both of neutron-legacy and its "modern" counterpart, lib/neutron. 
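In practice an out-of-tree plugin.sh can now do this in its post-config branch and the value still ends up in neutron.conf (the service plugin class name here is purely illustrative):

    if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        neutron_service_plugin_class_add my_service_plugin
    fi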
Closes-Bug: #1667037 Change-Id: I9068fd608e82e70db8d725f92269a26920efebcb --- lib/neutron-legacy | 13 ++++++++----- stack.sh | 1 + 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b381b642c6..446c7144f5 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -20,6 +20,7 @@ # - init_neutron_third_party # - start_neutron_third_party # - create_nova_conf_neutron +# - configure_neutron_after_post_config # - start_neutron_service_and_check # - check_neutron_third_party_integration # - start_neutron_agents @@ -331,7 +332,6 @@ function configure_mutnauq { _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES if is_service_enabled q-metering; then _configure_neutron_metering fi @@ -464,6 +464,13 @@ function install_neutron_agent_packages_mutnauq { fi } +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi +} + # Start running processes, including screen function start_neutron_service_and_check { local service_port=$Q_PORT @@ -836,10 +843,6 @@ function _configure_neutron_service { # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP diff --git a/stack.sh b/stack.sh index 7d440a7c20..19b07ab06b 100755 --- a/stack.sh +++ b/stack.sh @@ -1241,6 +1241,7 @@ if is_service_enabled neutron-api; then start_neutron_api elif is_service_enabled q-svc; then echo_summary "Starting Neutron" + configure_neutron_after_post_config start_neutron_service_and_check elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} From c043b6f8a84a575ea5cedc71499a81414a610e70 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 23 Feb 2017 22:30:08 -0500 Subject: [PATCH 0373/1936] lib/neutron: Add neutron_deploy_rootwrap_filters Change-Id: Icfa2cfa662013324d38099b82ce0f58ed8377e60 --- lib/neutron | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/neutron b/lib/neutron index f277062b32..c43cb674ba 100644 --- a/lib/neutron +++ b/lib/neutron @@ -521,6 +521,13 @@ function neutron_server_config_add_new { _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) } +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters +function neutron_deploy_rootwrap_filters_new { + local srcdir=$1 + sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d + sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d +} + # Dispatch functions # These are needed for compatibility between the old and new implementations # where there are function name overlaps. 
These will be removed when @@ -626,5 +633,14 @@ function stop_neutron { fi } +function neutron_deploy_rootwrap_filters { + if is_neutron_legacy_enabled; then + # Call back to old function + _neutron_deploy_rootwrap_filters "$@" + else + neutron_deploy_rootwrap_filters_new "$@" + fi +} + # Restore xtrace $XTRACE From 3a6916e76811cbd1962636e4fe8016f79ed6028e Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Sat, 25 Feb 2017 05:12:38 +0000 Subject: [PATCH 0374/1936] Install same packages for neutron-* services as for q-* This patch creates symlinks between neutron-* files tracking runtime dependencies for services, and q-* files. Since lib/neutron is the new recommended way to deploy neutron with devstack, this patch made neutron-* files real files, while q-* files are just symlinks. Change-Id: I0f361f31160d0fee40ad3e8de873edd05173e54e --- files/debs/neutron-agent | 1 + files/debs/neutron-l3 | 3 +++ files/debs/q-agt | 2 +- files/debs/q-l3 | 4 +--- files/rpms-suse/neutron-agent | 1 + files/rpms-suse/neutron-l3 | 2 ++ files/rpms-suse/q-agt | 2 +- files/rpms-suse/q-l3 | 3 +-- files/rpms/neutron-agent | 1 + files/rpms/neutron-l3 | 2 ++ files/rpms/q-agt | 2 +- files/rpms/q-l3 | 3 +-- 12 files changed, 16 insertions(+), 10 deletions(-) create mode 100644 files/debs/neutron-agent create mode 100644 files/debs/neutron-l3 mode change 100644 => 120000 files/debs/q-agt mode change 100644 => 120000 files/debs/q-l3 create mode 100644 files/rpms-suse/neutron-agent create mode 100644 files/rpms-suse/neutron-l3 mode change 100644 => 120000 files/rpms-suse/q-agt mode change 100644 => 120000 files/rpms-suse/q-l3 create mode 100644 files/rpms/neutron-agent create mode 100644 files/rpms/neutron-l3 mode change 100644 => 120000 files/rpms/q-agt mode change 100644 => 120000 files/rpms/q-l3 diff --git a/files/debs/neutron-agent b/files/debs/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/debs/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/debs/neutron-l3 b/files/debs/neutron-l3 new file mode 100644 index 0000000000..106a6a35aa --- /dev/null +++ b/files/debs/neutron-l3 @@ -0,0 +1,3 @@ +conntrack +conntrackd +keepalived diff --git a/files/debs/q-agt b/files/debs/q-agt deleted file mode 100644 index ea8819e884..0000000000 --- a/files/debs/q-agt +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/debs/q-agt b/files/debs/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/debs/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/debs/q-l3 b/files/debs/q-l3 deleted file mode 100644 index 106a6a35aa..0000000000 --- a/files/debs/q-l3 +++ /dev/null @@ -1,3 +0,0 @@ -conntrack -conntrackd -keepalived diff --git a/files/debs/q-l3 b/files/debs/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/debs/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/rpms-suse/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3 new file mode 100644 index 0000000000..a7a190c063 --- /dev/null +++ b/files/rpms-suse/neutron-l3 @@ -0,0 +1,2 @@ +conntrack-tools +keepalived diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms-suse/q-agt +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt 
new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/rpms-suse/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms-suse/q-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/rpms-suse/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/rpms/neutron-agent b/files/rpms/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/rpms/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/rpms/neutron-l3 b/files/rpms/neutron-l3 new file mode 100644 index 0000000000..a7a190c063 --- /dev/null +++ b/files/rpms/neutron-l3 @@ -0,0 +1,2 @@ +conntrack-tools +keepalived diff --git a/files/rpms/q-agt b/files/rpms/q-agt deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms/q-agt +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms/q-agt b/files/rpms/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/rpms/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms/q-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/rpms/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file From 1e7f738f284b85ed95d514fb13fbc1afb6b31087 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 27 Feb 2017 11:19:40 +1100 Subject: [PATCH 0375/1936] Ensure we install setuptools from requirements Use pip_install_gr so we get the version pinned by requirements. The depends-on is an example of where we're trying to pin to workaround issues. Depends-On: I9c57c08a150571c5bb62235d502839394d53a4c1 Change-Id: I780cca681b12a3e9d228dbf2fd9fa6e8ab1a82e1 --- tools/install_pip.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index a5ccb19399..dbe52782a4 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -144,6 +144,9 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then fi set -x -pip_install -U setuptools + +# Note setuptools is part of requirements.txt and we want to make sure +# we obey any versioning as described there. +pip_install_gr setuptools get_versions From 52bb64105fd559ee69a8ec1c0733e1f7448401a5 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 27 Feb 2017 15:11:11 +1100 Subject: [PATCH 0376/1936] Use qemu-kvm-ev package on centos For the latest qemu-kvm, you have to use the qemu-kvm-ev package, which is based off the qemu-kvm-rhev package, which is explained in [1] but you probably can't read it. The gist is, that qemu-kvm-rhev is a later build of kvm that is incompatible with the base version provided. qemu-kvm-rhev is only provided with the RHV (ovirt) and RHOS (openstack) products. CentOS rebuilds this package as qemu-kvm-ev as part of it's virtualisation SIG. I9a972e3fde2e4e552f6fc98350820c07873c3de3 has bumped up the minimum qemu version to 2.1.0. It seems there is a an issue (bug #1668164) where having the qemu-system package installed gets picked up if installed, and reports the incorrect version to nova, causing failure. 
This removes the installs from files/rpms/nova as it is all being done in function-libvirt. We only install the qemu-kvm-ev package on centos and remove the old work-around. [1] https://access.redhat.com/solutions/629513 [2] https://wiki.centos.org/SpecialInterestGroup/Virtualization Change-Id: Ide91b261f35fb19d8bd7155ca016fa3b76a45ea1 --- files/rpms/nova | 5 ----- lib/nova_plugins/functions-libvirt | 21 +++++++++++++-------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/files/rpms/nova b/files/rpms/nova index 45f1c94f1f..a368c552aa 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -9,10 +9,6 @@ iptables iputils kernel-modules # dist:f23,f24,f25 kpartx -kvm # NOPRIME -libvirt-bin # NOPRIME -libvirt-devel # NOPRIME -libvirt-python # NOPRIME libxml2-python m2crypto mysql-devel @@ -21,7 +17,6 @@ mysql-server # NOPRIME numpy # needed by websockify for spice console parted polkit -qemu-kvm # NOPRIME rabbitmq-server # NOPRIME sqlite sudo diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 47b054bc58..d225ef84fb 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -34,18 +34,23 @@ function install_libvirt { #pip_install_gr elif is_fedora || is_suse; then # On "KVM for IBM z Systems", kvm does not have its own package - if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then + if [[ ! ${DISTRO} =~ "kvmibm1" && ! ${DISTRO} =~ "rhel7" ]]; then install_package kvm fi - # there is a dependency issue with kvm (which is really just a - # wrapper to qemu-system-x86) that leaves some bios files out, - # so install qemu-kvm (which shouldn't strictly be needed, as - # everything has been merged into qemu-system-x86) to bring in - # the right packages. see - # https://bugzilla.redhat.com/show_bug.cgi?id=1235890 - install_package qemu-kvm + + if [[ ${DISTRO} =~ "rhel7" ]]; then + # On centos7 install the qemu-kvm-ev package, which is a + # later version of qemu-kvm rebuilt from the qemu-kvm-rhev + # package by the virt SIG (as required by nova). This + # package is only provided for RHOS (openstack) or RHV + # (ovirt) in RHEL. We have already insalled the RDO + # repositories which provide this. + install_package qemu-kvm-ev + fi + install_package libvirt libvirt-devel pip_install_gr libvirt-python + fi } From 0bf75a471ecce8c05718644e7e070b4d5a8657d6 Mon Sep 17 00:00:00 2001 From: Denis Buliga Date: Mon, 6 Feb 2017 16:56:46 +0200 Subject: [PATCH 0377/1936] Skips enabling kernel bridge firewall in container Calling enable_kernel_bridge_firewall inside a container, devstack will crash because it tries to load a kernel module by calling 'sudo modprobe' on net.bridge. Change-Id: Id4718c065d5a8c507d49f38e19c2796a64221aa4 Closes-Bug: #1662194 --- functions | 10 ++++++++++ lib/neutron | 4 +++- lib/neutron_plugins/linuxbridge_agent | 4 +++- lib/neutron_plugins/ovs_base | 4 +++- lib/nova | 4 +++- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 89ee3672d3..f262fbccc4 100644 --- a/functions +++ b/functions @@ -664,6 +664,16 @@ function set_mtu { } +# running_in_container - Returns true otherwise false +function running_in_container { + if grep -q lxc /proc/1/cgroup; then + return 0 + fi + + return 1 +} + + # enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling function enable_kernel_bridge_firewall { # Load bridge module. 
This module provides access to firewall for bridged diff --git a/lib/neutron b/lib/neutron index f6c705c2b0..a43fdeb903 100644 --- a/lib/neutron +++ b/lib/neutron @@ -188,7 +188,9 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP fi - enable_kernel_bridge_firewall + if ! running_in_container; then + enable_kernel_bridge_firewall + fi fi # DHCP Agent diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 0c8ccb8718..f031fc7c59 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -71,7 +71,9 @@ function neutron_plugin_configure_plugin_agent { fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - enable_kernel_bridge_firewall + if ! running_in_container; then + enable_kernel_bridge_firewall + fi else iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 62a4d00bcd..733a5c13e2 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -88,7 +88,9 @@ function _neutron_ovs_base_configure_debug_command { function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid - enable_kernel_bridge_firewall + if ! running_in_container; then + enable_kernel_bridge_firewall + fi else iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop fi diff --git a/lib/nova b/lib/nova index 4d336f6271..d1c0d470a7 100644 --- a/lib/nova +++ b/lib/nova @@ -864,7 +864,9 @@ function start_nova_rest { run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" if is_service_enabled n-net; then - enable_kernel_bridge_firewall + if ! running_in_container; then + enable_kernel_bridge_firewall + fi fi run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" From 4af6eeac524f2ba316b0198a72ef8d916df9d56f Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 28 Feb 2017 15:13:02 +0800 Subject: [PATCH 0378/1936] cinder: wait for cinder-api for wsgi too Wait for cinder-api's status if CINDER_USE_MOD_WSGI=True Change-Id: I40e5e08633572f877a25280496141423f232d447 --- lib/cinder | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index 767fd00d34..130345f560 100644 --- a/lib/cinder +++ b/lib/cinder @@ -528,10 +528,11 @@ function start_cinder { tail_log c-api /var/log/$APACHE_NAME/c-api.log else run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - echo "Waiting for Cinder API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then - die $LINENO "c-api did not start" - fi + fi + + echo "Waiting for Cinder API to start..." + if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then + die $LINENO "c-api did not start" fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" From e624e48ddf8ba9e8bd12e1de5990ae74bb269922 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 28 Feb 2017 15:16:16 +0800 Subject: [PATCH 0379/1936] nova: Fix comments for _config_nova_apache_wsgi Comments cleanup. 
Change-Id: I02748b906f7bb75240bc4e5259005cd72ef49f6b --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index f5ab20100e..b4a5fb143c 100644 --- a/lib/nova +++ b/lib/nova @@ -247,7 +247,7 @@ function _cleanup_nova_apache_wsgi { sudo rm -f $(apache_site_config_for nova-metadata) } -# _config_nova_apache_wsgi() - Set WSGI config files of Keystone +# _config_nova_apache_wsgi() - Set WSGI config files of Nova API function _config_nova_apache_wsgi { sudo mkdir -p $NOVA_WSGI_DIR From 19f4b3faae5f64a497bf6c13fb29b65301ae499c Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 23 Feb 2017 20:44:18 +0000 Subject: [PATCH 0380/1936] lib/neutron: stop loading all config files into all processes DHCP agent should not load core plugin config file; L3 agent has no interest in metadata agent configuration file; etc. It's a mistake to form a single global list of configuration files and pass it into all processes. Every process should have its own list, that may or may not have some files in common with other processes. The only file that is common to all neutron processes is neutron.conf, and we could in theory keep it into the common list. But I decided at this point it's better to be explicit about what's loaded into services. Also the order of arguments is important, and neutron.conf should always be the first CLI argument, which is hard to achieve by keeping neutron.conf file in the global list. Plugins may be interested in loading additional files into neutron processes. For example, dragonflow needs to load /etc/neutron/dragonflow.ini into neutron-server. But we should not necessarily load all those files into all processes, so such extendable lists should be per process. Besides, neutron_server_config_add_new is already available to use to append additional configuration files for neutron-server. That's why the patch completely kills the NEUTRON_CONFIG_ARG variable. 
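For out-of-tree plugins the per-process pattern is to register extra files explicitly for the one process that needs them; the dragonflow file named above is only an example, but the call (via the neutron_server_config_add_new helper this message refers to) would look like:

    # appended to the neutron-server command line only, not to the agents
    neutron_server_config_add_new /etc/neutron/dragonflow.ini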
Depends-On: I4bd54a41a45486a5601373f9a9cce74d7686d1aa Change-Id: Ia3c3862399bba335db5edf9ea70f850fb2638d09 --- lib/neutron | 40 ++++++---------------------------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/lib/neutron b/lib/neutron index 19568eaf25..d80e9d90a1 100644 --- a/lib/neutron +++ b/lib/neutron @@ -70,9 +70,6 @@ NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" -# Add all enabled config files to a single config arg -NEUTRON_CONFIG_ARG=${NEUTRON_CONFIG_ARG:-""} - # Additional neutron api config files declare -a _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS @@ -347,7 +344,7 @@ function init_neutron_new { recreate_database neutron # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage $NEUTRON_CONFIG_ARG upgrade heads + $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads create_neutron_cache_dir } @@ -426,20 +423,19 @@ function start_neutron_api { # start_neutron() - Start running processes, including screen function start_neutron_new { - _set_config_files - # Start up the neutron agents if enabled # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins # can resolve the $NEUTRON_AGENT_BINARY if is_service_enabled neutron-agent; then - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY $NEUTRON_CONFIG_ARG" + # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files + run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" fi if is_service_enabled neutron-dhcp; then neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY $NEUTRON_CONFIG_ARG" + run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF" fi if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG" + run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" fi if is_service_enabled neutron-api; then # XXX(sc68cal) - Here's where plugins can wire up their own networks instead @@ -454,7 +450,7 @@ function start_neutron_new { fi fi if is_service_enabled neutron-metadata-agent; then - run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG" + run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF" fi if is_service_enabled neutron-metering; then @@ -480,30 +476,6 @@ function stop_neutron_new { fi } -# Compile the lost of enabled config files -function _set_config_files { - - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF" - - #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_CORE_PLUGIN_CONF (ml2_conf.ini) but others may not - if is_service_enabled neutron-agent; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" - fi - - if is_service_enabled neutron-dhcp; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_DHCP_CONF" - fi - - if is_service_enabled neutron-l3; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_L3_CONF" - fi - - if is_service_enabled neutron-metadata-agent; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_META_CONF" - fi - -} - # neutron_service_plugin_class_add() - add service 
plugin class function neutron_service_plugin_class_add_new { local service_plugin_class=$1 From 0b1ea080a256b02610f1c9a840a3c2a3f4ea0e68 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 28 Feb 2017 14:04:59 -0500 Subject: [PATCH 0381/1936] install OSC with py3 by default Add python-openstackclient to the list of packages installed under Python 3 by default, so that jobs running with Python 3 exercise the client that way. Change-Id: I9778a6810bb3e4850132cfc19e583d50fed23ef5 Signed-off-by: Doug Hellmann --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 95f017b9b1..9673074a5e 100644 --- a/stackrc +++ b/stackrc @@ -109,7 +109,7 @@ export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) # base name of the directory from which they are installed. See # enable_python3_package to edit this variable and use_python3_for to # test membership. -export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi" +export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient" # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. From f119121d21fa0446197b26378091677daac1606a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 16 Feb 2017 16:33:08 -0500 Subject: [PATCH 0382/1936] clean up virt drivers that we do image logic for openvz is not in the nova tree, and is referencing a crazy old image, we're going to assume that if anyone is using this they can build a devstack plugin. drop doing anything by default because this actually requires that we special case things like ironic in tree to *not* do anything by default. Change-Id: I9d33b98263c3d52a95b9983e90eb0b341fa1d363 --- stackrc | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stackrc b/stackrc index 95f017b9b1..97819d712b 100644 --- a/stackrc +++ b/stackrc @@ -636,9 +636,6 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then IMAGE_URLS+="," fi case "$VIRT_DRIVER" in - openvz) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} - IMAGE_URLS+="http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz";; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc @@ -656,13 +653,6 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; - ironic) - # NOTE(lucasagomes): The logic setting the default image - # now lives in the Ironic tree - ;; - *) # Default to Cirros qcow2 image file - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img";; esac DOWNLOAD_DEFAULT_IMAGES=False fi From b1d8519b40845c3b28c32e530010aa015f970185 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 23 Feb 2017 08:01:32 -0800 Subject: [PATCH 0383/1936] Move rpc backend start/configure to earlier Because things like nova may need to create vhosts in the rpc backend, we need to have started and created credentials before we configure the service. 
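The reason for the ordering is mechanical: helpers such as rpc_backend_add_vhost shell out to rabbitmqctl, so the broker must be running and its credentials created before any service configuration runs. After this change stack.sh effectively does:

    install_rpc_backend
    restart_rpc_backend   # broker is now running, vhosts can be created
    # ... per-service configure/init phases follow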
Change-Id: I01c9c5288e197fc50a8a4a032e3a32cd166eb180 --- stack.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 94315e1368..2f43db7418 100755 --- a/stack.sh +++ b/stack.sh @@ -761,6 +761,7 @@ install_infra run_phase stack pre-install install_rpc_backend +restart_rpc_backend # NOTE(sdague): dlm install is conditional on one being enabled by configuration install_dlm @@ -952,11 +953,6 @@ EOF fi -# Finalize queue installation -# ---------------------------- -restart_rpc_backend - - # Export Certificate Authority Bundle # ----------------------------------- From 94c400cc5428f24d96ed98678a988bd26485e63e Mon Sep 17 00:00:00 2001 From: Rodrigo Duarte Sousa Date: Thu, 2 Feb 2017 14:48:28 -0300 Subject: [PATCH 0384/1936] Add allow_global_implied_dsr_disabled feature flag This patch enabled the "allow_global_implied_dsr_disabled" feature flag. This is a feature flag toggle for bug 1590578 which is fixed in Newton and Ocata. This option can be removed after Mitaka is end of life. Change-Id: I70e3ce79ee6d9b00cc48bb178bd423d0196f6588 Related-Bug: #1590578 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 128e9728fe..d95a9f5c3d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -290,6 +290,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True fi + # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed in + # Newton and Ocata. This option can be removed after Mitaka is end of life. + iniset $TEMPEST_CONFIG identity-feature-enabled forbid_global_implied_dsr True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From 008aa3e095904130e191f5867ec47d4cf53353da Mon Sep 17 00:00:00 2001 From: Evgeny Antyshev Date: Thu, 2 Mar 2017 11:14:25 +0000 Subject: [PATCH 0385/1936] Fix install_libvirt for other RHEL-based distros Since https://review.openstack.org/#/c/438325 landed it only works for Centos 7, but not for other RHEL-based distributions: Virtuozzo and, probably, RHEV. Both of above have own version for qemu-kvm package: qemu-kvm-vz and qemu-kvm-rhev, accordingly. These packages provide "qemu-kvm", like qemu-kvm-ev, and, when you call "yum install qemu-kvm", they replace the default OS package. Change-Id: I46da627c0da8925064862fdc283db81591979285 --- lib/nova_plugins/functions-libvirt | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d225ef84fb..56bb6bda1c 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -39,13 +39,11 @@ function install_libvirt { fi if [[ ${DISTRO} =~ "rhel7" ]]; then - # On centos7 install the qemu-kvm-ev package, which is a - # later version of qemu-kvm rebuilt from the qemu-kvm-rhev - # package by the virt SIG (as required by nova). This - # package is only provided for RHOS (openstack) or RHV - # (ovirt) in RHEL. We have already insalled the RDO - # repositories which provide this. 
- install_package qemu-kvm-ev + # This should install the latest qemu-kvm build, + # which is called qemu-kvm-ev in centos7 + # (as the default OS qemu-kvm package is usually rather old, + # and should be updated by above) + install_package qemu-kvm fi install_package libvirt libvirt-devel From 1e66388c5f2b81b4fc5d544dbf5fde2935218bd0 Mon Sep 17 00:00:00 2001 From: Amrith Kumar Date: Mon, 27 Feb 2017 13:29:03 -0500 Subject: [PATCH 0386/1936] put mysql on a memory diet We propose several MySQL configuration parameter changes (with explanations) to reduce the memory footprint of MySQL. A demonstration of the improvement is provided in https://etherpad.openstack.org/p/change-438668. As Clint provided some of the descriptions that I've used, I have listed him as a co-author (thanks Clint). Let this serve as a warning to all that commetors may be enlisted :) Change-Id: Icb2d6ea91d3d45a68ce99c817a746b10039479cc Co-Authored-By: Clint 'SpamapS' Byrum --- lib/databases/mysql | 183 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 182 insertions(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 7bbcace399..e2c83433d2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -96,10 +96,191 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB - iniset -sudo $my_conf mysqld max_connections 1024 + + # the number of connections has been throttled to 256. In the + # event that the gate jobs report "Too many connections" it is + # indicative of a problem that could be the result of one of many + # things. For more details about debugging this error, refer + # https://dev.mysql.com/doc/refman/5.5/en/too-many-connections.html. + # Note that the problem may not ONLY be an issue with MySQL + # connections. If the number of fd's at the OS is too low, you + # could see errors manifest as MySQL "too many connections". + iniset -sudo $my_conf mysqld max_connections 256 iniset -sudo $my_conf mysqld query_cache_type OFF iniset -sudo $my_conf mysqld query_cache_size 0 + # Additional settings to put MySQL on a memory diet. These + # settings are used in conjunction with the cap on max_connections + # as the total memory used by MySQL can be simply viewed as + # fixed-allocations + max_connections * variable-allocations. A + # nifty tool to help with this is + # http://www.mysqlcalculator.com/. A short description of each of + # the settings follows. + + # binlog_cache_size, determines the size of cache to hold changes + # to the binary log during a transaction, for each connection. For + # more details, refer + # https://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#sysvar_binlog_cache_size + # When binary logging is enabled, a smaller binlog cache could + # result in more frequent flushes to the disk and a larger value + # would result in less flushes to the disk but higher memory + # usage. This however only has to do with large transactions; if + # you have a small transaction the binlog cache is necessarily + # flushed on a transaction commit. This is a per-connection cache. + iniset -sudo $my_conf mysqld binlog_cache_size 4K + + # binlog_stmt_cache_size determines the size of cache to hold non + # transactional statements in the binary log. 
For more details, + # refer + # https://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#sysvar_binlog_stmt_cache_size + # This cache holds changes to non-transactional tables (read: + # MyISAM) or any non-transactional statements which cause + # modifications to data (truncate is an example). These are + # written to disk immediately on completion of the statement or + # when the cache is full. If the cache is too small, you get + # frequent writes to the disk (flush) and if the cache is too + # large, it takes up more memory. This is a per-connection cache. + iniset -sudo $my_conf mysqld binlog_stmt_cache_size 4K + + # bulk_insert_buffer_size for MyISAM tables that use a special + # cache for insert statements and load statements, this cache is + # used to optimize writes to the disk. If the value is set to 0, + # the optimization is disabled. For more details refer + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_bulk_insert_buffer_size + # We set this to 0 which could result in higher disk I/O (I/O on + # each insert block completion). + iniset -sudo $my_conf mysqld bulk_insert_buffer_size 0 + + # host_cache_size controls a DNS lookup optimization. For more + # details refer + # https://dev.mysql.com/doc/refman/5.6/en/host-cache.html + iniset -sudo $my_conf mysqld host_cache_size 0 + + # innodb_buffer_pool_size This is the size of the server wide + # buffer pool. It is the cache for all data blocks being used by + # the server and is managed as a LRU chain. Dirty blocks either + # age off the list or are forced off when the list is + # full. Setting this to 5MB (default 128MB) reduces the amount of + # memory used by the server and this will result in more disk I/O + # in cases where (a) there is considerable write activity that + # overwhelms the allocated cache, or (b) there is considerable + # read activity on a data set that exceeds the allocated + # cache. For more details, refer + # https://dev.mysql.com/doc/refman/5.6/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 5M + + # innodb_ft_cache_size and innodb_ft_total_cache_size control the + # per-connection full text search cache and the server wide + # maximum full text search cache. We should not be using full text + # search and the value is set to the minimum allowable. The former + # is a per-connection cache size and the latter is server + # wide. For more details, refer + # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_cache_size + # and + # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_total_cache_size + iniset -sudo $my_conf mysqld innodb_ft_cache_size 1600000 + iniset -sudo $my_conf mysqld innodb_ft_total_cache_size 32000000 + + # innodb_log_buffer_size This buffer is used to buffer + # transactions in-memory before writing them to the innodb + # internal transaction log. Large transactions, or high amounts of + # concurrency, will cause the system to fill this faster and thus + # make the system more disk-bound. For more details, refer + # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size + iniset -sudo $my_conf mysqld innodb_log_buffer_size 256K + + # innodb_sort_buffer_size, This buffer is used for sorting when + # InnoDB is creating indexes. Could cause that to be slower, but + # only if tables are large. This is a per-connection setting. 
For + # more details, refer + # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_sort_buffer_size + iniset -sudo $my_conf mysqld innodb_sort_buffer_size 64K + + # join_buffer_size, This buffer makes table and index scans + # faster. So this setting could make some queries more disk + # bound. This is a per-connection setting. For more details refer + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size. + iniset -sudo $my_conf mysqld join_buffer_size 128 + + # key_buffer_size defines the index blocks used for MyISAM tables + # and shared between threads. This is a server wide setting. For + # more details see + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_key_buffer_size + iniset -sudo $my_conf mysqld key_buffer_size 8 + + # max_heap_table_size sets the maximum amount of memory for MEMORY + # tables (which we don't use). The value is set to 16k, the + # minimum allowed. For more details, see + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size + iniset -sudo $my_conf mysqld max_heap_table_size 16K + + # net_buffer_length Each client has a buffer for incoming and + # outgoing data, both start with a size of net_buffer_length and + # can grow (in steps of 2x) upto a size of max_allowed_packet. For + # more details see + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_buffer_length + iniset -sudo $my_conf mysqld net_buffer_length 1K + + # read_buffer_size, read_rnd_buffer_size are per-thread buffer + # used for scans on MyISAM tables. It is a per-connection setting + # and so we set it to the minimum value allowable. Same for + # read_rnd_buffer_size. For more details refer + # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_read_buffer_size + # and + # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_read_rnd_buffer_size + iniset -sudo $my_conf mysqld read_buffer_size 8200 + iniset -sudo $my_conf mysqld read_rnd_buffer_size 8200 + + # sort_buffer_size when a sort is requested, it will be performed + # in memory in a buffer of this size (allocated per connection) + # and if the data exceeds this size it will spill to disk. The + # innodb and myisam variables are used in computing indices for + # tables using the specified storage engine. Since we don't + # dynamically reindex (except during upgrade) these values should + # never be material. Obviously performance of disk based sorts is + # worse than in memory sorts and therefore a high value here will + # improve sort performance for large data. For more details, + # refer: + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size + # and + # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_sort_buffer_size + # and + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_myisam_sort_buffer_size + iniset -sudo $my_conf mysqld sort_buffer_size 32K + iniset -sudo $my_conf mysqld innodb_sort_buffer_size 64K + iniset -sudo $my_conf mysqld myisam_sort_buffer_size 4K + + # thread_cache_size specifies how many internal threads to cache + # for use with incoming connections. We set this to 0 whic means + # that each connection will cause a new thread to be created. This + # could cause connections to take marginally longer on os'es with + # slow pthread_create calls. 
For more details, refer + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size + iniset -sudo $my_conf mysqld thread_cache_size 0 + + # thread_stack is the per connection stack size, the minimum is + # 128k and the default is 192k on 32bit and 256k on 64bit + # systems. We set this to 192k. Complex queries which require + # recursion, stored procedures or other memory intensive + # operations could exhaust this and generate a very characteristic + # failure ("stack overflow") which is cleanly detected and the + # query is killed. For more details see + # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_thread_stack + iniset -sudo $my_conf mysqld thread_stack 196608 + + # tmp_table_size is the maximum size of an in-memory temporary + # table. Temporary tables are created by MySQL as part of a + # multi-step query plan. The actual size of the temp table will be + # the lesser of tmp_table_size and max_heap_table_size. If a + # temporary table exceeds this size, it will be spooled to disk + # using the internal_tmp_disk_storage_engine (default + # MyISAM). Queries that often generate in-memory temporary tables + # include queries that have sorts, distinct, or group by + # operations, also queries that perform IN joins. For more details + # see + # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size + iniset -sudo $my_conf mysqld tmp_table_size 1K + if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" if is_fedora; then From 0ce4ba915bb6649884e1a3b6c72a8e879eb562f9 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 24 Feb 2017 05:13:53 +0000 Subject: [PATCH 0387/1936] Use lib/keystone to configure [nova] section in neutron.conf This simplifies neutron library code and makes it less prone to breakage in the future. So far there are no specific known issues with existing code per se, it works, still. Change-Id: I28f1997d226baae902dae5ca8ee6cd4fd89efe31 --- lib/neutron | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/lib/neutron b/lib/neutron index f277062b32..f9d044225f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -153,15 +153,7 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken - - iniset $NEUTRON_CONF nova auth_type password - iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" - iniset $NEUTRON_CONF nova username nova - iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD - iniset $NEUTRON_CONF nova user_domain_id default - iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME - iniset $NEUTRON_CONF nova project_domain_id default - iniset $NEUTRON_CONF nova region_name $REGION_NAME + configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova # Configure VXLAN # TODO(sc68cal) not hardcode? 
@@ -240,10 +232,6 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" fi - if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE - fi - if is_ssl_enabled_service "neutron"; then ensure_certificates NEUTRON From f15224c740b880842e8d34e9a6c2ad08ba34448f Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 2 Mar 2017 12:45:47 -0500 Subject: [PATCH 0388/1936] Create cell1 cell before n-api starts Change ac5fdb4c4090efd682cc5c55aa30ec433da29fc7 introduced a problem for gnocchi CI because the deployments steps are now: 1. create cell0 2. start nova-api (with multiple workers) 3. install ceilometer via extras 4. ceilometer calls nova-api to list servers; at this point nova-api getes the list of cells and caches them, which will just be cell0 5. create cell1 via simple_cell_setup which also discovers the n-cpu node so we can schedule instances 6. gnocchi tests create and list instances and at this point it hits an n-api worker that only has cell0 cached so it does not find some test servers it created and fails. The cell0 and cell1 cells should be created in the nova_api db before starting n-api so that when we first list instances, we store both cells in the cache that's in n-api. This deployment order is also how the nova docs describe rolling out cells v2 but the way we were doing this devstack wasn't following that, or accounting for when devstack plugins are loaded via extras. This change creates the main cell1 cell earlier in the setup before n-api is started, and then changes to just run discover_hosts at the end after n-cpu is running (which is what simple_cell_setup and map_cell_and_hosts would do implicitly). Change-Id: I38eab6707340253a10159a169ae61d34784c2d28 Related-Bug: #1669473 --- lib/nova | 19 ++++++++----------- stack.sh | 2 +- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/lib/nova b/lib/nova index 0ce6699ad1..ac6df9dcf2 100644 --- a/lib/nova +++ b/lib/nova @@ -664,6 +664,10 @@ function init_nova_cells { $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 + + # Creates the single cells v2 cell for the child cell (v1) nova db. + nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \ + --transport-url $(get_transport_url child_cell) --name 'cell1' fi } @@ -720,6 +724,10 @@ function init_nova { # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations + + # create the cell1 cell for the main nova db where the hosts live + nova-manage cell_v2 create_cell --transport-url $(get_transport_url) \ + --name 'cell1' fi create_nova_cache_dir @@ -968,17 +976,6 @@ function create_flavors { fi } -# create_cell(): Group the available hosts into a cell -function create_cell { - if ! 
is_service_enabled n-cell; then - nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url) - else - nova-manage --config-file $NOVA_CELLS_CONF --verbose cell_v2 map_cell_and_hosts \ - --transport-url $(get_transport_url child_cell) --name 'cell1' - nova-manage db sync - fi -} - # Restore xtrace $_XTRACE_LIB_NOVA diff --git a/stack.sh b/stack.sh index 2f43db7418..4cee385a33 100755 --- a/stack.sh +++ b/stack.sh @@ -1364,7 +1364,7 @@ check_libs_from_git # Do this late because it requires compute hosts to have started if is_service_enabled n-api; then if is_service_enabled n-cpu; then - create_cell + $TOP_DIR/tools/discover_hosts.sh else # Some CI systems like Hyper-V build the control plane on # Linux, and join in non Linux Computes after setup. This From 80e82eac4cd127a68fceea37270d09f9cbd71c75 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 15 Jul 2016 22:53:17 -0400 Subject: [PATCH 0389/1936] Disable glance v1 by default Nova has been supporting glance v2 since Newton and removed support for glance v1 in Ocata: 97e7b97210139a7f7888f0d6901e499664de02a3 We should disable glance v1 by default because there are several test paths in Tempest which don't get run when glance v1 is available because it uses glance v1 rather than v2. Depends-On: I54db379f6fbe859fd9f1b0cdd5b74102539ab265 Change-Id: I7f962a07317cdad917ee896d79e49ee18938d074 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 58f1deff6f..0077de5e17 100644 --- a/lib/glance +++ b/lib/glance @@ -57,7 +57,7 @@ GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini -GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-True} +GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False} if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" From dc6e55021861afceae2a7778b299df94ceab0a6e Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 3 Mar 2017 16:55:50 -0800 Subject: [PATCH 0390/1936] Change auth_version to v3 on Tempest Keystone v3 API is CURRENT and the v2 API is deprecated now. So we need to change the default config of auth_version to fit for current API status. 
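For illustration only, a job that still needs the deprecated v2 behaviour can keep it by overriding the already existing knobs in local.conf; this is a sketch of the override, not part of the change itself:

    [[local|localrc]]
    ENABLE_IDENTITY_V2=True
    TEMPEST_AUTH_VERSION=v2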
Depends-On: Id5e5ed9bf4f8b0f9eb376bfc7c5801f0956da1d9 Change-Id: I801e6740258ddea2a1b628a209970e0307d39d12 --- lib/tempest | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index d95a9f5c3d..b48dd07f1b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -273,13 +273,11 @@ function configure_tempest { if [ "$ENABLE_IDENTITY_V2" == "True" ]; then # Run Identity API v2 tests ONLY if needed iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True - iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} else # Skip Identity API v2 tests by default iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - # Use v3 auth tokens for running all Tempest tests - iniset $TEMPEST_CONFIG identity auth_version v3 fi + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE From 2b4735f1b3ac2834fa46a288ba5d09290acfbf3d Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 10 Feb 2017 06:17:37 +0000 Subject: [PATCH 0391/1936] Added list of mlock-using processes to peakmem_tracker output The change makes peakmem_tracker list processes that lock memory pages from swapping to disk. It may be helpful when debugging oom-killer job failures in gate in case when dstat shows that swap is not fully used when oom-killer is triggered. The peakmem_tracker service was renamed into memory_tracker to reflect its new broader scope. Needed-By: I5862d92478397eac2e61b8a61ce3437b698678be Change-Id: I1dca120448ee87930fe903fd81277b58efaefc92 --- lib/dstat | 14 ++-- .../{peakmem_tracker.sh => memory_tracker.sh} | 64 ++++++++++++------- tools/mlock_report.py | 59 +++++++++++++++++ 3 files changed, 111 insertions(+), 26 deletions(-) rename tools/{peakmem_tracker.sh => memory_tracker.sh} (51%) create mode 100755 tools/mlock_report.py diff --git a/lib/dstat b/lib/dstat index b705948094..62795f5e72 100644 --- a/lib/dstat +++ b/lib/dstat @@ -21,16 +21,22 @@ function start_dstat { # A better kind of sysstat, with the top process per time slice run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" - # To enable peakmem_tracker add: - # enable_service peakmem_tracker + # To enable memory_tracker add: + # enable_service memory_tracker # to your localrc - run_process peakmem_tracker "$TOP_DIR/tools/peakmem_tracker.sh" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" + + # remove support for the old name when it's no longer used (sometime in Queens) + if is_service_enabled peakmem_tracker; then + deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead" + run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" + fi } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat - stop_process peakmem_tracker + stop_process memory_tracker } # Restore xtrace diff --git a/tools/peakmem_tracker.sh b/tools/memory_tracker.sh similarity index 51% rename from tools/peakmem_tracker.sh rename to tools/memory_tracker.sh index ecbd79a0bc..dac0267a7e 100755 --- a/tools/peakmem_tracker.sh +++ b/tools/memory_tracker.sh @@ -21,11 +21,15 @@ SLEEP_TIME=20 # around reclaimable memory. However, it is not available until 3.14 # kernel (i.e. Ubuntu LTS Trusty misses it). In that case, we fall # back to free+buffers+cache as the available memory. 
-USE_MEM_AVAILBLE=0 +USE_MEM_AVAILABLE=0 if grep -q '^MemAvailable:' /proc/meminfo; then USE_MEM_AVAILABLE=1 fi +function get_mem_unevictable { + awk '/^Unevictable:/ {print $2}' /proc/meminfo +} + function get_mem_available { if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then awk '/^MemAvailable:/ {print $2}' /proc/meminfo @@ -37,40 +41,56 @@ function get_mem_available { fi } -# whenever we see less memory available than last time, dump the -# snapshot of current usage; i.e. checking the latest entry in the -# file will give the peak-memory usage function tracker { local low_point + local unevictable_point low_point=$(get_mem_available) + # log mlocked memory at least on first iteration + unevictable_point=0 while [ 1 ]; do local mem_available mem_available=$(get_mem_available) - if [[ $mem_available -lt $low_point ]]; then - low_point=$mem_available + local unevictable + unevictable=$(get_mem_unevictable) + + if [ $mem_available -lt $low_point -o $unevictable -ne $unevictable_point ]; then echo "[[[" date + + # whenever we see less memory available than last time, dump the + # snapshot of current usage; i.e. checking the latest entry in the file + # will give the peak-memory usage + if [[ $mem_available -lt $low_point ]]; then + low_point=$mem_available + echo "---" + # always available greppable output; given difference in + # meminfo output as described above... + echo "memory_tracker low_point: $mem_available" + echo "---" + cat /proc/meminfo + echo "---" + # would hierarchial view be more useful (-H)? output is + # not sorted by usage then, however, and the first + # question is "what's using up the memory" + # + # there are a lot of kernel threads, especially on a 8-cpu + # system. do a best-effort removal to improve + # signal/noise ratio of output. + ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 | + grep -v ']$' + fi echo "---" - # always available greppable output; given difference in - # meminfo output as described above... - echo "peakmem_tracker low_point: $mem_available" - echo "---" - cat /proc/meminfo - echo "---" - # would hierarchial view be more useful (-H)? output is - # not sorted by usage then, however, and the first - # question is "what's using up the memory" - # - # there are a lot of kernel threads, especially on a 8-cpu - # system. do a best-effort removal to improve - # signal/noise ratio of output. - ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 | - grep -v ']$' + + # list processes that lock memory from swap + if [[ $unevictable -ne $unevictable_point ]]; then + unevictable_point=$unevictable + sudo ./tools/mlock_report.py + fi + echo "]]]" fi - sleep $SLEEP_TIME done } diff --git a/tools/mlock_report.py b/tools/mlock_report.py new file mode 100755 index 0000000000..1d23af90d7 --- /dev/null +++ b/tools/mlock_report.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python + +# This tool lists processes that lock memory pages from swapping to disk. 
+ +import re +import subprocess + +import psutil + + +SUMMARY_REGEX = re.compile(r".*\s+(?P[\d]+)\s+KB") + + +def main(): + try: + print _get_report() + except Exception as e: + print "Failure listing processes locking memory: %s" % str(e) + + +def _get_report(): + mlock_users = [] + for proc in psutil.process_iter(): + pid = proc.pid + # sadly psutil does not expose locked pages info, that's why we + # call to pmap and parse the output here + try: + out = subprocess.check_output(['pmap', '-XX', str(pid)]) + except subprocess.CalledProcessError as e: + # 42 means process just vanished, which is ok + if e.returncode == 42: + continue + raise + last_line = out.splitlines()[-1] + + # some processes don't provide a memory map, for example those + # running as kernel services, so we need to skip those that don't + # match + result = SUMMARY_REGEX.match(last_line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': pid, + 'locked': locked}) + + # produce a single line log message with per process mlock stats + if mlock_users: + return "; ".join( + "[%(name)s (pid:%(pid)s)]=%(locked)dKB" % args + # log heavy users first + for args in sorted(mlock_users, key=lambda d: d['locked']) + ) + else: + return "no locked memory" + + +if __name__ == "__main__": + main() From 45da777d2526acd355da974eb338695559dd64c8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sun, 5 Mar 2017 13:07:39 -0500 Subject: [PATCH 0392/1936] Change to messagingv2 for oslo_messaging_notifications.driver The oslo.messaging docs on the notification messaging driver says that "messaging" (1.0) is a legacy format and you should use messagingv2 unless otherwise required for that old format. By default we should be testing with messagingv2. 
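For reference, each of the iniset calls below writes the same stanza into the respective service configuration file, e.g. in nova.conf:

    [oslo_messaging_notifications]
    driver = messagingv2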
Change-Id: I3031afe7551a0c8dde46e1ccfacff445fb68e122 --- lib/cinder | 2 +- lib/glance | 4 ++-- lib/neutron-legacy | 2 +- lib/nova | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/cinder b/lib/cinder index 767fd00d34..c17cea06c7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -315,7 +315,7 @@ function configure_cinder { fi if is_service_enabled ceilometer; then - iniset $CINDER_CONF oslo_messaging_notifications driver "messaging" + iniset $CINDER_CONF oslo_messaging_notifications driver "messagingv2" fi if is_service_enabled tls-proxy; then diff --git a/lib/glance b/lib/glance index 58f1deff6f..0ba2cfa3d4 100644 --- a/lib/glance +++ b/lib/glance @@ -112,7 +112,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry - iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messaging + iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" @@ -125,7 +125,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api - iniset $GLANCE_API_CONF oslo_messaging_notifications driver messaging + iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b381b642c6..1a16a44986 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -798,7 +798,7 @@ function _configure_neutron_metadata_agent { } function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } function _configure_neutron_metering { diff --git a/lib/nova b/lib/nova index 79f07f2a99..4c9f30f593 100644 --- a/lib/nova +++ b/lib/nova @@ -575,7 +575,7 @@ function create_nova_conf { # Set the oslo messaging driver to the typical default. This does not # enable notifications, but it will allow them to function when enabled. - iniset $NOVA_CONF oslo_messaging_notifications driver "messaging" + iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" From e65ab4a1c57a56161ebbf51a10b3c47a665037d7 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 24 Feb 2017 17:47:55 +0000 Subject: [PATCH 0393/1936] lib/neutron: configure root_helper for agents Before the patch, we were only configuring root_helper_daemon to point to oslo.rootwrap, but not root_helper. (The former is used for long running commands only, while the latter is used for short lived commands.) This made neutron agents to directly call to sudo when a privileged process was to be executed. This failed because /etc/sudoers was not configured to allow anything except the rootwrap call itself. 
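Concretely, what each agent config needs is an [agent] section along these lines (the paths shown are only illustrative; the real values come from get_rootwrap_location and NEUTRON_CONF_DIR):

    [agent]
    root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    root_helper_daemon = sudo /usr/local/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf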
This patch simplifies rootwrap handling in the code; it also sets root_helper to point to rootwrap; as well as configure daemon in sudoers. While at it, we also set l2 agent to use rootwrap too. Hopefully, it will be enough for agents to actually configure backend as needed. Change-Id: Ib05a6e0e024f534d7f616d41d70fb67ecf6daeaf --- lib/neutron | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/lib/neutron b/lib/neutron index 44d41f8cf5..e72c9fe6ea 100644 --- a/lib/neutron +++ b/lib/neutron @@ -72,7 +72,8 @@ NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" +NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" # Additional neutron api config files declare -a _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS @@ -115,6 +116,13 @@ function cleanup_neutron_new { done } +# configure_root_helper_options() - Configure agent rootwrap helper options +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD" + iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD" +} + # configure_neutron() - Set config files, create data dirs, etc function configure_neutron_new { sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR @@ -171,6 +179,7 @@ function configure_neutron_new { if is_service_enabled neutron-agent; then iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then @@ -194,7 +203,7 @@ function configure_neutron_new { # make it so we have working DNS from guests iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True - iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" + configure_root_helper_options $NEUTRON_DHCP_CONF iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF fi @@ -203,7 +212,7 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT neutron_service_plugin_class_add router - iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" + configure_root_helper_options $NEUTRON_L3_CONF iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF fi @@ -215,7 +224,8 @@ function configure_neutron_new { iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" + # TODO(ihrachys) do we really need to set rootwrap for metadata agent? 
+ configure_root_helper_options $NEUTRON_META_CONF # TODO(dtroyer): remove the v2.0 hard code below iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI/v2.0 @@ -252,12 +262,6 @@ function configure_neutron_new { # configure_neutron_rootwrap() - configure Neutron's rootwrap function configure_neutron_rootwrap { - # Set the paths of certain binaries - neutron_rootwrap=$(get_rootwrap_location neutron) - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - local rootwrap_sudoer_cmd="${neutron_rootwrap} $NEUTRON_CONF_DIR/rootwrap.conf" - # Deploy new rootwrap filters files (owned by root). # Wipe any existing rootwrap.d files first if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then @@ -274,7 +278,8 @@ function configure_neutron_rootwrap { # Set up the rootwrap sudoers for Neutron tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd *" >$tempfile + echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile + echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile chmod 0440 $tempfile sudo chown root:root $tempfile sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap From c2c89e4b35a9ad5fb1a2670943328a9725b37001 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 23 Feb 2017 20:15:47 -0600 Subject: [PATCH 0394/1936] Use KEYSTONE_SERVICE_URI consistently lib/keystone builds KEYSTONE_SERVICE_URI so that other services don't need to reconstruct the identity URI. Many services already use it, but some parts were still building the identity URI from the different parts. This will allow changing the identity URI to include a path (e.g., to http:///identity) in 1 place rather than in multiple places. Change-Id: I58cbdbe591d8869807545e0815480fc3375e0479 --- lib/neutron | 2 +- lib/nova | 2 +- lib/placement | 2 +- lib/tempest | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index b30c5c8e89..ab005a9a01 100644 --- a/lib/neutron +++ b/lib/neutron @@ -282,7 +282,7 @@ function configure_neutron_rootwrap { function configure_neutron_nova_new { iniset $NOVA_CONF DEFAULT use_neutron True iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" + iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_URI/v3" iniset $NOVA_CONF neutron username neutron iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" iniset $NOVA_CONF neutron user_domain_name "Default" diff --git a/lib/nova b/lib/nova index 4c9f30f593..a36a740265 100644 --- a/lib/nova +++ b/lib/nova @@ -631,7 +631,7 @@ function create_nova_conf { function init_nova_service_user_conf { iniset $NOVA_CONF service_user send_service_user_token True iniset $NOVA_CONF service_user auth_type password - iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT" + iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI" iniset $NOVA_CONF service_user username nova iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" diff --git a/lib/placement b/lib/placement index e7ffe3330b..4cc5cd8b6a 100644 --- a/lib/placement +++ b/lib/placement @@ -100,7 +100,7 @@ function _config_placement_apache_wsgi { function configure_placement_nova_compute { iniset $NOVA_CONF placement auth_type "password" - iniset $NOVA_CONF placement auth_url 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" + iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI/v3" iniset $NOVA_CONF placement username placement iniset $NOVA_CONF placement password "$SERVICE_PASSWORD" iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" diff --git a/lib/tempest b/lib/tempest index d95a9f5c3d..25db046829 100644 --- a/lib/tempest +++ b/lib/tempest @@ -17,7 +17,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -257,7 +257,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION From 73a3e2decd2e7d9fd652fb40c4e697794d9abd05 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 7 Mar 2017 08:49:22 +0000 Subject: [PATCH 0395/1936] Updated from generate-devstack-plugins-list Change-Id: Iffde64ae167fa32a377d20a9628c17286f1bf958 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 17da67b816..58d393ae74 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -108,7 +108,6 @@ networking-midonet `git://git.openstack.org/openstack/networ networking-mlnx `git://git.openstack.org/openstack/networking-mlnx `__ networking-nec `git://git.openstack.org/openstack/networking-nec `__ networking-odl `git://git.openstack.org/openstack/networking-odl `__ -networking-ofagent `git://git.openstack.org/openstack/networking-ofagent `__ networking-onos `git://git.openstack.org/openstack/networking-onos `__ networking-ovn `git://git.openstack.org/openstack/networking-ovn `__ networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk `__ From f511c368f8e1e6690b7be4da926cad97d07ee85f Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 7 Mar 2017 06:31:49 +0000 Subject: [PATCH 0396/1936] lib/neutron: allow to add more ml2 extension drivers The patch will allow jobs to append new ml2 extension drivers without overriding port_security. 
Change-Id: I45f017d1b8a3054452c3166ed4fb460d21959adb --- lib/neutron | 23 ++++++++++++++++++++++- lib/neutron-legacy | 10 ++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c97104ec46..e69e25fd55 100644 --- a/lib/neutron +++ b/lib/neutron @@ -163,7 +163,7 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security + neutron_ml2_extension_driver_add port_security fi fi @@ -481,6 +481,18 @@ function neutron_service_plugin_class_add_new { iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } +function _neutron_ml2_extension_driver_add { + local driver=$1 + local drivers="" + + drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers) + if [ $drivers ]; then + drivers+="," + fi + drivers+="${driver}" + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers +} + function neutron_server_config_add_new { _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) } @@ -553,6 +565,15 @@ function neutron_service_plugin_class_add { fi } +function neutron_ml2_extension_driver_add { + if is_neutron_legacy_enabled; then + # Call back to old function + _neutron_ml2_extension_driver_add_old "$@" + else + _neutron_ml2_extension_driver_add "$@" + fi +} + function install_neutron_agent_packages { if is_neutron_legacy_enabled; then # Call back to old function diff --git a/lib/neutron-legacy b/lib/neutron-legacy index ccab527f66..86a2b1d58e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -870,6 +870,16 @@ function _neutron_service_plugin_class_add { fi } +# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver +function _neutron_ml2_extension_driver_add_old { + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" + fi +} + # mutnauq_server_config_add() - add server config file function mutnauq_server_config_add { _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) From afef8bf097356d7bb37c57d0daa44653c7905c17 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 6 Mar 2017 14:07:23 -0500 Subject: [PATCH 0397/1936] Make declared variables global When variables use the 'declare' directive, it is by default a local variable. While other variables have global scope. For example: declare -A AN_ARRAY # local in scope foo=1 # global in scope This causes errors to occur as some of the variables will be local only and others will be global. Update the code, as appropriate, so that variables using the 'declare' directive also include the '-g' flag to have them also be global. Not every instance of a declared variable has been updated. Closes-Bug: #1669509 Co-Authored-By: John L. 
Villalovos Change-Id: I2180b68fe861ad19c6d4ec0df0f9f8a528347862 --- functions | 2 +- functions-common | 18 +++++++++--------- inc/python | 2 +- lib/neutron | 2 +- lib/neutron-legacy | 4 ++-- stackrc | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/functions b/functions index 89ee3672d3..48c821c0a6 100644 --- a/functions +++ b/functions @@ -12,7 +12,7 @@ # ensure we don't re-source this in the same environment [[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0 -declare -r _DEVSTACK_FUNCTIONS=1 +declare -r -g _DEVSTACK_FUNCTIONS=1 # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) diff --git a/functions-common b/functions-common index 0d1b01f2fe..64f97f005a 100644 --- a/functions-common +++ b/functions-common @@ -37,12 +37,12 @@ set +o xtrace # ensure we don't re-source this in the same environment [[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0 -declare -r _DEVSTACK_FUNCTIONS_COMMON=1 +declare -r -g _DEVSTACK_FUNCTIONS_COMMON=1 # Global Config Variables -declare -A GITREPO -declare -A GITBRANCH -declare -A GITDIR +declare -A -g GITREPO +declare -A -g GITBRANCH +declare -A -g GITDIR TRACK_DEPENDS=${TRACK_DEPENDS:-False} @@ -306,7 +306,7 @@ function warn { # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` # ``os_CODENAME`` - vendor's codename for release: ``xenial`` -declare os_VENDOR os_RELEASE os_PACKAGE os_CODENAME +declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME # Make a *best effort* attempt to install lsb_release packages for the # user if not available. Note can't use generic install_package* @@ -361,7 +361,7 @@ function GetOSVersion { # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values -declare DISTRO +declare -g DISTRO function GetDistro { GetOSVersion @@ -2376,9 +2376,9 @@ function sudo_with_proxies { # Resolution is only in whole seconds, so should be used for long # running activities. -declare -A _TIME_TOTAL -declare -A _TIME_START -declare -r _TIME_BEGIN=$(date +%s) +declare -A -g _TIME_TOTAL +declare -A -g _TIME_START +declare -r -g _TIME_BEGIN=$(date +%s) # time_start $name # diff --git a/inc/python b/inc/python index 5afc07f636..d0e45df489 100644 --- a/inc/python +++ b/inc/python @@ -19,7 +19,7 @@ set +o xtrace # PROJECT_VENV contains the name of the virtual environment for each # project. A null value installs to the system Python directories. -declare -A PROJECT_VENV +declare -A -g PROJECT_VENV # Python Functions diff --git a/lib/neutron b/lib/neutron index 19568eaf25..4a547ac0bf 100644 --- a/lib/neutron +++ b/lib/neutron @@ -74,7 +74,7 @@ NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CON NEUTRON_CONFIG_ARG=${NEUTRON_CONFIG_ARG:-""} # Additional neutron api config files -declare -a _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS +declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Functions # --------- diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b381b642c6..96d86743ab 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -141,10 +141,10 @@ _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron # These config files are relative to ``/etc/neutron``. The above # example would specify ``--config-file /etc/neutron/file1`` for # neutron server. -declare -a Q_PLUGIN_EXTRA_CONF_FILES +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES # same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
-declare -a _Q_PLUGIN_EXTRA_CONF_FILES_ABS +declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf diff --git a/stackrc b/stackrc index afe385c2a1..14c5960ed4 100644 --- a/stackrc +++ b/stackrc @@ -5,7 +5,7 @@ # ensure we don't re-source this in the same environment [[ -z "$_DEVSTACK_STACKRC" ]] || return 0 -declare -r _DEVSTACK_STACKRC=1 +declare -r -g _DEVSTACK_STACKRC=1 # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) From d5919d08ba131ac2bde27251c8b4e77bf428e876 Mon Sep 17 00:00:00 2001 From: "jeremy.zhang" Date: Wed, 8 Mar 2017 15:27:37 +0800 Subject: [PATCH 0398/1936] Restrict enabling the manage_volume volume feature Not all Cinder backends support the 'manage volume' feature. The test that in tempest for this feature is specific to LVM and will *not* work for other work backends regardless of them supporting the feature. Change-Id: I055aa66738deb5ae2fb925429cec565e3901340c --- lib/tempest | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index d95a9f5c3d..95b25bcf4e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -439,7 +439,11 @@ function configure_tempest { TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) - + # Only turn on TEMPEST_VOLUME_MANAGE_VOLUME by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME) # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1) From b75a4928707feb53d0e306f8b289096eb5c0b37b Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Fri, 16 Oct 2015 15:57:50 +0200 Subject: [PATCH 0399/1936] Add a new FAQ entry for dev environments Since it's pretty common to see blogposts recommending to mount /opt/stack remotely or editing inline the code, adding some notes about the potential risk of a reclone that could impact weeks of work. Change-Id: I733d40b76fb02d8edf3719533fc8202547771871 Co-Authored-By: Stephen Finucane --- doc/source/faq.rst | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 7793d8eb68..e6c1d6ed41 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -18,6 +18,57 @@ production systems. Your best choice is probably to choose a `distribution of OpenStack `__. +Can I use DevStack as a development environment? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sure, you can. That said, there are a couple of things you should note before +doing so: + +- DevStack makes a lot of configuration changes to your system and should not + be run in your main development environment. + +- All the repositories that DevStack clones when deploying are considered + volatile by default and thus are subject to hard resets. This is necessary to + keep you in sync with the latest upstream, which is what you want in a CI + situation, but it can result in branches being overwritten and files being + removed. 
+ + The corollary of this is that if you are working on a specific project, using + the DevStack project repository (defaulted to ``/opt/stack/``) as + the single master repository for storing all your work is not recommended. + This behavior can be overridden by setting the ``RECLONE`` config option to + ``no``. Alternatively, you can avoid running ``stack.sh`` to redeploy by + restarting services manually. In any case, you should generally ensure work + in progress is pushed to Gerrit or otherwise backed up before running + ``stack.sh``. + +- If you use DevStack within a VM, you may wish to mount a local OpenStack + directory, such as ``~/src/openstack``, inside the VM and configure DevStack + to use this as the clone location using the ``{PROJECT}_REPO`` config + variables. For example, assuming you're using Vagrant and sharing your home + directory, you should place the following in ``local.conf``: + + .. code-block:: shell + + NEUTRON_REPO=/home/vagrant/src/neutron + NOVA_REPO=/home/vagrant/src/nova + KEYSTONE_REPO=/home/vagrant/src/keystone + GLANCE_REPO=/home/vagrant/src/glance + SWIFT_REPO=/home/vagrant/src/swift + HORIZON_REPO=/home/vagrant/src/horizon + CINDER_REPO=/home/vagrant/src/cinder + HEAT_REPO=/home/vagrant/src/heat + TEMPEST_REPO=/home/vagrant/src/tempest + HEATCLIENT_REPO=/home/vagrant/src/python-heatclient + GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient + NOVACLIENT_REPO=/home/vagrant/src/python-novaclient + NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient + OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient + HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools + HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates + NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas + # ... + Why a shell script, why not chef/puppet/... ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 93a415601562853301d44e82ab07ee6bf1734945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89douard=20Thuleau?= Date: Thu, 9 Mar 2017 18:53:18 +0100 Subject: [PATCH 0400/1936] Don't create keystone account for disabled service If service is disabled, don't expect to create the corresponding keystone account. Change-Id: I007088862de1c8643eca3a6b9b313f0125b9b8d1 --- stack.sh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 4cee385a33..f08d56f60a 100755 --- a/stack.sh +++ b/stack.sh @@ -1057,11 +1057,18 @@ if is_service_enabled keystone; then fi create_keystone_accounts - create_nova_accounts - create_glance_accounts - create_cinder_accounts - create_neutron_accounts - + if is_service_enabled nova; then + create_nova_accounts + fi + if is_service_enabled glance; then + create_glance_accounts + fi + if is_service_enabled cinder; then + create_cinder_accounts + fi + if is_service_enabled neutron; then + create_neutron_accounts + fi if is_service_enabled swift; then create_swift_accounts fi From 9bc7708c801221a009a8e5f963c4343d81a6c913 Mon Sep 17 00:00:00 2001 From: Dave Chen Date: Fri, 10 Mar 2017 05:34:21 +0800 Subject: [PATCH 0401/1936] Update the enabled service on compute nodes Change the service from 'n-network' to 'q-agt' since Nova network is not supported by default. 
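In other words, a compute-only node now enables the Neutron agent instead of nova-network, roughly:

    ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol

(The controller node is assumed to run the Neutron server and the remaining agents; exact service lists vary per deployment.)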
Change-Id: I085aac75d4c1b721498afa568c8ecfd5abc7b20c --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index dfc9936915..1b7f4cd356 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -176,7 +176,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol + ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" VNCSERVER_LISTEN=$HOST_IP From 1be04a0769124c8d44a22189d42d42712e87fed9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 10 Mar 2017 08:05:05 +0000 Subject: [PATCH 0402/1936] Updated from generate-devstack-plugins-list Change-Id: I95309e2857f0361b65dc6fbaf686aab94626c086 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 58d393ae74..266ab0dcc0 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -149,6 +149,7 @@ trove-dashboard `git://git.openstack.org/openstack/trove- vitrage `git://git.openstack.org/openstack/vitrage `__ vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard `__ vmware-nsx `git://git.openstack.org/openstack/vmware-nsx `__ +vmware-vspc `git://git.openstack.org/openstack/vmware-vspc `__ watcher `git://git.openstack.org/openstack/watcher `__ watcher-dashboard `git://git.openstack.org/openstack/watcher-dashboard `__ zaqar `git://git.openstack.org/openstack/zaqar `__ From d15f222e2b2c3ab40ae719cca115f984178e71e4 Mon Sep 17 00:00:00 2001 From: Ben Swartzlander Date: Tue, 24 Jan 2017 00:23:41 -0500 Subject: [PATCH 0403/1936] Fix IPv6 provider networks Add a missing --subnet-range argument when creating an ipv6 provider network. Also changed SUBNET_V6_ID to IPV6_SUBNET_ID. And remove the --ipv6-address-mode arg because it doesn't apply to subnets on routers. 
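Stripped of the subnet-pool and cloud/region plumbing, the corrected call has roughly this shape (a simplified sketch of the full command in the diff below):

    openstack subnet create $IPV6_PROVIDER_SUBNET_NAME \
        --project $project_id --ip-version 6 \
        --gateway $IPV6_PROVIDER_NETWORK_GATEWAY \
        --network $NET_ID \
        --subnet-range $IPV6_PROVIDER_FIXED_RANGE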
Change-Id: I82796804a06e758e458606dc9eb400bcd08ad6e4 --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index e87a30cadb..07974feb10 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -197,8 +197,8 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2) - die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" + IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID --subnet-range $fixed_range_v6 | grep ' id ' | get_field 2) + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi if [[ $Q_AGENT == "openvswitch" ]]; then From b763dbda739c644ca048059d402d5667fe0e45ef Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Tue, 7 Mar 2017 20:47:58 +0800 Subject: [PATCH 0404/1936] Remove glare from glance glare has been removed from glance already. Now error will be raised if enable g-glare in local.conf. Remove the glare support by glance. Change-Id: I9a389af194dd2b8aed75d3c921293d800f8c591b --- lib/glance | 73 ------------------------------------------------------ 1 file changed, 73 deletions(-) diff --git a/lib/glance b/lib/glance index 6125f45cd8..2f4aa5f0f8 100644 --- a/lib/glance +++ b/lib/glance @@ -55,8 +55,6 @@ GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf -GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf -GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False} if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then @@ -72,8 +70,6 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} -GLANCE_GLARE_PORT=${GLANCE_GLARE_PORT:-9494} -GLANCE_GLARE_HOSTPORT=${GLANCE_GLARE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_GLARE_PORT} # Functions # --------- @@ -98,9 +94,6 @@ function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR # Copy over our glance configurations and update them - if is_service_enabled g-glare; then - cp $GLANCE_DIR/etc/glance-glare.conf $GLANCE_GLARE_CONF - fi cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS @@ -143,9 +136,6 @@ function configure_glance { # Store specific configs iniset $GLANCE_API_CONF glance_store filesystem_store_datadir 
$GLANCE_IMAGE_DIR/ - if is_service_enabled g-glare; then - iniset $GLANCE_GLARE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - fi iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" @@ -172,22 +162,6 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift - # Store the glare in swift if enabled. - if is_service_enabled g-glare; then - iniset $GLANCE_GLARE_CONF glance_store default_store swift - iniset $GLANCE_GLARE_CONF glance_store swift_store_create_container_on_put True - - iniset $GLANCE_GLARE_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF - iniset $GLANCE_GLARE_CONF glance_store default_swift_reference ref1 - iniset $GLANCE_GLARE_CONF glance_store stores "file, http, swift" - iniset $GLANCE_GLARE_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - # commenting is not strictly necessary but it's confusing to have bad values in conf - inicomment $GLANCE_GLARE_CONF glance_store swift_store_user - inicomment $GLANCE_GLARE_CONF glance_store swift_store_key - inicomment $GLANCE_GLARE_CONF glance_store swift_store_auth_address - fi - iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD if python3_enabled; then # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag @@ -266,29 +240,6 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" fi - - # Configure GLANCE_GLARE (Glance Glare) - if is_service_enabled g-glare; then - local dburl - dburl=`database_connection_url glance` - setup_logging $GLANCE_GLARE_CONF - iniset $GLANCE_GLARE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_GLARE_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_GLARE_CONF DEFAULT bind_port $GLANCE_GLARE_PORT - inicomment $GLANCE_GLARE_CONF DEFAULT log_file - iniset $GLANCE_GLARE_CONF DEFAULT workers "$API_WORKERS" - - iniset $GLANCE_GLARE_CONF database connection $dburl - iniset $GLANCE_GLARE_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_GLARE_CONF glare $GLANCE_AUTH_CACHE_DIR/artifact - # Register SSL certificates if provided - if is_ssl_enabled_service glance; then - ensure_certificates GLANCE - iniset $GLANCE_GLARE_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_GLARE_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - fi - cp $GLANCE_DIR/etc/glance-glare-paste.ini $GLANCE_GLARE_PASTE_INI - fi } # create_glance_accounts() - Set up common required glance accounts @@ -298,7 +249,6 @@ function configure_glance { # SERVICE_PROJECT_NAME glance service # SERVICE_PROJECT_NAME glance-swift ResellerAdmin (if Swift is enabled) # SERVICE_PROJECT_NAME glance-search search (if Search is enabled) -# SERVICE_PROJECT_NAME glare service (if enabled) function create_glance_accounts { if is_service_enabled g-api; then @@ -321,16 +271,6 @@ function create_glance_accounts { iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id fi - - # Add glance-glare service and endpoints - if is_service_enabled g-glare; then - create_service_user "glare" - get_or_create_service "glare" "artifact" "Glance Artifact Service" - - get_or_create_endpoint 
"artifact" \ - "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" - fi } # create_glance_cache_dir() - Part of the init_glance() process @@ -400,15 +340,6 @@ function start_glance { if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then die $LINENO "g-api did not start" fi - - #Start g-glare after g-reg/g-api - if is_service_enabled g-glare; then - run_process g-glare "$GLANCE_BIN_DIR/glance-glare --config-file=$GLANCE_CONF_DIR/glance-glare.conf" - echo "Waiting for Glare [g-glare] ($GLANCE_GLARE_HOSTPORT) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT; then - die $LINENO " Glare [g-glare] did not start" - fi - fi } # stop_glance() - Stop running processes @@ -416,10 +347,6 @@ function stop_glance { # Kill the Glance screen windows stop_process g-api stop_process g-reg - - if is_service_enabled g-glare; then - stop_process g-glare - fi } # Restore xtrace From 3345a6d316bd50f005d22bfd231a4f9c0a5b7d62 Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Mon, 13 Mar 2017 13:47:34 -0700 Subject: [PATCH 0405/1936] Use string comparison to compare UUID values Was using the '-ne' integer comparison operator to compare UUID values. This caused error messages like: /opt/stack/new/devstack/lib/tempest: line 226: [[: dfae26ac-1780-4677-902d: value too great for base (error token is "902d") Change it to use '!=' string comparison operator Change-Id: Ib7c9197dd0fe58addf33b4f82beea6de64f6b10b --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index a9461d42ff..cf7eb6f722 100644 --- a/lib/tempest +++ b/lib/tempest @@ -223,7 +223,7 @@ function configure_tempest { # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. for f in ${flavors[@]:1}; do - if [[ $f -ne $flavor_ref ]]; then + if [[ "$f" != "$flavor_ref" ]]; then flavor_ref_alt=$f break fi From 99d5d1ce82e59347d6e90a8756dd6a08153a1486 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 15 Mar 2017 13:42:25 +0100 Subject: [PATCH 0406/1936] FAQ: remove references to stable/kilo: use stable/ocata instead It's 2017, some of our newest OpenStack developers/users may not even know what stable/kilo is/was. Change-Id: I00f39cc80af7e1632293bf057d95040b6bfa48e0 --- doc/source/faq.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 7793d8eb68..f03304f37f 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -130,8 +130,8 @@ How do I run a specific OpenStack release? DevStack master tracks the upstream master of all the projects. If you would like to run a stable branch of OpenStack, you should use the corresponding stable branch of DevStack as well. For instance the -``stable/kilo`` version of DevStack will already default to all the -projects running at ``stable/kilo`` levels. +``stable/ocata`` version of DevStack will already default to all the +projects running at ``stable/ocata`` levels. Note: it's also possible to manually adjust the ``*_BRANCH`` variables further if you would like to test specific milestones, or even custom From 4b59fbb8573c57b27cf1ceb2a043cbe9cf7fd111 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Wed, 15 Mar 2017 21:58:48 +0000 Subject: [PATCH 0407/1936] Revert "put mysql on a memory diet" The diet seems to be too strict, jobs failing with "out of sort memory". Needs more investigation before resubmitting. 
This reverts commit 1e66388c5f2b81b4fc5d544dbf5fde2935218bd0. Change-Id: Ic10effaaf047eb3527082baab889772c5e57fa90 --- lib/databases/mysql | 183 +------------------------------------------- 1 file changed, 1 insertion(+), 182 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index e2c83433d2..7bbcace399 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -96,191 +96,10 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB - - # the number of connections has been throttled to 256. In the - # event that the gate jobs report "Too many connections" it is - # indicative of a problem that could be the result of one of many - # things. For more details about debugging this error, refer - # https://dev.mysql.com/doc/refman/5.5/en/too-many-connections.html. - # Note that the problem may not ONLY be an issue with MySQL - # connections. If the number of fd's at the OS is too low, you - # could see errors manifest as MySQL "too many connections". - iniset -sudo $my_conf mysqld max_connections 256 + iniset -sudo $my_conf mysqld max_connections 1024 iniset -sudo $my_conf mysqld query_cache_type OFF iniset -sudo $my_conf mysqld query_cache_size 0 - # Additional settings to put MySQL on a memory diet. These - # settings are used in conjunction with the cap on max_connections - # as the total memory used by MySQL can be simply viewed as - # fixed-allocations + max_connections * variable-allocations. A - # nifty tool to help with this is - # http://www.mysqlcalculator.com/. A short description of each of - # the settings follows. - - # binlog_cache_size, determines the size of cache to hold changes - # to the binary log during a transaction, for each connection. For - # more details, refer - # https://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#sysvar_binlog_cache_size - # When binary logging is enabled, a smaller binlog cache could - # result in more frequent flushes to the disk and a larger value - # would result in less flushes to the disk but higher memory - # usage. This however only has to do with large transactions; if - # you have a small transaction the binlog cache is necessarily - # flushed on a transaction commit. This is a per-connection cache. - iniset -sudo $my_conf mysqld binlog_cache_size 4K - - # binlog_stmt_cache_size determines the size of cache to hold non - # transactional statements in the binary log. For more details, - # refer - # https://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#sysvar_binlog_stmt_cache_size - # This cache holds changes to non-transactional tables (read: - # MyISAM) or any non-transactional statements which cause - # modifications to data (truncate is an example). These are - # written to disk immediately on completion of the statement or - # when the cache is full. If the cache is too small, you get - # frequent writes to the disk (flush) and if the cache is too - # large, it takes up more memory. This is a per-connection cache. - iniset -sudo $my_conf mysqld binlog_stmt_cache_size 4K - - # bulk_insert_buffer_size for MyISAM tables that use a special - # cache for insert statements and load statements, this cache is - # used to optimize writes to the disk. If the value is set to 0, - # the optimization is disabled. 
For more details refer - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_bulk_insert_buffer_size - # We set this to 0 which could result in higher disk I/O (I/O on - # each insert block completion). - iniset -sudo $my_conf mysqld bulk_insert_buffer_size 0 - - # host_cache_size controls a DNS lookup optimization. For more - # details refer - # https://dev.mysql.com/doc/refman/5.6/en/host-cache.html - iniset -sudo $my_conf mysqld host_cache_size 0 - - # innodb_buffer_pool_size This is the size of the server wide - # buffer pool. It is the cache for all data blocks being used by - # the server and is managed as a LRU chain. Dirty blocks either - # age off the list or are forced off when the list is - # full. Setting this to 5MB (default 128MB) reduces the amount of - # memory used by the server and this will result in more disk I/O - # in cases where (a) there is considerable write activity that - # overwhelms the allocated cache, or (b) there is considerable - # read activity on a data set that exceeds the allocated - # cache. For more details, refer - # https://dev.mysql.com/doc/refman/5.6/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size - iniset -sudo $my_conf mysqld innodb_buffer_pool_size 5M - - # innodb_ft_cache_size and innodb_ft_total_cache_size control the - # per-connection full text search cache and the server wide - # maximum full text search cache. We should not be using full text - # search and the value is set to the minimum allowable. The former - # is a per-connection cache size and the latter is server - # wide. For more details, refer - # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_cache_size - # and - # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_total_cache_size - iniset -sudo $my_conf mysqld innodb_ft_cache_size 1600000 - iniset -sudo $my_conf mysqld innodb_ft_total_cache_size 32000000 - - # innodb_log_buffer_size This buffer is used to buffer - # transactions in-memory before writing them to the innodb - # internal transaction log. Large transactions, or high amounts of - # concurrency, will cause the system to fill this faster and thus - # make the system more disk-bound. For more details, refer - # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size - iniset -sudo $my_conf mysqld innodb_log_buffer_size 256K - - # innodb_sort_buffer_size, This buffer is used for sorting when - # InnoDB is creating indexes. Could cause that to be slower, but - # only if tables are large. This is a per-connection setting. For - # more details, refer - # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_sort_buffer_size - iniset -sudo $my_conf mysqld innodb_sort_buffer_size 64K - - # join_buffer_size, This buffer makes table and index scans - # faster. So this setting could make some queries more disk - # bound. This is a per-connection setting. For more details refer - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size. - iniset -sudo $my_conf mysqld join_buffer_size 128 - - # key_buffer_size defines the index blocks used for MyISAM tables - # and shared between threads. This is a server wide setting. For - # more details see - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_key_buffer_size - iniset -sudo $my_conf mysqld key_buffer_size 8 - - # max_heap_table_size sets the maximum amount of memory for MEMORY - # tables (which we don't use). 
The value is set to 16k, the - # minimum allowed. For more details, see - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size - iniset -sudo $my_conf mysqld max_heap_table_size 16K - - # net_buffer_length Each client has a buffer for incoming and - # outgoing data, both start with a size of net_buffer_length and - # can grow (in steps of 2x) upto a size of max_allowed_packet. For - # more details see - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_buffer_length - iniset -sudo $my_conf mysqld net_buffer_length 1K - - # read_buffer_size, read_rnd_buffer_size are per-thread buffer - # used for scans on MyISAM tables. It is a per-connection setting - # and so we set it to the minimum value allowable. Same for - # read_rnd_buffer_size. For more details refer - # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_read_buffer_size - # and - # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_read_rnd_buffer_size - iniset -sudo $my_conf mysqld read_buffer_size 8200 - iniset -sudo $my_conf mysqld read_rnd_buffer_size 8200 - - # sort_buffer_size when a sort is requested, it will be performed - # in memory in a buffer of this size (allocated per connection) - # and if the data exceeds this size it will spill to disk. The - # innodb and myisam variables are used in computing indices for - # tables using the specified storage engine. Since we don't - # dynamically reindex (except during upgrade) these values should - # never be material. Obviously performance of disk based sorts is - # worse than in memory sorts and therefore a high value here will - # improve sort performance for large data. For more details, - # refer: - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size - # and - # https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_sort_buffer_size - # and - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_myisam_sort_buffer_size - iniset -sudo $my_conf mysqld sort_buffer_size 32K - iniset -sudo $my_conf mysqld innodb_sort_buffer_size 64K - iniset -sudo $my_conf mysqld myisam_sort_buffer_size 4K - - # thread_cache_size specifies how many internal threads to cache - # for use with incoming connections. We set this to 0 whic means - # that each connection will cause a new thread to be created. This - # could cause connections to take marginally longer on os'es with - # slow pthread_create calls. For more details, refer - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size - iniset -sudo $my_conf mysqld thread_cache_size 0 - - # thread_stack is the per connection stack size, the minimum is - # 128k and the default is 192k on 32bit and 256k on 64bit - # systems. We set this to 192k. Complex queries which require - # recursion, stored procedures or other memory intensive - # operations could exhaust this and generate a very characteristic - # failure ("stack overflow") which is cleanly detected and the - # query is killed. For more details see - # https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_thread_stack - iniset -sudo $my_conf mysqld thread_stack 196608 - - # tmp_table_size is the maximum size of an in-memory temporary - # table. Temporary tables are created by MySQL as part of a - # multi-step query plan. 
The actual size of the temp table will be - # the lesser of tmp_table_size and max_heap_table_size. If a - # temporary table exceeds this size, it will be spooled to disk - # using the internal_tmp_disk_storage_engine (default - # MyISAM). Queries that often generate in-memory temporary tables - # include queries that have sorts, distinct, or group by - # operations, also queries that perform IN joins. For more details - # see - # https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size - iniset -sudo $my_conf mysqld tmp_table_size 1K - if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" if is_fedora; then From dab52d755481500dce3bf5f990037cde5d8022eb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 Mar 2017 07:51:22 +0000 Subject: [PATCH 0408/1936] Updated from generate-devstack-plugins-list Change-Id: I55dea8141046350f9cf06afb7edc957c35397b38 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 266ab0dcc0..cc55c0bd20 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -128,6 +128,7 @@ nova-mksproxy `git://git.openstack.org/openstack/nova-m nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ oaktree `git://git.openstack.org/openstack/oaktree `__ octavia `git://git.openstack.org/openstack/octavia `__ +octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard `__ os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ From 5f8bd0e452957c2b8f7c2a9c962342e69d33b17e Mon Sep 17 00:00:00 2001 From: jianghua Date: Tue, 14 Mar 2017 08:04:53 +0000 Subject: [PATCH 0409/1936] Xen: restrict devstack VM use upto 8 vCPUs. Change-Id: Iea6086a3feb03dff581d9c04560133c4f57a6f6c Closes-Bug: 1672642 --- tools/xen/functions | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/xen/functions b/tools/xen/functions index 93f3413d6d..bc0c515e01 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -294,6 +294,18 @@ function max_vcpus { # Assert ithas a numeric nonzero value expr "$cpu_count" + 0 + # 8 VCPUs should be enough for devstack VM; avoid using too + # many VCPUs: + # 1. too many VCPUs may trigger a kernel bug which result VM + # not able to boot: + # https://kernel.googlesource.com/pub/scm/linux/kernel/git/wsa/linux/+/e2e004acc7cbe3c531e752a270a74e95cde3ea48 + # 2. The remaining CPUs can be used for other purpose: + # e.g. boot test VMs. + MAX_VCPUS=8 + if [ $cpu_count -ge $MAX_VCPUS ]; then + cpu_count=$MAX_VCPUS + fi + xe vm-param-set uuid=$vm VCPUs-max=$cpu_count xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count } From fe1d3d6a7b3faa804800292c48f6ed2d75157963 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 15 Mar 2017 13:27:20 +0100 Subject: [PATCH 0410/1936] Apache Keystone Template: reduce the number of processes to 3 Now Apache2 has 5 dedicated processes for Keystone Admin and 5 for Keystone Public. As each Apache process consumes some memory and we arbitrarly decided 5 was a good number more than 2 years ago, maybe now (with the recent memory pressure we feel) is a good time to reconcider. 
With 5 processes our peakmem_tracker.py script reports a max RSS size
for the "wsgi:keystone-ad" and "wsgi:keystone-pu" processes of
2 (public and admin) * 5 (number of processes) * 90 MB (RSS of each
process) = 900 MB. With 3 processes, the overall max RSS for Keystone
is 2 * 3 * 90 = 540 MB.

Note that this is RSS memory, but using the "smem" Linux command on my
laptop, I noticed that the USS (Unique set size, i.e. RSS excluding
shared memory) is around 80 MB per process. So reducing the number of
processes will actually reduce memory consumption.

Change-Id: Iba72d94aa15ecaa87c0115ad26d6b62d5b3bea0a
---
 files/apache-keystone.template | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 84dc273200..128436027d 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -7,7 +7,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)"

-    WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-public
     WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public
     WSGIApplicationGroup %{GLOBAL}
@@ -21,7 +21,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)"

-    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-admin
     WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
     WSGIApplicationGroup %{GLOBAL}

From 8cf9acd577a30bf9e6a54a9d82b9b7fc9ae769fb Mon Sep 17 00:00:00 2001
From: Clark Boylan
Date: Thu, 16 Mar 2017 14:06:58 -0700
Subject: [PATCH 0411/1936] Tune apache connection limits down

We are facing memory pressure in gate testing. Apache is fairly large
so tune its connection limits down to try and squeeze out more usable
memory. This should be fine for dev envs, also tlsproxy is not enabled
by default so we can check that this tuning works well on a subset of
jobs before making it default everywhere.

Data comparisons done with gate-tempest-dsvm-neutron-full-ubuntu-xenial
jobs.

Old:
http://logs.openstack.org/37/447037/2/check/gate-tempest-dsvm-neutron-full-ubuntu-xenial/721fc6f/logs/screen-peakmem_tracker.txt.gz
  PID  %MEM   RSS  PPID     TIME NLWP WCHAN COMMAND
20504   0.2 16660 19589 00:00:00   34 -     /usr/sbin/apache2 -k start
20505   0.2 16600 19589 00:00:00   34 -     /usr/sbin/apache2 -k start
20672   0.2 16600 19589 00:00:00   34 -     /usr/sbin/apache2 -k start
20503   0.1 14388 19589 00:00:00   34 -     /usr/sbin/apache2 -k start
19589   0.1  9964     1 00:00:00    1 -     /usr/sbin/apache2 -k start
Total RSS: 74212

New:
http://logs.openstack.org/41/446741/1/check/gate-tempest-dsvm-neutron-full-ubuntu-xenial/fa4d2e6/logs/screen-peakmem_tracker.txt.gz
 PID  %MEM   RSS  PPID     TIME NLWP WCHAN COMMAND
8036   0.1 15316  8018 00:00:01   34 -     /usr/sbin/apache2 -k start
8037   0.1 15228  8018 00:00:01   34 -     /usr/sbin/apache2 -k start
8018   0.1  8584     1 00:00:00    1 -     /usr/sbin/apache2 -k start
Total RSS: 39128

Note RSS here is in KB. Total difference is 35084KB or about 34MB. Not
the biggest change, but we seem to be functional and it almost halves
the apache overhead.
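As a rough sanity check of the numbers (illustrative only, not part of
the diff below), the three values that must be changed together satisfy
MaxClients = ServerLimit * ThreadsPerChild:

    # new limits: 8 server processes * 32 threads each
    echo $(( 8 * 32 ))    # 256  -> the new MaxClients
    # old limits being replaced
    echo $(( 32 * 32 ))   # 1024 -> the previous MaxClients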
Change-Id: If82fa347db140021197a215113df4ce38fb4fd17 --- lib/tls | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/tls b/lib/tls index f9ef554a6c..fb2fa3a17b 100644 --- a/lib/tls +++ b/lib/tls @@ -457,29 +457,30 @@ function tune_apache_connections { # MaxClients: maximum number of simultaneous client connections # MaxRequestsPerChild: maximum number of requests a server process serves # -# The apache defaults are too conservative if we want reliable tempest -# testing. Bump these values up from ~400 max clients to 1024 max clients. +# We want to be memory thrifty so tune down apache to allow 256 total +# connections. This should still be plenty for a dev env yet lighter than +# apache defaults. # Note that the next three conf values must be changed together. # MaxClients = ServerLimit * ThreadsPerChild -ServerLimit 32 +ServerLimit 8 ThreadsPerChild 32 -MaxClients 1024 -StartServers 3 -MinSpareThreads 96 -MaxSpareThreads 192 +MaxClients 256 +StartServers 2 +MinSpareThreads 32 +MaxSpareThreads 96 ThreadLimit 64 MaxRequestsPerChild 0 # Note that the next three conf values must be changed together. # MaxClients = ServerLimit * ThreadsPerChild -ServerLimit 32 +ServerLimit 8 ThreadsPerChild 32 -MaxClients 1024 -StartServers 3 -MinSpareThreads 96 -MaxSpareThreads 192 +MaxClients 256 +StartServers 2 +MinSpareThreads 32 +MaxSpareThreads 96 ThreadLimit 64 MaxRequestsPerChild 0 From 4ae92b846b45ac9ef02e9d19b5d9ddcb4ac05bf3 Mon Sep 17 00:00:00 2001 From: Julian Edwards Date: Thu, 16 Mar 2017 09:01:39 +1000 Subject: [PATCH 0412/1936] Make running_in_container work in more containers Instead of grepping for 'lxc' in /proc/1/cgroup, use systemd's features. This now at least also works in LXD containers. Change-Id: I35e807c26f0b1fbba83ddbe04cfb4901a7a95cbe --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 1aa7517a8a..4435f2de2f 100644 --- a/functions +++ b/functions @@ -666,7 +666,7 @@ function set_mtu { # running_in_container - Returns true otherwise false function running_in_container { - if grep -q lxc /proc/1/cgroup; then + if [[ $(systemd-detect-virt --container) == 'none' ]]; then return 0 fi From a4c57cadf2fcc448dbee04a5f911af5613038678 Mon Sep 17 00:00:00 2001 From: David Rabel Date: Fri, 17 Mar 2017 16:14:00 +0100 Subject: [PATCH 0413/1936] Replace "sid" and "testing" by "stretch" Supported Debian distros (codenames) are "sid", "testing", and "jessie", but it should be "stretch" and "jessie". "testing" is no codename and therefore should be replaced by "stretch". "sid" changes all the time and cannot be guaranteed to run correctly or is at least not tested. Change-Id: Id4b80a055452bbff69036d4dc1adeda46ce99664 Closes-Bug: #1673810 Closes-Bug: #1674416 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index f08d56f60a..20cdc1dfcc 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|sid|testing|jessie|f24|f25|rhel7|kvmibm1) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From bacfb94390e0680fac13cb7f7236b9d5d0e89b89 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 20 Mar 2017 22:27:20 -0700 Subject: [PATCH 0414/1936] Enable openSUSE to work in Python 3.x environments Add packages required to run devstack with USE_PYTHON3=True. Change-Id: Iee43c9335bd82c10cfaeffb02d1d99290c34bb83 --- inc/python | 2 ++ 1 file changed, 2 insertions(+) diff --git a/inc/python b/inc/python index a4819c2d9d..2443c4d465 100644 --- a/inc/python +++ b/inc/python @@ -553,6 +553,8 @@ function python3_enabled { function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev + elif is_suse; then + install_package python3-devel python3-dbm fi } From 583c52066da6738d12201bf3fcbb02b6c6690cfc Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Mar 2017 11:15:05 -0400 Subject: [PATCH 0415/1936] rebuild the tempest tox env during install When redoing a stack.sh over and over again, tempest venv can get out of sync and cause issues until deleted. We should rebuild that tempest venv on every stack. Change-Id: I2f66bb1a7ccf9f89e11db1326d8553589e52fbf2 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index cf7eb6f722..1160ee723c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -618,7 +618,7 @@ function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH pip_install tox pushd $TEMPEST_DIR - tox --notest -efull + tox -r --notest -efull # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements From 983cccb75be919677bacb8c9b292550075b0358f Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Tue, 21 Mar 2017 18:37:24 -0400 Subject: [PATCH 0416/1936] Enable baremetal scheduler filters when using ironic These are recommended for all ironic deploys; turn them on. 
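For reference (illustrative, not part of this change; it assumes the
default /etc/nova/nova.conf path devstack uses for $NOVA_CONF), the
effect of the new iniset call can be checked after stacking with:

    grep use_baremetal_filters /etc/nova/nova.conf
    # expected output: use_baremetal_filters = True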
Change-Id: Ia3df144e626266ed1774c4cd9863aedb876c409f --- lib/nova_plugins/hypervisor-ironic | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 7ffd14d046..c9544fe6c7 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -42,6 +42,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager + iniset $NOVA_CONF filter_scheduler use_baremetal_filters True iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 # ironic section From c779b00840b79ce9c0b3a3c80ade5267e6fcb5f8 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Thu, 16 Feb 2017 20:10:36 -0800 Subject: [PATCH 0417/1936] Remove XenServer specific ovs agent config With XenServer we have two neutron-openvswitch-agent(q-agt, q-domua) For the q-domua it is specific for XenServer, this patch is to move the specific configurations to os-xenapi which we have devstack plugin in that repo Depends-On: Ic816404c84f6a8899d01a77cb67fbfb421653e6b Change-Id: I8a31c81d9475387fe4ed7030b70b26098e588771 --- lib/neutron-legacy | 27 ----------- lib/neutron_plugins/openvswitch_agent | 65 --------------------------- lib/nova_plugins/hypervisor-xenserver | 14 ------ 3 files changed, 106 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index ccab527f66..41bd0c0d78 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -432,24 +432,6 @@ function install_mutnauq { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR - - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # Find where the plugins should go in dom0 - local xen_functions - xen_functions=$(cat $TOP_DIR/tools/xen/functions) - local plugin_dir - plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location") - - # install neutron plugins to dom0 - tar -czf - -C $NEUTRON_DIR/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | - $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*" - fi } # install_neutron_agent_packages() - Collect source and prepare @@ -523,11 +505,6 @@ function start_mutnauq_other_agents { run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" - - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # For XenServer, start an agent for the domU openvswitch - run_process q-domua "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" - fi } # Start running processes, including screen @@ -539,10 +516,6 @@ function start_neutron_agents { function stop_mutnauq_l2_agent { stop_process q-agt - - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - stop_process q-domua - fi } # stop_mutnauq_other() - Stop running processes (non-screen) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index acab582212..b65a2587c2 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -11,12 +11,6 @@ source 
$TOP_DIR/lib/neutron_plugins/ovs_base function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - iniset $NOVA_CONF xenserver vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver - iniset $NOVA_CONF xenserver ovs_integration_bridge $XEN_INTEGRATION_BRIDGE - # Disable nova's firewall so that it does not conflict with neutron - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver - fi } function neutron_plugin_install_agent_packages { @@ -58,65 +52,6 @@ function neutron_plugin_configure_plugin_agent { fi AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-openvswitch-agent" - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - # Make a copy of our config for domU - sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU" - - # change domU's config file to STACK_USER - sudo chown $STACK_USER:$STACK_USER /$Q_PLUGIN_CONF_FILE.domU - - # Deal with Dom0's L2 Agent: - Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE" - - # For now, duplicate the xen configuration already found in nova.conf - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_username "$XENAPI_USER" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_password "$XENAPI_PASSWORD" - - # Under XS/XCP, the ovs agent needs to target the dom0 - # integration bridge. This is enabled by using a root wrapper - # that executes commands on dom0 via a XenAPI plugin. - # XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "" - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper_daemon "xenapi_root_helper" - iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_url "$XENAPI_CONNECTION_URL" - iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_username "$XENAPI_USER" - iniset "/$Q_PLUGIN_CONF_FILE.domU" xenapi connection_password "$XENAPI_PASSWORD" - - # Disable minimize polling, so that it can always detect OVS and Port changes - # This is a problem of xenserver + neutron, bug has been reported - # https://bugs.launchpad.net/neutron/+bug/1495423 - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent minimize_polling False - - # Set "physical" mapping - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" - - # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $XEN_INTEGRATION_BRIDGE - - # Set OVS native interface for ovs-agent in compute node - XEN_DOM0_IP=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3) - iniset /$Q_PLUGIN_CONF_FILE.domU ovs ovsdb_connection tcp:$XEN_DOM0_IP:6640 - iniset /$Q_PLUGIN_CONF_FILE.domU ovs of_listen_address $HOST_IP - - # Set up domU's L2 agent: - - # Create a bridge "br-$VLAN_INTERFACE" - _neutron_ovs_base_add_bridge "br-$VLAN_INTERFACE" - # Add $VLAN_INTERFACE to that bridge - sudo ovs-vsctl -- --may-exist add-port "br-$VLAN_INTERFACE" $VLAN_INTERFACE - - # Create external bridge and add port - _neutron_ovs_base_add_public_bridge - sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE - - # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE" - # Set integration bridge to domU's - iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE - # Set root wrap - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - fi 
iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE } diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index 0046a366c9..4abb92a654 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -96,20 +96,6 @@ CRONTAB echo "create_directory_for_kernels" echo "install_conntrack_tools" } | $ssh_dom0 - - if is_service_enabled neutron; then - # Remove restriction on linux bridge in Dom0 when neutron is enabled - $ssh_dom0 "rm -f /etc/modprobe.d/blacklist-bridge*" - - count=`$ssh_dom0 "iptables -t filter -L XenServerDevstack |wc -l"` - if [ "$count" = "0" ]; then - { - echo "iptables -t filter --new XenServerDevstack" - echo "iptables -t filter -I INPUT -j XenServerDevstack" - echo "iptables -t filter -I XenServerDevstack -p tcp --dport 6640 -j ACCEPT" - } | $ssh_dom0 - fi - fi } # install_nova_hypervisor() - Install external components From fca0da5069f0c90ddcd38d489cb81597d569953a Mon Sep 17 00:00:00 2001 From: David Rabel Date: Fri, 17 Mar 2017 14:47:18 +0100 Subject: [PATCH 0418/1936] Create /opt/stack and make it home directory single-machine.rst and index.rst Before this, one had to create /opt/stack manually and chown it to the stack user. Now it is created when the user is created. This is the same way the multi-node guide handles it. A stack group is created too. Change-Id: I5363d81c8fb38796f565cc6ebf6ab2dee2673989 Closes-Bug: #1673787 Closes-Bug: #1671409 --- doc/source/guides/single-machine.rst | 2 +- doc/source/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 011c41fbce..48a4fa8b12 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -47,7 +47,7 @@ below) :: - adduser stack + useradd -s /bin/bash -d /opt/stack -m stack Since this user will be making many changes to your system, it will need to have sudo privileges: diff --git a/doc/source/index.rst b/doc/source/index.rst index edd6595da2..f8d500813e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -56,7 +56,7 @@ You can quickly create a separate `stack` user to run DevStack with :: - $ sudo adduser stack + $ sudo useradd -s /bin/bash -d /opt/stack -m stack Since this user will be making many changes to your system, it should have sudo privileges: From 530d90c9c245e11feaae5b4031bd4d8b95e4db84 Mon Sep 17 00:00:00 2001 From: David Rabel Date: Wed, 22 Mar 2017 08:25:26 +0100 Subject: [PATCH 0419/1936] Delete unnecessary groupadd in multinode-lab Change-Id: I13c86a19c421d8ef102f35bcae63f6dc69317268 Closes-Bug: #1674897 --- doc/source/guides/multinode-lab.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 1b7f4cd356..484ebba571 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -73,8 +73,7 @@ Otherwise create the stack user: :: - groupadd stack - useradd -g stack -s /bin/bash -d /opt/stack -m stack + useradd -s /bin/bash -d /opt/stack -m stack This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a From 9e64bad03ab11918f6b08f7926685bfb9d7e4596 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Thu, 9 Mar 2017 20:05:57 -0800 Subject: [PATCH 0420/1936] Use br-int when XenServer is hypervisor Previously 
we used a specific integration bridge for the neutron ovs agent
running on the compute node, but this isn't necessary. This patch
removes the XenServer-specific integration bridge and the custom
integration bridge definition.

Depends-On: I675565e1ea6c887d40d7a53f62968c4aa385ecca
Change-Id: If5886e3711765a97f40f20e478f958b988b5a620
---
 lib/nova_plugins/hypervisor-xenserver |  4 ----
 tools/xen/README.md                   |  5 -----
 tools/xen/install_os_domU.sh          | 12 +-----------
 tools/xen/xenrc                       |  1 -
 4 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index 0046a366c9..67b3d76932 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -26,10 +26,6 @@ set +o xtrace
 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
 FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
-if is_service_enabled neutron; then
-    XEN_INTEGRATION_BRIDGE_DEFAULT=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
-    XEN_INTEGRATION_BRIDGE=${XEN_INTEGRATION_BRIDGE:-$XEN_INTEGRATION_BRIDGE_DEFAULT}
-fi

 VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}

diff --git a/tools/xen/README.md b/tools/xen/README.md
index 7062ecb48c..9559e773d3 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -171,8 +171,3 @@ VM as `TEMPLATE_FILENAME`:
     umount "$mountdir"
     rm -rf "$mountdir"

-### Migrate OpenStack DomU to another host
-
-Given you need to migrate your DomU with OpenStack installed to another host,
-you need to set `XEN_INTEGRATION_BRIDGE` in localrc if neutron network is used.
-It is the bridge for `XEN_INT_BRIDGE_OR_NET_NAME` network created in Dom0
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index d2e2c572c6..ac7af0df43 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -66,10 +66,6 @@ setup_network "$VM_BRIDGE_OR_NET_NAME"
 setup_network "$MGT_BRIDGE_OR_NET_NAME"
 setup_network "$PUB_BRIDGE_OR_NET_NAME"

-# With neutron, one more network is required, which is internal to the
-# hypervisor, and used by the VMs
-setup_network "$XEN_INT_BRIDGE_OR_NET_NAME"
-
 if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
     if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then
         cat >&2 << EOF
@@ -292,15 +288,9 @@ add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR"
 #
 $THIS_DIR/build_xva.sh "$GUEST_NAME"

-# Attach a network interface for the integration network (so that the bridge
-# is created by XenServer). This is required for Neutron. Also pass that as a
-# kernel parameter for DomU
-attach_network "$XEN_INT_BRIDGE_OR_NET_NAME"
-
-XEN_INTEGRATION_BRIDGE_DEFAULT=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME")
 append_kernel_cmdline \
-    "$GUEST_NAME" \
-    "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE_DEFAULT}"
+    "$GUEST_NAME"

 FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}"
 append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"

diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 60be02f3fe..169e0427a8 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -29,7 +29,6 @@ OSDOMU_VDI_GB=8
 # Get the management network from the XS installation
 VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
 PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
-XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"

 # VM Password
 GUEST_PASSWORD=${GUEST_PASSWORD:-secret}

From d18d7c86581b419541b291990173f5fb33a45445 Mon Sep 17 00:00:00 2001
From: kesper
Date: Thu, 23 Mar 2017 05:52:33 +0000
Subject: [PATCH 0421/1936] Change for the 'running_in_container' check

This commit changes the 'running_in_container' check so that other
services (ironic, nova and neutron) will not break.

Change-Id: I42eb587cfaebf37944cb10e459b8b8f7b4b4e4ba
---
 functions | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/functions b/functions
index 4435f2de2f..872f21691a 100644
--- a/functions
+++ b/functions
@@ -666,11 +666,7 @@ function set_mtu {

 # running_in_container - Returns true otherwise false
 function running_in_container {
-    if [[ $(systemd-detect-virt --container) == 'none' ]]; then
-        return 0
-    fi
-
-    return 1
+    [[ $(systemd-detect-virt --container) != 'none' ]]
 }


From 5ae945244ec22634f95897cb50e44fb7c2da43a6 Mon Sep 17 00:00:00 2001
From: Sean Dague
Date: Thu, 23 Mar 2017 12:45:29 -0400
Subject: [PATCH 0422/1936] run memory_tracker as root

In order to get memlocked pages this needs to be run as root, just
start it as root so that we don't have issues with the inability to
run sudo later in the run.
Change-Id: I7adab8cbb6d89d4717e427aec22e316d27bea075 --- lib/dstat | 4 ++-- tools/memory_tracker.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/dstat b/lib/dstat index 62795f5e72..517e4237ac 100644 --- a/lib/dstat +++ b/lib/dstat @@ -24,12 +24,12 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" + run_process memory_tracker "sudo $TOP_DIR/tools/memory_tracker.sh" # remove support for the old name when it's no longer used (sometime in Queens) if is_service_enabled peakmem_tracker; then deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead" - run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" + run_process peakmem_tracker "sudo $TOP_DIR/tools/memory_tracker.sh" fi } diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index dac0267a7e..7397c03941 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -86,7 +86,7 @@ function tracker { # list processes that lock memory from swap if [[ $unevictable -ne $unevictable_point ]]; then unevictable_point=$unevictable - sudo ./tools/mlock_report.py + ./tools/mlock_report.py fi echo "]]]" From 682e0abe1a58edcdde40a92df1f060dc8fa758c8 Mon Sep 17 00:00:00 2001 From: David Rabel Date: Fri, 17 Mar 2017 19:19:00 +0100 Subject: [PATCH 0423/1936] Do not use libvirt-bin package anymore The package libvirt-bin is a transitional package in Debian and should not be used anymore. Ubuntu Xenial is an exception here. Because of that this change also adds the possibility to use "not:" to exclude distros in files/debs/* just as "dist:" limits distros. Depends-On: Icc59ea79f54d4ff8751f2e353ee3530fff3d961e Closes-Bug: #1673840 Change-Id: I3998a7178d14ec40eae5cb199d66da9546cd6ccf --- files/debs/nova | 4 +++- functions-common | 15 +++++++++++++++ lib/nova_plugins/functions-libvirt | 6 +++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/files/debs/nova b/files/debs/nova index 58dad411a8..5e14aec836 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -10,7 +10,9 @@ iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports libmysqlclient-dev -libvirt-bin # NOPRIME +libvirt-bin # dist:xenial NOPRIME +libvirt-clients # not:xenial NOPRIME +libvirt-daemon-system # not:xenial NOPRIME libvirt-dev # NOPRIME mysql-server # NOPRIME parted diff --git a/functions-common b/functions-common index a86cfd8a63..82881e6dc0 100644 --- a/functions-common +++ b/functions-common @@ -1148,6 +1148,19 @@ function _parse_package_files { fi fi + # Look for # not:xxx in comment + if [[ $line =~ (.*)#.*not:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ${distros,,} =~ ${DISTRO,,} ]]; then + # If match then skip this package + inst_pkg=0 + fi + fi + if [[ $inst_pkg = 1 ]]; then echo $package fi @@ -1166,6 +1179,8 @@ function _parse_package_files { # - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. +# - ``# not:DISTRO`` or ``not:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros not listed. The distro names are case insensitive. 
function get_packages { local xtrace xtrace=$(set +o | grep xtrace) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 56bb6bda1c..7af23cfd2a 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -24,7 +24,11 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) function install_libvirt { if is_ubuntu; then install_package qemu-system - install_package libvirt-bin libvirt-dev + if [[ ${DISTRO} == "xenial" ]]; then + install_package libvirt-bin libvirt-dev + else + install_package libvirt-clients libvirt-daemon-system libvirt-dev + fi pip_install_gr libvirt-python if [[ ${DISTRO} == "trusty" && ${EBTABLES_RACE_FIX} == "True" ]]; then # Work around for bug #1501558. We can remove this once we From 2954e337e2d2993bd3fee3f2861b3fc2f85312bc Mon Sep 17 00:00:00 2001 From: Luz Cazares Date: Thu, 23 Mar 2017 17:34:19 +0000 Subject: [PATCH 0424/1936] Update devstack supported OS's on documentation Since Feb 25th devstack supported operating systems changed due to Nova increasing its minimum required libvirt version. Further details see: I6617283afd798af37e64913b7865cea3c8a62aba This patch is to update versions on devstack documentation. Change-Id: I12bb59b0903a728376ee9422213c2903b9138249 --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index edd6595da2..eb10bdd420 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -39,7 +39,7 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. Devstack -attempts to support Ubuntu 14.04/16.04, Fedora 23/24, CentOS/RHEL 7, +attempts to support Ubuntu 16.04/17.04, Fedora 24/25, CentOS/RHEL 7, as well as Debian and OpenSUSE. If you do not have a preference, Ubuntu 16.04 is the most tested, and From 1298f1bacd039347c1d2a58ddea6efc2e0db25e1 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Thu, 23 Mar 2017 10:59:49 +0100 Subject: [PATCH 0425/1936] Remove the EBTABLES_RACE_FIX added for Trusty Now that we don't support Ubuntu Trusty anymore, we can remove the ebtables race workaround. Closes-Bug: #1675714 Change-Id: I70483f871e35fcaa933d1b7bac7dbb396aa22cef --- files/ebtables.workaround | 23 --------------------- lib/nova_plugins/functions-libvirt | 5 ----- stackrc | 11 ---------- tools/install_ebtables_workaround.sh | 31 ---------------------------- 4 files changed, 70 deletions(-) delete mode 100644 files/ebtables.workaround delete mode 100755 tools/install_ebtables_workaround.sh diff --git a/files/ebtables.workaround b/files/ebtables.workaround deleted file mode 100644 index c8af51fad5..0000000000 --- a/files/ebtables.workaround +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# This is a terrible, terrible, truly terrible work around for -# environments that have libvirt < 1.2.11. 
ebtables requires that you -# specifically tell it you would like to not race and get punched in -# the face when 2 run at the same time with a --concurrent flag. - -flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real $@ diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 56bb6bda1c..a0242388e5 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -26,11 +26,6 @@ function install_libvirt { install_package qemu-system install_package libvirt-bin libvirt-dev pip_install_gr libvirt-python - if [[ ${DISTRO} == "trusty" && ${EBTABLES_RACE_FIX} == "True" ]]; then - # Work around for bug #1501558. We can remove this once we - # get to a version of Ubuntu that has new enough libvirt. - TOP_DIR=$TOP_DIR $TOP_DIR/tools/install_ebtables_workaround.sh - fi #pip_install_gr elif is_fedora || is_suse; then # On "KVM for IBM z Systems", kvm does not have its own package diff --git a/stackrc b/stackrc index c3b94d02f8..3ae3b0c8ef 100644 --- a/stackrc +++ b/stackrc @@ -820,17 +820,6 @@ USE_SSL=$(trueorfalse False USE_SSL) # sharing the same database. It would be useful for multinode Grenade tests. RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) -# ebtables is inherently racey. If you run it by two or more processes -# simultaneously it will collide, badly, in the kernel and produce -# failures or corruption of ebtables. The only way around it is for -# all tools running ebtables to only ever do so with the --concurrent -# flag. This requires libvirt >= 1.2.11. -# -# If you don't have this then the following work around will replace -# ebtables with a wrapper script so that it is safe to run without -# that flag. -EBTABLES_RACE_FIX=$(trueorfalse False EBTABLES_RACE_FIX) - # Following entries need to be last items in file # Compatibility bits required by other callers like Grenade diff --git a/tools/install_ebtables_workaround.sh b/tools/install_ebtables_workaround.sh deleted file mode 100755 index 45ced87f13..0000000000 --- a/tools/install_ebtables_workaround.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -eu -# -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# This replaces the ebtables on your system with a wrapper script that -# does implicit locking. This is needed if libvirt < 1.2.11 on your platform. - -EBTABLES=/sbin/ebtables -EBTABLESREAL=/sbin/ebtables.real -FILES=$TOP_DIR/files - -if [[ -f "$EBTABLES" ]]; then - if file $EBTABLES | grep ELF; then - sudo mv $EBTABLES $EBTABLESREAL - sudo install -m 0755 $FILES/ebtables.workaround $EBTABLES - echo "Replaced ebtables with locking workaround" - fi -fi From 9573edb4ebc8af68002f499f54da3616a1bfaa48 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 28 Mar 2017 19:37:39 +1100 Subject: [PATCH 0426/1936] Run mlock_report under python3 psutil is only installed under python3 for the 3.5 gate jobs. Call mlock_report.py with $PYTHON so we support both environments. 
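For example (illustrative only; as the diff below shows, $PYTHON simply
defaults to "python"), a python3-only environment can point the tracker
at its interpreter with:

    PYTHON=python3 ./tools/memory_tracker.sh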
Updates to mlock_report.py for python3 compatability Change-Id: If7926ce6a2996b766c49b010a7f6640ae624f860 --- tools/memory_tracker.sh | 4 +++- tools/mlock_report.py | 7 ++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index 7397c03941..cbdeb8f420 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -14,6 +14,8 @@ set -o errexit +PYTHON=${PYTHON:-python} + # time to sleep between checks SLEEP_TIME=20 @@ -86,7 +88,7 @@ function tracker { # list processes that lock memory from swap if [[ $unevictable -ne $unevictable_point ]]; then unevictable_point=$unevictable - ./tools/mlock_report.py + ${PYTHON} ./tools/mlock_report.py fi echo "]]]" diff --git a/tools/mlock_report.py b/tools/mlock_report.py index 1d23af90d7..2169cc2dce 100755 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -8,14 +8,15 @@ import psutil -SUMMARY_REGEX = re.compile(r".*\s+(?P[\d]+)\s+KB") +SUMMARY_REGEX = re.compile(b".*\s+(?P[\d]+)\s+KB") def main(): try: - print _get_report() + print(_get_report()) except Exception as e: - print "Failure listing processes locking memory: %s" % str(e) + print("Failure listing processes locking memory: %s" % str(e)) + raise def _get_report(): From 5edae54855b6b9af4283cef07e0b0d1a0f90cd3e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Mar 2017 20:50:24 -0400 Subject: [PATCH 0427/1936] initial work to enable systemd service running During the PTG there was a discussion that the screen developer workflow wasn't nearly as useful as it once was. There were now too many services to see them all on one screen, and one of the most common service restart scenarios was not restarting one service, but a bunch to get code to take effect. This implements a 3rd way of running services instead of direct forking via bash, or running under screen, which is running as systemd units. Logging is adjusted because it's redundant to log datetime in oslo.log when journald has that. Swift needed to have services launched by absolute path to work. This is disabled by default, but with instructions on using it. The long term intent is to make this the way to run devstack, which would be the same between both the gate and local use. Some changes were also needed to run_process to pass the run User in. A hack around the keystone uwsgi launcher was done at the same time to remove a run_process feature that only keystone uwsgi uses. Change-Id: I836bf27c4cfdc449628aa7641fb96a5489d5d4e7 --- SYSTEMD.rst | 177 +++++++++++++++++++++++++++++++++++++++++++++++ functions | 15 +++- functions-common | 75 +++++++++++++++++++- lib/dstat | 4 +- lib/keystone | 7 +- lib/swift | 13 +++- stackrc | 17 +++++ 7 files changed, 298 insertions(+), 10 deletions(-) create mode 100644 SYSTEMD.rst diff --git a/SYSTEMD.rst b/SYSTEMD.rst new file mode 100644 index 0000000000..b6ed19335d --- /dev/null +++ b/SYSTEMD.rst @@ -0,0 +1,177 @@ +=========================== + Using Systemd in DevStack +=========================== + +.. note:: + + This is an in progress document as we work out the way forward here + with DevStack and systemd. + +DevStack can be run with all the services as systemd unit +files. Systemd is now the default init system for nearly every Linux +distro, and systemd encodes and solves many of the problems related to +poorly running processes. + +Why this instead of screen? 
+=========================== + +The screen model for DevStack was invented when the number of services +that a DevStack user was going to run was typically < 10. This made +screen hot keys to jump around very easy. However, the landscape has +changed (not all services are stoppable in screen as some are under +Apache, there are typically at least 20 items) + +There is also a common developer workflow of changing code in more +than one service, and needing to restart a bunch of services for that +to take effect. + +To enable this add the following to your local.conf:: + + USE_SYSTEMD=True + + + +Unit Structure +============== + +.. note:: + + Originally we actually wanted to do this as user units, however + there are issues with running this under non interactive + shells. For now, we'll be running as system units. Some user unit + code is left in place in case we can switch back later. + +All DevStack user units are created as a part of the DevStack slice +given the name ``devstack@$servicename.service``. This lets us do +certain operations at the slice level. + +Manipulating Units +================== + +Assuming the unit ``n-cpu`` to make the examples more clear. + +Enable a unit (allows it to be started):: + + sudo systemctl enable devstack@n-cpu.service + +Disable a unit:: + + sudo systemctl disable devstack@n-cpu.service + +Start a unit:: + + sudo systemctl start devstack@n-cpu.service + +Stop a unit:: + + sudo systemctl stop devstack@n-cpu.service + +Restart a unit:: + + sudo systemctl restart devstack@n-cpu.service + +See status of a unit:: + + sudo systemctl status devstack@n-cpu.service + + +Querying Logs +============= + +One of the other major things that comes with systemd is journald, a +consolidated way to access logs (including querying through structured +metadata). This is accessed by the user via ``journalctl`` command. + + +Logs can be accessed through ``journalctl``. journalctl has powerful +query facilities. We'll start with some common options. + +Follow logs for a specific service:: + + journalctl -f --unit devstack@n-cpu.service + +Following logs for multiple services simultaneously:: + + journalctl -f --unit devstack@n-cpu.service --user-unit + devstack@n-cond.service + +Use higher precision time stamps:: + + journalctl -f -o short-precise --unit devstack@n-cpu.service + + +Known Issues +============ + +Be careful about systemd python libraries. There are 3 of them on +pypi, and they are all very different. They unfortunately all install +into the ``systemd`` namespace, which can cause some issues. + +- ``systemd-python`` - this is the upstream maintained library, it has + a version number like systemd itself (currently ``233``). This is + the one you want. +- ``systemd`` - a python 3 only library, not what you want. +- ``python-systemd`` - another library you don't want. Installing it + on a system will break ansible's ability to run. + + +If we were using user units, the ``[Service]`` - ``Group=`` parameter +doesn't seem to work with user units, even though the documentation +says that it should. This means that we will need to do an explicit +``/usr/bin/sg``. This has the downside of making the SYSLOG_IDENTIFIER +be ``sg``. We can explicitly set that with ``SyslogIdentifier=``, but +it's really unfortunate that we're going to need this work +around. This is currently not a problem because we're only using +system units. 
+ +Future Work +=========== + +oslo.log journald +----------------- + +Journald has an extremely rich mechanism for direct logging including +structured metadata. We should enhance oslo.log to take advantage of +that. It would let us do things like:: + + journalctl REQUEST_ID=...... + + journalctl INSTANCE_ID=...... + +And get all lines related to the request id or instance id. + +sub targets/slices +------------------ + +We might want to create per project slices so that it's easy to +follow, restart all services of a single project (like swift) without +impacting other services. + +log colorizing +-------------- + +We lose log colorization through this process. We might want to build +a custom colorizer that we could run journalctl output through +optionally for people. + +user units +---------- + +It would be great if we could do services as user units, so that there +is a clear separation of code being run as not root, to ensure running +as root never accidentally gets baked in as an assumption to +services. However, user units interact poorly with devstack-gate and +the way that commands are run as users with ansible and su. + +Maybe someday we can figure that out. + +References +========== + +- Arch Linux Wiki - https://wiki.archlinux.org/index.php/Systemd/User +- Python interface to journald - + https://www.freedesktop.org/software/systemd/python-systemd/journal.html +- Systemd documentation on service files - + https://www.freedesktop.org/software/systemd/man/systemd.service.html +- Systemd documentation on exec (can be used to impact service runs) - + https://www.freedesktop.org/software/systemd/man/systemd.exec.html diff --git a/functions b/functions index 872f21691a..f6679fdebe 100644 --- a/functions +++ b/functions @@ -575,7 +575,9 @@ function vercmp { function setup_logging { local conf_file=$1 local other_cond=${2:-"False"} - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then + if [[ "$USE_SYSTEMD" == "True" ]]; then + setup_systemd_logging $conf_file + elif [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then setup_colorized_logging $conf_file else setup_standard_logging_identity $conf_file @@ -601,6 +603,17 @@ function setup_colorized_logging { iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } +function setup_systemd_logging { + local conf_file=$1 + local conf_section="DEFAULT" + local project_var="project_name" + local user_var="user_name" + iniset $conf_file $conf_section logging_context_format_string "%(levelname)s %(name)s [%(request_id)s %("$project_var")s %("$user_var")s] %(instance)s%(message)s" + iniset $conf_file $conf_section logging_default_format_string "%(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" +} + function setup_standard_logging_identity { local conf_file=$1 iniset $conf_file DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s" diff --git a/functions-common b/functions-common index a86cfd8a63..ec68644757 100644 --- a/functions-common +++ b/functions-common @@ -1443,6 +1443,59 @@ function _run_process { exit 0 } +function write_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local extra="" + if [[ -n "$group" ]]; then 
+ extra="Group=$group" + fi + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" + + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} + +function _run_under_systemd { + local service=$1 + local command="$2" + local cmd=$command + local systemd_service="devstack@$service.service" + local group=$3 + local user=${4:-$STACK_USER} + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service + _journal_log $service $systemd_service +} + +function _journal_log { + local service=$1 + local unit=$2 + local logfile="${service}.log.${CURRENT_LOG_TIME}" + local real_logfile="${LOGDIR}/${logfile}" + if [[ -n ${LOGDIR} ]]; then + $JOURNALCTL_F $2 > "$real_logfile" & + bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" + if [[ -n ${SCREEN_LOGDIR} ]]; then + # Drop the backward-compat symlink + ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log + fi + fi +} + # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. # This is used for ``service_check`` when all the ``screen_it`` are called finished # Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` @@ -1478,16 +1531,24 @@ function run_process { local service=$1 local command="$2" local group=$3 - local subservice=$4 + local user=$4 - local name=${subservice:-$service} + local name=$service time_start "run_process" if is_service_enabled $service; then - if [[ "$USE_SCREEN" = "True" ]]; then + if [[ "$USE_SYSTEMD" = "True" ]]; then + _run_under_systemd "$name" "$command" "$group" "$user" + elif [[ "$USE_SCREEN" = "True" ]]; then + if [[ "$user" == "root" ]]; then + command="sudo $command" + fi screen_process "$name" "$command" "$group" else # Spawn directly without screen + if [[ "$user" == "root" ]]; then + command="sudo $command" + fi _run_process "$name" "$command" "$group" & fi fi @@ -1618,6 +1679,14 @@ function stop_process { if is_service_enabled $service; then # Kill via pid if we have one available + if [[ "$USE_SYSTEMD" == "True" ]]; then + # Only do this for units which appear enabled, this also + # catches units that don't really exist for cases like + # keystone without a failure. 
+ $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service + fi + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) # oslo.service tends to stop actually shutting down diff --git a/lib/dstat b/lib/dstat index 517e4237ac..982b70387e 100644 --- a/lib/dstat +++ b/lib/dstat @@ -24,12 +24,12 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "sudo $TOP_DIR/tools/memory_tracker.sh" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" # remove support for the old name when it's no longer used (sometime in Queens) if is_service_enabled peakmem_tracker; then deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead" - run_process peakmem_tracker "sudo $TOP_DIR/tools/memory_tracker.sh" + run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" fi } diff --git a/lib/keystone b/lib/keystone index 530f3b42d9..af607c344b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -602,8 +602,11 @@ function start_keystone { tail_log key /var/log/$APACHE_NAME/keystone.log tail_log key-access /var/log/$APACHE_NAME/keystone_access.log else # uwsgi - run_process key "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_PUBLIC_UWSGI_FILE" "" "key-p" - run_process key "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_ADMIN_UWSGI_FILE" "" "key-a" + # TODO(sdague): we should really get down to a single keystone here + enable_service key-p + enable_service key-a + run_process key-p "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_PUBLIC_UWSGI_FILE" "" + run_process key-a "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_ADMIN_UWSGI_FILE" "" fi echo "Waiting for keystone to start..." diff --git a/lib/swift b/lib/swift index 5b510e5930..6c2af61551 100644 --- a/lib/swift +++ b/lib/swift @@ -38,6 +38,15 @@ fi # Set up default directories GITDIR["python-swiftclient"]=$DEST/python-swiftclient +# Swift virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["swift"]=${SWIFT_DIR}.venv + SWIFT_BIN_DIR=${PROJECT_VENV["swift"]}/bin +else + SWIFT_BIN_DIR=$(get_python_exec_prefix) +fi + + SWIFT_DIR=$DEST/swift SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} @@ -807,10 +816,10 @@ function start_swift { local proxy_port=${SWIFT_DEFAULT_BIND_PORT} start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT fi - run_process s-proxy "swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" if [[ ${SWIFT_REPLICAS} == 1 ]]; then for type in object container account; do - run_process s-${type} "swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" done fi diff --git a/stackrc b/stackrc index c3b94d02f8..61501b5696 100644 --- a/stackrc +++ b/stackrc @@ -87,6 +87,23 @@ HORIZON_APACHE_ROOT="/dashboard" # be disabled for automated testing by setting this value to False. 
USE_SCREEN=$(trueorfalse True USE_SCREEN) +# Whether to use SYSTEMD to manage services +USE_SYSTEMD=$(trueorfalse False USE_SYSTEMD) +USER_UNITS=$(trueorfalse False USER_UNITS) +if [[ "$USER_UNITS" == "True" ]]; then + SYSTEMD_DIR="$HOME/.local/share/systemd/user" + SYSTEMCTL="systemctl --user" + JOURNALCTL_F="journalctl -f -o short-precise --user-unit" +else + SYSTEMD_DIR="/etc/systemd/system" + SYSTEMCTL="sudo systemctl" + JOURNALCTL_F="journalctl -f -o short-precise --unit" +fi + +if [[ "$USE_SYSTEMD" == "True" ]]; then + USE_SCREEN=False +fi + # When using screen, should we keep a log file on disk? You might # want this False if you have a long-running setup where verbose logs # can fill-up the host. From 213c9a151f907988b283e4223404d0a09a5d4aa3 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Tue, 28 Mar 2017 14:32:28 +0000 Subject: [PATCH 0428/1936] Remove installation of keystone policy file As of Id6e3c0ac54b21d85e68625a5b52fe2559fb70f24 keystone's policy file is empty and it is no longer required at runtime. This commit updates devstack to not deploy a policy file for keystone because devstack doesn't specify any policy overrides. Instead, we can remove the sample policy file and rely on the defaults that have been registered in code. This is the same approach nova took with policy in I85a251376dfe38caa4b100861bf764014a98bc37. Change-Id: Ib1d9a51a78e2a84a3d7294dc8782605a681fa9e8 --- lib/keystone | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 530f3b42d9..ea33abae96 100644 --- a/lib/keystone +++ b/lib/keystone @@ -202,7 +202,6 @@ function configure_keystone { if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then install -m 600 $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF - cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" fi From 50686fe244cf5df95c59ffdfa124bedb4f253dbf Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 29 Mar 2017 08:28:40 +0000 Subject: [PATCH 0429/1936] Updated from generate-devstack-plugins-list Change-Id: Id1313b029df86ea446b90ad086cc436702379dc9 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index cc55c0bd20..cfa54551ae 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -68,6 +68,7 @@ horizon-mellanox `git://git.openstack.org/openstack/horizo ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ +ironic-ui `git://git.openstack.org/openstack/ironic-ui `__ karbor `git://git.openstack.org/openstack/karbor `__ karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ keystone `git://git.openstack.org/openstack/keystone `__ From 980d65f58672388538e588cdd52c9169feec4a91 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Mon, 27 Mar 2017 14:29:58 +0200 Subject: [PATCH 0430/1936] Swift: Optionally start only the necessary services Currently Devstack starts all Swift services, including those in charge of "consistency convergence" (remember Swift is eventually consistent), data scrubbing, hard-deletion (*-reaper services) cleanup. But when running with Replication Factor 1 some of those services are not needed at all. 
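For illustration only, a minimal ``local.conf`` sketch of the opt-out this
patch enables (the service names and the flag default come from the diff
below; everything else here is an assumed example):

    [[local|localrc]]
    enable_service s-proxy s-object s-container s-account
    SWIFT_START_ALL_SERVICES=False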
Besides, the functionalities provided by some of these services are not
tested at all (neither in Tempest nor in Swift functional tests).

Thus, in the interest of saving some MB of RAM, this patch introduces a
config flag to start only a minimal set of Swift services, just what's
required to make all of our current tests pass. The default value for this
new config flag is set to start all services, that is, to maintain
Devstack's current behavior.

For the sake of completeness, here is the list of services that are not
going to be started if the config flag is toggled, and the associated RSS
according to our peakmem_tracker

40004 swift-object-replicator /etc/swift/object-server/1.conf
34320 swift-container-replicator /etc/swift/container-server/1.conf
33584 swift-object-auditor /etc/swift/object-server/1.conf
33328 swift-object-reconstructor /etc/swift/object-server/1.conf
31936 swift-object-updater /etc/swift/object-server/1.conf
31492 swift-account-reaper /etc/swift/account-server/1.conf
31076 swift-account-replicator /etc/swift/account-server/1.conf
29540 swift-container-updater /etc/swift/container-server/1.conf
29220 swift-account-auditor /etc/swift/account-server/1.conf
29036 swift-container-auditor /etc/swift/container-server/1.conf

So we are looking at saving at most ~350MB of RAM (could be less because
RSS doesn't account for shared memory).

A follow-up patch will soon be proposed in devstack-gate to not run those
additional services in our Gate jobs.

Change-Id: I8a0d03ac0296a74e38efd185beb8513866eaf0c4
---
 lib/swift | 55 +++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 37 insertions(+), 18 deletions(-)

diff --git a/lib/swift b/lib/swift
index 6c2af61551..96e2f03e5f 100644
--- a/lib/swift
+++ b/lib/swift
@@ -128,6 +128,11 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
 SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
 SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})

+# Set ``SWIFT_START_ALL_SERVICES`` to control whether all Swift
+# services (including the *-auditor, *-replicator, *-reconstructor, etc.
+# daemons) should be started.
+SWIFT_START_ALL_SERVICES=$(trueorfalse True SWIFT_START_ALL_SERVICES)
+
 # Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth
 # token should be placed in the logs. When keystone is used with PKI tokens,
 # the token values can be huge, seemingly larger the 2K, at the least. We
@@ -786,8 +791,11 @@ function start_swift {
     fi

     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
+        # Apache should serve the "PACO" a.k.a "main" services
         restart_apache_server
+        # The rest of the services should be started in the background
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
+        # But we still want the logs of Swift Proxy in our screen session
         tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
         if [[ ${SWIFT_REPLICAS} == 1 ]]; then
             for type in object container account; do
@@ -797,31 +805,42 @@ function start_swift {
         return 0
     fi

-    # By default with only one replica we are launching the proxy,
-    # container, account and object server in screen in foreground and
-    # other services in background. If we have ``SWIFT_REPLICAS`` set to something
-    # greater than one we first spawn all the Swift services then kill the proxy
-    # service so we can run it in foreground in screen. ``swift-init ...
- # {stop|restart}`` exits with '1' if no servers are running, ignore it just - # in case - local todo type - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + + # By default with only one replica we are launching the proxy, container + # account and object server in screen in foreground. Then, the rest of + # the services is optionally started. + # + # If we have ``SWIFT_REPLICAS`` set to something greater than one + # we first spawn *all* the Swift services then kill the proxy service + # so we can run it in foreground in screen. + # + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are + # running, ignore it just in case if [[ ${SWIFT_REPLICAS} == 1 ]]; then - todo="object container account" + local foreground_services type + + foreground_services="object container account" + for type in ${foreground_services}; do + run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + + if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then + swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + else + # The container-sync daemon is strictly needed to pass the container + # sync Tempest tests. + swift-init --run-dir=${SWIFT_DATA_DIR}/run container-sync start + fi + else + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true fi - for type in proxy ${todo}; do - swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true - done + if is_service_enabled tls-proxy; then local proxy_port=${SWIFT_DEFAULT_BIND_PORT} start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT fi run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" - done - fi if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then swift_configure_tempurls From f85e0ba353c71fc5c8bacebe006e68bebf2af024 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 17 Mar 2017 12:54:30 -0700 Subject: [PATCH 0431/1936] Enable Kernel Samepage Merging by default In an effort to reduce memory consumption enable KSM by default. The biggest win here is when using libvirt with nova or ironic with its fake baremetal instances. In theory any process that runs duplicates with mergeable memory will benefit though. Change-Id: I4c5addfd3e83b6516023b36cbaabd5169f0d5ceb --- stack.sh | 16 ++++++++++++++++ stackrc | 8 ++++++++ 2 files changed, 24 insertions(+) diff --git a/stack.sh b/stack.sh index 20cdc1dfcc..504d94f148 100755 --- a/stack.sh +++ b/stack.sh @@ -1006,6 +1006,22 @@ init_service_check # Save configuration values save_stackenv $LINENO +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. 
+ +if [[ "ENABLE_KSM" == "True" ]] ; then + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" + fi +fi + # Start Services # ============== diff --git a/stackrc b/stackrc index 61501b5696..b53f791380 100644 --- a/stackrc +++ b/stackrc @@ -104,6 +104,14 @@ if [[ "$USE_SYSTEMD" == "True" ]]; then USE_SCREEN=False fi +# Whether or not to enable Kernel Samepage Merging (KSM) if available. +# This allows programs that mark their memory as mergeable to share +# memory pages if they are identical. This is particularly useful with +# libvirt backends. This reduces memory useage at the cost of CPU overhead +# to scan memory. We default to enabling it because we tend to be more +# memory constrained than CPU bound. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) + # When using screen, should we keep a log file on disk? You might # want this False if you have a long-running setup where verbose logs # can fill-up the host. From bfcc760b9650b09af073e1c6beb489069e5eec0d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 29 Mar 2017 11:52:06 +1100 Subject: [PATCH 0432/1936] Enable libvirt coredumps This adds a flag and basic config for enabling coredumps for libvirt. Partial-Bug: 1643911 Co-Authored-By: Matthew Booth Change-Id: If7cd54e804a5a389a0d82a325b58f5b41b8ef0db --- lib/nova_plugins/functions-libvirt | 49 +++++++++++++++++++++++++----- tools/worlddump.py | 9 ++++++ 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 56bb6bda1c..1ae049226a 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -20,8 +20,46 @@ set +o xtrace # extremely verbose.) DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) +# Try to enable coredumps for libvirt +# Currently fairly specific to OpenStackCI hosts +DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) + +# Only Xenial is left with libvirt-bin. Everywhere else is libvirtd +if is_ubuntu && [ ! -f /etc/init.d/libvirtd ]; then + LIBVIRT_DAEMON=libvirt-bin +else + LIBVIRT_DAEMON=libvirtd +fi + +# Enable coredumps for libvirt +# Bug: https://bugs.launchpad.net/nova/+bug/1643911 +function _enable_coredump { + local confdir=/etc/systemd/system/${LIBVIRT_DAEMON}.service.d + local conffile=${confdir}/coredump.conf + + # Create a coredump directory, and instruct the kernel to save to + # here + sudo mkdir -p /var/core + sudo chmod a+wrx /var/core + echo '/var/core/core.%e.%p.%h.%t' | \ + sudo tee /proc/sys/kernel/core_pattern + + # Drop a config file to up the core ulimit + sudo mkdir -p ${confdir} + sudo tee ${conffile} < Date: Thu, 30 Mar 2017 07:18:49 -0400 Subject: [PATCH 0433/1936] Updated docs from finding more things about systemd Change-Id: I3d807cd342f30eada04a6be2af7db482f9c4a796 --- SYSTEMD.rst | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/SYSTEMD.rst b/SYSTEMD.rst index b6ed19335d..729fdf47b6 100644 --- a/SYSTEMD.rst +++ b/SYSTEMD.rst @@ -74,6 +74,20 @@ See status of a unit:: sudo systemctl status devstack@n-cpu.service +Operating on more than one unit at a time +----------------------------------------- + +Systemd supports wildcarding for unit operations. 
To restart every +service in devstack you can do that following:: + + sudo systemctl restart devstack@* + +Or to see the status of all Nova processes you can do:: + + sudo systemctl status devstack@n-* + +We'll eventually make the unit names a bit more meaningful so that +it's easier to understand what you are restarting. Querying Logs ============= @@ -92,9 +106,13 @@ Follow logs for a specific service:: Following logs for multiple services simultaneously:: - journalctl -f --unit devstack@n-cpu.service --user-unit + journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service +or you can even do wild cards to follow all the nova services:: + + journalctl -f --unit devstack@n-* + Use higher precision time stamps:: journalctl -f -o short-precise --unit devstack@n-cpu.service @@ -138,14 +156,8 @@ that. It would let us do things like:: journalctl INSTANCE_ID=...... -And get all lines related to the request id or instance id. - -sub targets/slices ------------------- - -We might want to create per project slices so that it's easy to -follow, restart all services of a single project (like swift) without -impacting other services. +And get all lines related to the request id or instance id. (Note: +this work has been started at https://review.openstack.org/#/c/451525/) log colorizing -------------- From 571ba8b3bb6b600a9b2479be1e88f829b086631b Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Wed, 29 Mar 2017 22:06:54 -0400 Subject: [PATCH 0434/1936] Change mode of DATA_DIR to 0755 as well It looks libvirt/qemu requires mode 0755 to functioning properly, but DATA_DIR won't be set to 0755 if it is different from default. Change-Id: I37ca0b02b6a75b3756860b547e84c37ccfc99d13 Closes-Bug: #1677421 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 20cdc1dfcc..310676ac5c 100755 --- a/stack.sh +++ b/stack.sh @@ -328,6 +328,7 @@ fi DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR safe_chown -R $STACK_USER $DATA_DIR +safe_chmod 0755 $DATA_DIR # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves From 87535a5b9fa77471d67432a8da793e00fafc8870 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 3 Apr 2017 08:24:23 +0000 Subject: [PATCH 0435/1936] Updated from generate-devstack-plugins-list Change-Id: I68d8812558e1f1f09ab5f30145ce5c0943cae7db --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index cfa54551ae..beb6abbb9e 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,6 +69,7 @@ ironic `git://git.openstack.org/openstack/ironic ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ ironic-ui `git://git.openstack.org/openstack/ironic-ui `__ +k8s-cloud-provider `git://git.openstack.org/openstack/k8s-cloud-provider `__ karbor `git://git.openstack.org/openstack/karbor `__ karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ keystone `git://git.openstack.org/openstack/keystone `__ From 9c5ffd8d132866bd6120696e138e5b7b42dc3f23 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 29 Mar 2017 16:47:57 -0400 Subject: [PATCH 0436/1936] Handle uwsgi on systemd properly uwsgi is a different service type under systemd and shouldn't be run as a standard oneshot type. 
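As a rough sketch, the generated unit ends up looking something like the
following (assembled by hand from the iniset calls in the diff below; the
user, the uwsgi binary path and the ini path are illustrative assumptions,
not literal output of this patch):

    [Service]
    User = stack
    ExecStart = /usr/local/bin/uwsgi --ini /etc/keystone/keystone-uwsgi-public.ini
    Type = notify
    KillSignal = SIGQUIT
    Restart = Always
    NotifyAccess = all
    RestartForceExitStatus = 100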
The uwsgi docs outline a good pattern for writing systemd unit files: http://uwsgi-docs.readthedocs.io/en/latest/Systemd.html This commit takes those suggestions and creates a separate path for writing uwsgi unit files. Change-Id: I9b541b86781afdded311dba058cedd783e1a0dfa --- functions-common | 32 +++++++++++++++++++++++++++++++- lib/keystone | 7 +++++-- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index ec68644757..5b096c60f7 100644 --- a/functions-common +++ b/functions-common @@ -1467,6 +1467,32 @@ function write_user_unit_file { $SYSTEMCTL daemon-reload } +function write_uwsgi_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "KillSignal" "SIGQUIT" + iniset -sudo $unitfile "Service" "Restart" "Always" + iniset -sudo $unitfile "Service" "NotifyAccess" "all" + iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" + + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} + function _run_under_systemd { local service=$1 local command="$2" @@ -1474,7 +1500,11 @@ function _run_under_systemd { local systemd_service="devstack@$service.service" local group=$3 local user=${4:-$STACK_USER} - write_user_unit_file $systemd_service "$cmd" "$group" "$user" + if [[ "$command" =~ "uwsgi" ]] ; then + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" + else + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + fi $SYSTEMCTL enable $systemd_service $SYSTEMCTL start $systemd_service diff --git a/lib/keystone b/lib/keystone index d4b3a66e24..3db3c8d4aa 100644 --- a/lib/keystone +++ b/lib/keystone @@ -604,8 +604,8 @@ function start_keystone { # TODO(sdague): we should really get down to a single keystone here enable_service key-p enable_service key-a - run_process key-p "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_PUBLIC_UWSGI_FILE" "" - run_process key-a "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_ADMIN_UWSGI_FILE" "" + run_process key-p "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_FILE" "" + run_process key-a "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_ADMIN_UWSGI_FILE" "" fi echo "Waiting for keystone to start..." @@ -638,6 +638,9 @@ function stop_keystone { if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then disable_apache_site keystone restart_apache_server + else + stop_process key-p + stop_process key-a fi # Kill the Keystone screen window stop_process key From 5cd44dbe6c0a641258f36d7959952bc971435b74 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 5 Apr 2017 07:23:37 -0400 Subject: [PATCH 0437/1936] add use_journal if we are enabling systemd This is going to be a new option in oslo.log, which we can start setting early to make it take effect. 
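For reference, the resulting ``[DEFAULT]`` logging block in a service's
config file ends up along these lines (a sketch mirroring the iniset calls
in the diff below):

    [DEFAULT]
    use_journal = True
    logging_context_format_string = %(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s] %(instance)s%(message)s
    logging_default_format_string = %(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s
    logging_debug_format_suffix = from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d
    logging_exception_prefix = ERROR %(name)s %(instance)s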
Change-Id: If0e5e4717a1810c759058f33608fbac7543f2d85 --- functions | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/functions b/functions index f6679fdebe..c99e435175 100644 --- a/functions +++ b/functions @@ -606,11 +606,13 @@ function setup_colorized_logging { function setup_systemd_logging { local conf_file=$1 local conf_section="DEFAULT" - local project_var="project_name" - local user_var="user_name" - iniset $conf_file $conf_section logging_context_format_string "%(levelname)s %(name)s [%(request_id)s %("$project_var")s %("$user_var")s] %(instance)s%(message)s" - iniset $conf_file $conf_section logging_default_format_string "%(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file $conf_section use_journal "True" + iniset $conf_file $conf_section logging_context_format_string \ + "%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s] %(instance)s%(message)s" + iniset $conf_file $conf_section logging_default_format_string \ + "%(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_debug_format_suffix \ + "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" } From eb235814d3436953d548bb83c65851f7cec6d1e7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Apr 2017 10:52:48 -0400 Subject: [PATCH 0438/1936] Install systemd python bindings by default These are needed for oslo.log to use journald support. They are *probably* already installed, but just in case we force install them. Change-Id: I0dc66bd2628ff4b3e1caa7ab4366d7f36ff7ea94 --- files/debs/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/debs/general b/files/debs/general index c121770fa2..3a0e24182a 100644 --- a/files/debs/general +++ b/files/debs/general @@ -25,6 +25,7 @@ psmisc python2.7 python-dev python-gdbm # needed for testr +python-systemd screen tar tcpdump diff --git a/files/rpms/general b/files/rpms/general index 77d2fa5f0a..baba06b5ab 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -27,6 +27,7 @@ pyOpenSSL # version in pip uses too much memory python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 screen +systemd-python tar tcpdump unzip From 9fecc2ad04a9414bfb3127d80ee02fed1d36a30d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 7 Apr 2017 12:28:40 -0500 Subject: [PATCH 0439/1936] Add OpenStackSDK as a lib install This is required to install python-openstacksdk from source for testing other projects against master. 
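For example, a consuming job or developer would opt in via ``local.conf``
along these lines (illustrative values only):

    [[local|localrc]]
    LIBS_FROM_GIT=python-openstacksdk
    # optionally override the branch default added to stackrc below
    OPENSTACKSDK_BRANCH=master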
Change-Id: Iee7b043ac7d381dadf89d26098f69e935ed81d6b --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index e34e48ad03..1a78bdfe73 100644 --- a/lib/oslo +++ b/lib/oslo @@ -48,6 +48,7 @@ GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects GITDIR["oslo.vmware"]=$DEST/oslo.vmware GITDIR["osprofiler"]=$DEST/osprofiler GITDIR["pycadf"]=$DEST/pycadf +GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk GITDIR["stevedore"]=$DEST/stevedore GITDIR["taskflow"]=$DEST/taskflow GITDIR["tooz"]=$DEST/tooz @@ -95,6 +96,7 @@ function install_oslo { _do_install_oslo_lib "oslo.vmware" _do_install_oslo_lib "osprofiler" _do_install_oslo_lib "pycadf" + _do_install_oslo_lib "python-openstacksdk" _do_install_oslo_lib "stevedore" _do_install_oslo_lib "taskflow" _do_install_oslo_lib "tooz" diff --git a/stackrc b/stackrc index 5ace0fbf20..00aab8d3c4 100644 --- a/stackrc +++ b/stackrc @@ -529,6 +529,10 @@ GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master} GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master} +# python-openstacksdk OpenStack Python SDK +GITREPO["python-openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/python-openstacksdk.git} +GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-master} + # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 415fec506d..3d4bcd2596 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -37,7 +37,7 @@ ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization django_openstack_auth" ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap" -ALL_LIBS+=" oslo.i18n oslo.utils python-swiftclient" +ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler" From 7f80649eaf1969a4b3d22fa8c654dc68f7000f30 Mon Sep 17 00:00:00 2001 From: Leticia Wanderley Date: Thu, 6 Apr 2017 20:40:19 -0300 Subject: [PATCH 0440/1936] Removes double colon on script comments A few comment lines on stack.sh had two colons where only one colon was necessary. Change-Id: Ia02e1ca28a8fadc2e5477201887a1f4d59996db8 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index d8f3ad6b53..759a8dbacc 100755 --- a/stack.sh +++ b/stack.sh @@ -161,16 +161,16 @@ rm -f $TOP_DIR/.localrc.auto extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto # ``stack.sh`` is customizable by setting environment variables. 
Override a -# default setting via export:: +# default setting via export: # # export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# or by setting the variable on the command line:: +# or by setting the variable on the command line: # # DATABASE_PASSWORD=simple ./stack.sh # -# Persistent variables can be placed in a ``local.conf`` file:: +# Persistent variables can be placed in a ``local.conf`` file: # # [[local|localrc]] # DATABASE_PASSWORD=anothersecret From 2a2db2efc522306acf90b2fb9cf0106cf7cf8e6d Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Sun, 9 Apr 2017 22:37:50 -0700 Subject: [PATCH 0441/1936] XenAPI: Remove final references to Integration bridge The change to remove references of XEN_INTEGRATION_BRIDGE (If5886e3711765a97f40f20e478f958b988b5a620) unfortunately left some code which should have been removed. This remaining code caused an error in some situations when deploying from scratch (which the CI avoids for expediency) Change-Id: Ia568462c9cca8cff8fcfada8148d185609d61a7d --- tools/xen/install_os_domU.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index ac7af0df43..f4ca71a906 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -288,10 +288,6 @@ add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR" # $THIS_DIR/build_xva.sh "$GUEST_NAME" -XEN_INTEGRATION_BRIDGE_DEFAULT=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") -append_kernel_cmdline \ - "$GUEST_NAME" - FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}" append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" From c9a9e415b9a955525a407b78650f93e9193c8117 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 29 Mar 2017 10:28:55 -0700 Subject: [PATCH 0442/1936] Test using UCA for libvirt 2.5.0 We have had issues with libvirt 1.3.1 which is stock on Xenial. Try using 2.5.0 from UCA instead. Related-Bug: 1643911 Related-Bug: 1646779 Related-Bug: 1638982 Change-Id: Ia4434541c71f050fe1ffb54f4c4c1e302391d00b --- stackrc | 8 ++++++-- tools/fixup_stuff.sh | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index b53f791380..fc03f49f29 100644 --- a/stackrc +++ b/stackrc @@ -599,8 +599,12 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - if [[ "$os_VENDOR" =~ (Debian) ]]; then - LIBVIRT_GROUP=libvirt + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then + # The groups change with newer libvirt. Older Ubuntu used + # 'libvirtd', but now uses libvirt like Debian. Do a quick check + # to see if libvirtd group already exists to handle grenade's case. + LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true) + LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt} else LIBVIRT_GROUP=libvirtd fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 4dec95eb4d..6f680b860e 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -67,6 +67,33 @@ else echo_summary "WARNING: unable to reserve keystone ports" fi +# Ubuntu Cloud Archive +#--------------------- +# We've found that Libvirt on Xenial is flaky and crashes enough to be +# a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to +# get newer Libvirt. +if [[ "$DISTRO" = "xenial" ]]; then + # This pulls in apt-add-repository + install_package "software-properties-common" + # Use UCA for newer libvirt. Should give us libvirt 2.5.0. 
+ if [[ -f /etc/ci/mirror_info.sh ]] ; then + # If we are on a nodepool provided host and it has told us about where + # we can find local mirrors then use that mirror. + source /etc/ci/mirror_info.sh + + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main" + + # Disable use of libvirt wheel here as presence of mirror implies + # presence of cached wheel build against older libvirt binary. + # TODO(clarkb) figure out how to use wheel again. + sudo bash -c 'echo "no-binary = libvirt-python" >> /etc/pip.conf' + else + # Otherwise use upstream UCA + sudo add-apt-repository -y cloud-archive:ocata + fi + sudo apt-get update +fi + # Python Packages # --------------- From a48ffa8c5823d2e0c26fff6ec9804f9da1981ffa Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Thu, 6 Apr 2017 15:23:13 +0200 Subject: [PATCH 0443/1936] Do not ask users to overwrite their /etc/sudoers file Give instructions to add permissions for stack user without touching main config. Closes-Bug: #1680459 Closes-Bug: #1681418 Change-Id: Idd27e684e63c616466de28c07551729a1e091bdd --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index c3bac9db72..cbd697112a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -63,7 +63,7 @@ have sudo privileges: :: - $ sudo tee <<<"stack ALL=(ALL) NOPASSWD: ALL" /etc/sudoers + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack $ sudo su - stack Download DevStack From f68f6f2e33cd83c6a0a317abf12bd8a26d5504b6 Mon Sep 17 00:00:00 2001 From: youri jeong Date: Wed, 12 Apr 2017 19:23:40 +0900 Subject: [PATCH 0444/1936] fix typo fix typo for tools/dstat.sh retreive must be retrieve Change-Id: I7a817ec02e7156c886d7d6abb28688bfe2ef5998 --- tools/dstat.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/dstat.sh b/tools/dstat.sh index 1c80fb70f8..ae7306ecb7 100755 --- a/tools/dstat.sh +++ b/tools/dstat.sh @@ -9,7 +9,7 @@ # Assumes: # - dstat command is installed -# Retreive log directory as argument from calling script. +# Retrieve log directory as argument from calling script. LOGDIR=$1 # Command line arguments for primary DStat process. From 3d4c6d2dd16280de150b462ae51ccf85a932c7c1 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 12 Apr 2017 13:57:36 -0700 Subject: [PATCH 0445/1936] Install netcat for libvirt live migration Libvirt live migration requires netcat. It appears that newer UCA packages may not automagically pull this in so explicitly list it as a dependency of nova compute here. Note that netcat/netcat-traditional do not appear to work and netcat-openbsd is required. Change-Id: If2dbc53d082fea779448998ea12b821bd037a14e --- files/debs/n-cpu | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/n-cpu b/files/debs/n-cpu index 69ac430290..d8bbf59d07 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -2,6 +2,7 @@ cryptsetup genisoimage gir1.2-libosinfo-1.0 lvm2 # NOPRIME +netcat-openbsd open-iscsi python-guestfs # NOPRIME qemu-utils From 8f8b274e60ac94bd6b2486ea075217411550c257 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 09:34:12 -0400 Subject: [PATCH 0446/1936] Clean up apache 2.2 cruft from Ubuntu 12.04 All the apache 2.2 handling is obsolete now, as we don't support those distros, so get rid of it. 
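With the 2.2 special-casing gone, ``apache_site_config_for`` collapses to a
single path per distro family; roughly (the paths shown are the usual
defaults and are illustrative):

    apache_site_config_for keystone
    # Ubuntu:             /etc/apache2/sites-available/keystone.conf
    # Fedora/RHEL/CentOS: /etc/httpd/conf.d/keystone.conf (created as .disabled until enabled)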
Change-Id: I9c0f78af2b32afabb2c4264aebc92089c4694f91 --- lib/apache | 48 +++--------------------------------------------- 1 file changed, 3 insertions(+), 45 deletions(-) diff --git a/lib/apache b/lib/apache index d1a11ae18b..f438407aca 100644 --- a/lib/apache +++ b/lib/apache @@ -90,49 +90,15 @@ function install_apache_wsgi { fi # WSGI isn't enabled by default, enable it enable_apache_mod wsgi - - # ensure mod_version enabled for . This is - # built-in statically on anything recent, but precise (2.2) - # doesn't have it enabled - sudo a2enmod version || true -} - -# get_apache_version() - return the version of Apache installed -# This function is used to determine the Apache version installed. There are -# various differences between Apache 2.2 and 2.4 that warrant special handling. -function get_apache_version { - if is_ubuntu; then - local version_str - version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/) - elif is_fedora; then - local version_str - version_str=$(rpm -qa --queryformat '%{VERSION}' httpd) - elif is_suse; then - local version_str - version_str=$(rpm -qa --queryformat '%{VERSION}' apache2) - else - exit_distro_not_supported "cannot determine apache version" - fi - if [[ "$version_str" =~ ^2\.2\. ]]; then - echo "2.2" - elif [[ "$version_str" =~ ^2\.4\. ]]; then - echo "2.4" - else - exit_distro_not_supported "apache version not supported" - fi } # apache_site_config_for() - The filename of the site's configuration file. # This function uses the global variables APACHE_NAME and APACHE_CONF_DIR. # -# On Ubuntu 14.04, the site configuration file must have a .conf suffix for a2ensite and a2dissite to +# On Ubuntu 14.04+, the site configuration file must have a .conf suffix for a2ensite and a2dissite to # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' # files are 000-default.conf and default-ssl.conf. # -# On Ubuntu 12.04, the site configuration file may have any format, as long as it is in -# /etc/apache2/sites-available/. a2ensite and a2dissite need the entire file name to work. The default -# sites' files are default and default-ssl. -# # On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. # # On RHEL and CentOS, things should hopefully work as in Fedora. 
@@ -141,22 +107,14 @@ function get_apache_version { # +----------------------+--------------------+--------------------------+--------------------------+ # | Distribution | File name | Site enabling command | Site disabling command | # +----------------------+--------------------+--------------------------+--------------------------+ -# | Ubuntu 12.04 | site | a2ensite site | a2dissite site | # | Ubuntu 14.04 | site.conf | a2ensite site | a2dissite site | # | Fedora, RHEL, CentOS | site.conf.disabled | mv site.conf{.disabled,} | mv site.conf{,.disabled} | # +----------------------+--------------------+--------------------------+--------------------------+ function apache_site_config_for { local site=$@ if is_ubuntu; then - local apache_version - apache_version=$(get_apache_version) - if [[ "$apache_version" == "2.2" ]]; then - # Ubuntu 12.04 - Apache 2.2 - echo $APACHE_CONF_DIR/${site} - else - # Ubuntu 14.04 - Apache 2.4 - echo $APACHE_CONF_DIR/${site}.conf - fi + # Ubuntu 14.04 - Apache 2.4 + echo $APACHE_CONF_DIR/${site}.conf elif is_fedora || is_suse; then # fedora conf.d is only imported if it ends with .conf so this is approx the same local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" From 1f92d44544998291165942ae59626ccac7731fc8 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Thu, 13 Apr 2017 12:07:57 -0400 Subject: [PATCH 0447/1936] Use apt_get_update after we setup UCA It is possible some CI system are using an http_proxy. Use the helper function to cover this use case. Change-Id: Iee685147ca0244fc7de328a765f937602223de20 Signed-off-by: Paul Belanger --- tools/fixup_stuff.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 6f680b860e..f3ba702a3b 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -91,7 +91,9 @@ if [[ "$DISTRO" = "xenial" ]]; then # Otherwise use upstream UCA sudo add-apt-repository -y cloud-archive:ocata fi - sudo apt-get update + # Force update our APT repos, since we added UCA above. + REPOS_UPDATED=False + apt_get_update fi From 13a29ab787657491aff3b463e1a328a0872f65d0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 08:31:00 -0400 Subject: [PATCH 0448/1936] Add python3-systemd package Otherwise journal logging under python3 doesn't work Change-Id: Ib136d88a522c40482a3e94d0386a26600236f135 --- files/debs/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/general b/files/debs/general index 3a0e24182a..edf64ce932 100644 --- a/files/debs/general +++ b/files/debs/general @@ -23,6 +23,7 @@ openssl pkg-config psmisc python2.7 +python3-systemd python-dev python-gdbm # needed for testr python-systemd From bc4b8eb5bf4804cb2e6e5d9d07f1b87d46a6a689 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Thu, 13 Apr 2017 15:06:36 -0400 Subject: [PATCH 0449/1936] Enable EPEL mirror by default We recently disabled EPEL in openstack-infra, enable it again. Change-Id: I213b302b34b740354d63b69e8ac7f4e1b3d3cdd7 Signed-off-by: Paul Belanger --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 759a8dbacc..dfc2bd9162 100755 --- a/stack.sh +++ b/stack.sh @@ -348,6 +348,10 @@ SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) # is pre-installed. if [[ -f /etc/nodepool/provider ]]; then SKIP_EPEL_INSTALL=True + if is_fedora; then + # However, EPEL is not enabled by default. 
+ sudo yum-config-manager --enable epel + fi fi if is_fedora && [[ $DISTRO == "rhel7" ]] && \ From 4222ee35f64d1950ecfc146b51738b74c316e758 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 20:33:42 -0400 Subject: [PATCH 0450/1936] Make auth_uri available in the swift test setup The swift functional tests use a config which requires keystone ports, we're about to make those go away. This exposes the actual auth_uri to swift for consumption. Change-Id: I5868dfdb8e5f0972ba04e359d212b04351502436 --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index 96e2f03e5f..5eac904559 100644 --- a/lib/swift +++ b/lib/swift @@ -538,6 +538,7 @@ EOF auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} + iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI} if [[ $auth_vers == "3" ]]; then iniset ${testfile} func_test auth_prefix /v3/ else From 968ebeee4146b306c026ca9b51d43ae6a993d1e2 Mon Sep 17 00:00:00 2001 From: Brianna Poulos Date: Fri, 14 Apr 2017 11:33:56 -0400 Subject: [PATCH 0451/1936] Add castellan to LIBS_FROM_GIT Allow castellan to be installed from git instead of pip. Castellan has recently been moved under the oslo framework, and the barbican-tempest-plugin tests which use castellan would benefit from the ability to usd castellan from git instead of pip. Change-Id: I96edca90c61aec84637b7b1ce842eff04c521923 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/oslo b/lib/oslo index 1a78bdfe73..182295c167 100644 --- a/lib/oslo +++ b/lib/oslo @@ -23,6 +23,7 @@ set +o xtrace # Defaults # -------- GITDIR["automaton"]=$DEST/automaton +GITDIR["castellan"]=$DEST/castellan GITDIR["cliff"]=$DEST/cliff GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist @@ -71,6 +72,7 @@ function _do_install_oslo_lib { # install_oslo() - Collect source and prepare function install_oslo { _do_install_oslo_lib "automaton" + _do_install_oslo_lib "castellan" _do_install_oslo_lib "cliff" _do_install_oslo_lib "debtcollector" _do_install_oslo_lib "futurist" diff --git a/stackrc b/stackrc index 88f75413c4..72ad52d765 100644 --- a/stackrc +++ b/stackrc @@ -364,6 +364,10 @@ GITDIR["python-openstackclient"]=$DEST/python-openstackclient # ################### +# castellan key manager interface +GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git} +GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-master} + # cliff command line framework GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} GITBRANCH["cliff"]=${CLIFF_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 3d4bcd2596..314a9d56e3 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -43,6 +43,7 @@ ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" +ALL_LIBS+=" castellan" # Generate the above list with # echo ${!GITREPO[@]} From f9c2a68338aa566051bb301aa0f1b3dec44f5c90 Mon Sep 17 00:00:00 2001 From: Brianna Poulos Date: Fri, 14 Apr 2017 13:00:19 -0400 Subject: [PATCH 0452/1936] Add cursive to LIBS_FROM_GIT Allow cursive to be installed from git instead of pip. 
The barbican-tempest-plugin, which uses cursive indirectly through nova and glance, would benefit from the ability to use cursive from git instead of pip. Change-Id: Icae7d310f1ee392d080e7c8e421a26d7c0ef4727 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index 1a78bdfe73..8eada67c0f 100644 --- a/lib/oslo +++ b/lib/oslo @@ -24,6 +24,7 @@ set +o xtrace # -------- GITDIR["automaton"]=$DEST/automaton GITDIR["cliff"]=$DEST/cliff +GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist GITDIR["os-client-config"]=$DEST/os-client-config @@ -72,6 +73,7 @@ function _do_install_oslo_lib { function install_oslo { _do_install_oslo_lib "automaton" _do_install_oslo_lib "cliff" + _do_install_oslo_lib "cursive" _do_install_oslo_lib "debtcollector" _do_install_oslo_lib "futurist" _do_install_oslo_lib "osc-lib" diff --git a/stackrc b/stackrc index 88f75413c4..56c19cd7f2 100644 --- a/stackrc +++ b/stackrc @@ -483,6 +483,10 @@ GITBRANCH["pbr"]=${PBR_BRANCH:-master} # ################## +# cursive library +GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git} +GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master} + # glance store library GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 3d4bcd2596..ad36312778 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -40,7 +40,7 @@ ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap" ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" -ALL_LIBS+=" oslo.cache oslo.reports osprofiler" +ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" From 4da0fa8c1387e3888de1f4174b478e82e7cc7a67 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 08:56:44 -0400 Subject: [PATCH 0453/1936] Always install apache and proxy-uwsgi We're going to want to start using it by default so just start with always installing it. This should not negatively impact anything else. Also had to fix the test using cowsay, now that cowsay depends on cowsay-off. Part of uwsgi in devstack. 
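A quick sanity check on a stacked host could look like this (sketch only;
the package names are the ones added to the prereq files below):

    # Ubuntu/Debian
    dpkg -l apache2 apache2-dev libapache2-mod-proxy-uwsgi
    # Fedora/CentOS
    rpm -q httpd httpd-devel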
Change-Id: I8306a992d9d006bc0130a255145a6880065aa0df --- files/debs/general | 3 +++ files/rpms/general | 2 ++ tests/test_functions.sh | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/files/debs/general b/files/debs/general index edf64ce932..20490c6072 100644 --- a/files/debs/general +++ b/files/debs/general @@ -1,3 +1,5 @@ +apache2 +apache2-dev bc bridge-utils bsdmainutils @@ -9,6 +11,7 @@ gettext # used for compiling message catalogs git graphviz # needed for docs iputils-ping +libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 libmysqlclient-dev # MySQL-python diff --git a/files/rpms/general b/files/rpms/general index baba06b5ab..106aa6ae88 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -7,6 +7,8 @@ gcc-c++ gettext # used for compiling message catalogs git-core graphviz # needed only for docs +httpd +httpd-devel iptables-services # NOPRIME f23,f24,f25 java-1.7.0-openjdk-headless # NOPRIME rhel7 java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25 diff --git a/tests/test_functions.sh b/tests/test_functions.sh index 8aae23dcb8..adf20cdb80 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -224,7 +224,7 @@ fi # test against removed package...was a bug on Ubuntu if is_ubuntu; then - PKG=cowsay + PKG=cowsay-off if ! (dpkg -s $PKG >/dev/null 2>&1); then # it was never installed...set up the condition sudo apt-get install -y cowsay >/dev/null 2>&1 From 2b85cf0f06b099f9a771e9fbdbdef173c9d04784 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 09:02:14 -0400 Subject: [PATCH 0454/1936] Just use normal restart for apache We're now in a systemd world where systemd is managing the restart effectively, there is no reason to be tricksy with apache now that we're not working around weird upstartd issues. Change-Id: Ifadfd504eb10a90db5177ea9180b9cd8331a2948 --- lib/apache | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/apache b/lib/apache index f438407aca..e36d0c998f 100644 --- a/lib/apache +++ b/lib/apache @@ -173,11 +173,7 @@ function restart_apache_server { # Apache can be slow to stop, doing an explicit stop, sleep, start helps # to mitigate issues where apache will claim a port it's listening on is # still in use and fail to start. - time_start "restart_apache_server" - stop_service $APACHE_NAME - sleep 3 - start_service $APACHE_NAME - time_stop "restart_apache_server" + restart_service $APACHE_NAME } # reload_apache_server From 2f8c88e0532b6b712cc386a9c15d833d3629b19a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 09:08:39 -0400 Subject: [PATCH 0455/1936] Factor out code to write uwsgi config files Instead of this code all existing in keystone inline, factor out into a dedicated set of functions, and make keystone use this. This drops uwsgi supporting https directly, but that's not going to be a supported model going forward once we get to proxy only anyway. 
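After this refactor a service only needs a single call, e.g. the keystone
hunk below boils down to (sketch):

    write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "$KEYSTONE_SERVICE_HOST:$service_port"

and when the trailing http argument is omitted the helper instead wires the
app into apache with a ProxyPass of the form (socket name derived from the
wsgi script name):

    ProxyPass "/identity" "unix:/tmp/keystone-wsgi-public.socket|uwsgi://uwsgi-uds-keystone-wsgi-public/"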
Change-Id: I1d89be1f1b36f26eaf543b99bde6fdc5701474fe --- lib/apache | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++ lib/keystone | 47 ++++++++-------------------------------------- 2 files changed, 61 insertions(+), 39 deletions(-) diff --git a/lib/apache b/lib/apache index e36d0c998f..fc73b49912 100644 --- a/lib/apache +++ b/lib/apache @@ -181,6 +181,59 @@ function reload_apache_server { reload_service $APACHE_NAME } +function write_uwsgi_config { + local file=$1 + local wsgi=$2 + local url=$3 + local http=$4 + local name="" + name=$(basename $wsgi) + local socket="/tmp/${name}.socket" + + # always cleanup given that we are using iniset here + rm -rf $file + iniset "$file" uwsgi wsgi-file "$wsgi" + iniset "$file" uwsgi socket "$socket" + iniset "$file" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$file" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$file" uwsgi die-on-term true + iniset "$file" uwsgi exit-on-reload true + iniset "$file" uwsgi enable-threads true + iniset "$file" uwsgi plugins python + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$file" uwsgi thunder-lock true + # Override the default size for headers from the 4k default. + iniset "$file" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$file" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$file" uwsgi lazy-apps true + iniset "$file" uwsgi chmod-socket 666 + + # If we said bind directly to http, then do that and don't start the apache proxy + if [[ -n "$http" ]]; then + iniset "$file" uwsgi http $http + else + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\"" | sudo tee $apache_conf + enable_apache_site $name + reload_apache_server + fi +} + +function remove_uwsgi_config { + local file=$1 + local wsgi=$2 + local name="" + name=$(basename $wsgi) + + rm -rf $file + disable_apache_site $name +} + # Restore xtrace $_XTRACE_LIB_APACHE diff --git a/lib/keystone b/lib/keystone index 3db3c8d4aa..936af6a5e1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -50,6 +50,10 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} +KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini +KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini +KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public +KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin # Toggle for deploying Keystone under HTTPD + mod_wsgi # Deprecated in Mitaka, use KEYSTONE_DEPLOY instead. @@ -293,44 +297,9 @@ function configure_keystone { _config_keystone_apache_wsgi else # uwsgi # iniset creates these files when it's called if they don't exist. 
- KEYSTONE_PUBLIC_UWSGI_FILE=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini - KEYSTONE_ADMIN_UWSGI_FILE=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini - - rm -f "$KEYSTONE_PUBLIC_UWSGI_FILE" - rm -f "$KEYSTONE_ADMIN_UWSGI_FILE" - - if is_ssl_enabled_service key; then - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi https $KEYSTONE_SERVICE_HOST:$service_port,$KEYSTONE_SSL_CERT,$KEYSTONE_SSL_KEY - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi https $KEYSTONE_ADMIN_BIND_HOST:$auth_port,$KEYSTONE_SSL_CERT,$KEYSTONE_SSL_KEY - else - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi http $KEYSTONE_SERVICE_HOST:$service_port - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi http $KEYSTONE_ADMIN_BIND_HOST:$auth_port - fi - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-public" - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi processes $(nproc) - - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-admin" - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi processes $API_WORKERS - - # Common settings - for file in "$KEYSTONE_PUBLIC_UWSGI_FILE" "$KEYSTONE_ADMIN_UWSGI_FILE"; do - # This is running standalone - iniset "$file" uwsgi master true - # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload true - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins python - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true - # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 - # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" - # This ensures that file descriptors aren't shared between processes. - iniset "$file" uwsgi lazy-apps true - done + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "$KEYSTONE_SERVICE_HOST:$service_port" + write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" "$KEYSTONE_ADMIN_BIND_HOST:$auth_port" fi iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -604,8 +573,8 @@ function start_keystone { # TODO(sdague): we should really get down to a single keystone here enable_service key-p enable_service key-a - run_process key-p "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_FILE" "" - run_process key-a "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_ADMIN_UWSGI_FILE" "" + run_process key-p "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" + run_process key-a "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_ADMIN_UWSGI_CONF" "" fi echo "Waiting for keystone to start..." From f3b2f4c85307b14f115a020f5eaf6c92026b55b4 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 10:11:48 -0400 Subject: [PATCH 0456/1936] Remove USE_SSL support tls-proxy is the way we're now doing a standard install using https between services. There is a lot more work to make services directly handle https, and having python daemons do that directly is a bit of an anti pattern. Nothing currently tests this in project-config from my recent grepping, so in the interest of long term maintenance, delete it all. 
Change-Id: I910df4ceab6f24f3d9c484e0433c93b06f17d6e1 --- lib/cinder | 20 ++------------------ lib/glance | 17 +++-------------- lib/keystone | 17 +---------------- lib/neutron | 21 +++------------------ lib/neutron-legacy | 17 +---------------- lib/nova | 24 +++--------------------- lib/placement | 11 +---------- lib/swift | 9 +-------- lib/tempest | 2 +- lib/tls | 25 ++++++------------------- stack.sh | 11 ++--------- stackrc | 3 --- unstack.sh | 3 --- 13 files changed, 24 insertions(+), 156 deletions(-) diff --git a/lib/cinder b/lib/cinder index c17cea06c7..ac61537c41 100644 --- a/lib/cinder +++ b/lib/cinder @@ -58,7 +58,7 @@ CINDER_CONF=$CINDER_CONF_DIR/cinder.conf CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Public facing bits -if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then CINDER_SERVICE_PROTOCOL="https" fi CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} @@ -215,11 +215,6 @@ function _cinder_config_apache_wsgi { local cinder_api_port=$CINDER_SERVICE_PORT local venv_path="" - if is_ssl_enabled_service c-api; then - cinder_ssl="SSLEngine On" - cinder_certfile="SSLCertificateFile $CINDER_SSL_CERT" - cinder_keyfile="SSLCertificateKeyFile $CINDER_SSL_KEY" - fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages" fi @@ -347,7 +342,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" - if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi @@ -356,14 +351,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_api_version 2 fi - # Register SSL certificates if provided - if is_ssl_enabled_service cinder; then - ensure_certificates CINDER - - iniset $CINDER_CONF DEFAULT ssl_cert_file "$CINDER_SSL_CERT" - iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY" - fi - # Set os_privileged_user credentials (used for os-assisted-snapshots) iniset $CINDER_CONF DEFAULT os_privileged_user_name nova iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" @@ -464,9 +451,6 @@ function install_cinder { if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi - if is_ssl_enabled_service "c-api"; then - enable_mod_ssl - fi fi } diff --git a/lib/glance b/lib/glance index 2f4aa5f0f8..23a1cbf2c7 100644 --- a/lib/glance +++ b/lib/glance @@ -57,7 +57,7 @@ GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False} -if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" fi @@ -187,18 +187,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI fi - # Register SSL certificates if provided - if is_ssl_enabled_service glance; then - ensure_certificates GLANCE - - iniset $GLANCE_API_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_API_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - - iniset $GLANCE_REGISTRY_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_REGISTRY_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - fi - - if is_ssl_enabled_service glance || 
is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https fi @@ -233,7 +222,7 @@ function configure_glance { cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} diff --git a/lib/keystone b/lib/keystone index 936af6a5e1..45ba2c5352 100644 --- a/lib/keystone +++ b/lib/keystone @@ -116,7 +116,7 @@ SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} # if we are running with SSL use https protocols -if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then KEYSTONE_AUTH_PROTOCOL="https" KEYSTONE_SERVICE_PROTOCOL="https" fi @@ -171,12 +171,6 @@ function _config_keystone_apache_wsgi { local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" - if is_ssl_enabled_service key; then - keystone_ssl_listen="" - keystone_ssl="SSLEngine On" - keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT" - keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY" - fi if is_service_enabled tls-proxy; then keystone_service_port=$KEYSTONE_SERVICE_PORT_INT keystone_auth_port=$KEYSTONE_AUTH_PORT_INT @@ -247,11 +241,6 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF - # Register SSL certificates if provided - if is_ssl_enabled_service key; then - ensure_certificates KEYSTONE - fi - local service_port=$KEYSTONE_SERVICE_PORT local auth_port=$KEYSTONE_AUTH_PORT @@ -297,7 +286,6 @@ function configure_keystone { _config_keystone_apache_wsgi else # uwsgi # iniset creates these files when it's called if they don't exist. 
- write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "$KEYSTONE_SERVICE_HOST:$service_port" write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" "$KEYSTONE_ADMIN_BIND_HOST:$auth_port" fi @@ -546,9 +534,6 @@ function install_keystone { if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then install_apache_wsgi - if is_ssl_enabled_service "key"; then - enable_mod_ssl - fi elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then pip_install uwsgi fi diff --git a/lib/neutron b/lib/neutron index dd914664ae..492a0ee8fb 100644 --- a/lib/neutron +++ b/lib/neutron @@ -61,7 +61,7 @@ NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} # Public facing bits -if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then NEUTRON_SERVICE_PROTOCOL="https" fi NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} @@ -243,14 +243,6 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" fi - if is_ssl_enabled_service "neutron"; then - ensure_certificates NEUTRON - - iniset $NEUTRON_CONF DEFAULT use_ssl True - iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" - iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" - fi - # Metering if is_service_enabled neutron-metering; then cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF @@ -404,17 +396,10 @@ function start_neutron_api { # TODO(sc68cal) Stop hard coding this run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - if is_ssl_enabled_service "neutron"; then - ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$NEUTRON_SERVICE_HOST:$service_port" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT - else - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then - die $LINENO "neutron-api did not start" - fi + if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then + die $LINENO "neutron-api did not start" fi - # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 74f36e0ce3..1dfd5fec7d 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -61,7 +61,7 @@ deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future" -if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then Q_PROTOCOL="https" fi @@ -461,9 +461,6 @@ function start_neutron_service_and_check { # Start the Neutron service run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" echo "Waiting for Neutron to start..." 
- if is_ssl_enabled_service "neutron"; then - ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" - fi local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port" test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT @@ -714,18 +711,6 @@ function _configure_neutron_common { iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" fi - if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "neutron"; then - ensure_certificates NEUTRON - - iniset $NEUTRON_CONF DEFAULT use_ssl True - iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" - iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" - fi - _neutron_setup_rootwrap } diff --git a/lib/nova b/lib/nova index a36a740265..cba9acdd79 100644 --- a/lib/nova +++ b/lib/nova @@ -68,7 +68,7 @@ fi # Toggle for deploying Nova-API under HTTPD + mod_wsgi NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False} -if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" fi @@ -262,11 +262,6 @@ function _config_nova_apache_wsgi { local nova_metadata_port=$METADATA_SERVICE_PORT local venv_path="" - if is_ssl_enabled_service nova-api; then - nova_ssl="SSLEngine On" - nova_certfile="SSLCertificateFile $NOVA_SSL_CERT" - nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" - fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" fi @@ -501,7 +496,7 @@ function create_nova_conf { fi if is_service_enabled cinder; then - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE @@ -586,20 +581,10 @@ function create_nova_conf { iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" - if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then iniset $NOVA_CONF DEFAULT glance_protocol https fi - # Register SSL certificates if provided - if is_ssl_enabled_service nova; then - ensure_certificates NOVA - - iniset $NOVA_CONF DEFAULT ssl_cert_file "$NOVA_SSL_CERT" - iniset $NOVA_CONF DEFAULT ssl_key_file "$NOVA_SSL_KEY" - - iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS" - fi - if is_service_enabled n-sproxy; then iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF serial_console enabled True @@ -790,9 +775,6 @@ function install_nova { if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi - if is_ssl_enabled_service "nova-api"; then - enable_mod_ssl - fi fi } diff --git a/lib/placement b/lib/placement index 4cc5cd8b6a..a29784b93c 100644 --- a/lib/placement +++ b/lib/placement @@ -40,7 +40,7 @@ PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement} # yet merged in nova but is coming soon. 
PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED) -if is_ssl_enabled_service "placement-api" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then PLACEMENT_SERVICE_PROTOCOL="https" fi @@ -72,12 +72,6 @@ function _config_placement_apache_wsgi { nova_bin_dir=$(get_python_exec_prefix) placement_api_apache_conf=$(apache_site_config_for placement-api) - # reuse nova's cert if a cert is being used - if is_ssl_enabled_service "placement-api"; then - placement_ssl="SSLEngine On" - placement_certfile="SSLCertificateFile $NOVA_SSL_CERT" - placement_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" - fi # reuse nova's venv if there is one as placement code lives # there if [[ ${USE_VENV} = True ]]; then @@ -149,9 +143,6 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { install_apache_wsgi - if is_ssl_enabled_service "placement-api"; then - enable_mod_ssl - fi } # start_placement_api() - Start the API processes ahead of other things diff --git a/lib/swift b/lib/swift index 5eac904559..d764b25fdb 100644 --- a/lib/swift +++ b/lib/swift @@ -31,7 +31,7 @@ set +o xtrace # Defaults # -------- -if is_ssl_enabled_service "s-proxy" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then SWIFT_SERVICE_PROTOCOL="https" fi @@ -398,13 +398,6 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT} fi - if is_ssl_enabled_service s-proxy; then - ensure_certificates SWIFT - - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT cert_file "$SWIFT_SSL_CERT" - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY" - fi - # DevStack is commonly run in a small slow environment, so bump the timeouts up. # ``node_timeout`` is the node read operation response time to the proxy server # ``conn_timeout`` is how long it takes a connect() system call to return diff --git a/lib/tempest b/lib/tempest index f6fc57dc72..04f8f6a3f6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -279,7 +279,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi diff --git a/lib/tls b/lib/tls index fb2fa3a17b..c2c92a1bae 100644 --- a/lib/tls +++ b/lib/tls @@ -343,7 +343,7 @@ function make_root_CA { # one. If the value for the CA is not rooted in /etc then we know # we need to change it. function fix_system_ca_bundle_path { - if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then + if is_service_enabled tls-proxy; then local capath capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') @@ -362,27 +362,14 @@ function fix_system_ca_bundle_path { } -# Certificate Input Configuration -# =============================== - -# check to see if the service(s) specified are to be SSL enabled. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. 
-# -# Uses global ``SSL_ENABLED_SERVICES`` +# Only for compatibility, return if the tls-proxy is enabled function is_ssl_enabled_service { - local services=$@ - local service="" - if [ "$USE_SSL" == "False" ]; then - return 1 - fi - for service in ${services}; do - [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - done - return 1 + return is_service_enabled tls-proxy } +# Certificate Input Configuration +# =============================== + # Ensure that the certificates for a service are in place. This function does # not check that a service is SSL enabled, this should already have been # completed. diff --git a/stack.sh b/stack.sh index 759a8dbacc..635a328190 100755 --- a/stack.sh +++ b/stack.sh @@ -539,13 +539,6 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend -# Service to enable with SSL if ``USE_SSL`` is True -SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" - -if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then - die $LINENO "tls-proxy and SSL are mutually exclusive" -fi - # Configure Projects # ================== @@ -806,7 +799,7 @@ if is_service_enabled cinder nova; then fi # Setup TLS certs -if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then +if is_service_enabled tls-proxy; then configure_CA init_CA init_cert @@ -886,7 +879,7 @@ if is_service_enabled horizon; then stack_install_service horizon fi -if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then +if is_service_enabled tls-proxy; then fix_system_ca_bundle_path fi diff --git a/stackrc b/stackrc index 88f75413c4..3ceb78c0d6 100644 --- a/stackrc +++ b/stackrc @@ -846,9 +846,6 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} # Set to 0 to disable shallow cloning GIT_DEPTH=${GIT_DEPTH:-0} -# Use native SSL for servers in ``SSL_ENABLED_SERVICES`` -USE_SSL=$(trueorfalse False USE_SSL) - # We may not need to recreate database in case 2 Keystone services # sharing the same database. It would be useful for multinode Grenade tests. RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) diff --git a/unstack.sh b/unstack.sh index b0ebaf725e..485fed7f80 100755 --- a/unstack.sh +++ b/unstack.sh @@ -129,9 +129,6 @@ if is_service_enabled tls-proxy; then stop_tls_proxy cleanup_CA fi -if [ "$USE_SSL" == "True" ]; then - cleanup_CA -fi SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* From a1446b960fa7c21bc1e7141921d6fc95c6e212d2 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 17 Apr 2017 14:31:21 -0400 Subject: [PATCH 0457/1936] always retry proxy errors When an apache worker gets a proxy error, it will not retry talking to the backend server until the retry timeout expires. We bring up the proxy server *before* the backend server, and poll it. If we are running a small number of workers, there is a likely chance that we're going to hit one that errored before the backend was up, thus failing for now real reason. Set this to 0 instead to mean always retry failed connections. 
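For reference, retry= is the standard mod_proxy worker parameter (see the ProxyPass documentation referenced below): it is the cool-down, in seconds, before a worker that saw a backend error is used again, and 0 means retry immediately. An illustrative directive, with a placeholder backend address rather than the generated one:

    ProxyPass http://127.0.0.1:9696/ retry=0 nocanon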
Change-Id: I9e584f087bd375f71ddf0c70f83205c425094a17 Ref: https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxypass --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index c2c92a1bae..7a7b10454e 100644 --- a/lib/tls +++ b/lib/tls @@ -512,7 +512,7 @@ $listen_string KeepAlive Off - ProxyPass http://$b_host:$b_port/ retry=5 nocanon + ProxyPass http://$b_host:$b_port/ retry=0 nocanon ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log From 604e598e2abca37c84d6cd3c84ad8fa5295fb327 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 13:28:12 -0400 Subject: [PATCH 0458/1936] Install and enable apache uwsgi proxy The uwsgi proxy version that comes with Ubuntu xenial is too old, so we have to build it from source. This is a temporary solution until the next LTS. This lays the ground work for using it in keystone. Change-Id: I00fb1759e6988c7df0ce0f3df5ff1ce9fd7cd381 --- lib/apache | 42 ++++++++++++++++++++++++++++++++++++++++++ stack.sh | 3 +++ 2 files changed, 45 insertions(+) diff --git a/lib/apache b/lib/apache index fc73b49912..9fed1003a4 100644 --- a/lib/apache +++ b/lib/apache @@ -66,6 +66,48 @@ function enable_apache_mod { fi } +# NOTE(sdague): Install uwsgi including apache module, we need to get +# to 2.0.6+ to get a working mod_proxy_uwsgi. We can probably build a +# check for that and do it differently for different platforms. +function install_apache_uwsgi { + local apxs="apxs2" + if is_fedora; then + apxs="apxs" + fi + + # Ubuntu xenial is back level on uwsgi so the proxy doesn't + # actually work. Hence we have to build from source for now. + # + # Centos 7 actually has the module in epel, but there was a big + # push to disable epel by default. As such, compile from source + # there as well. + + local dir + dir=$(mktemp -d) + pushd $dir + pip_install uwsgi + pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt + local uwsgi + uwsgi=$(ls uwsgi*) + tar xvf $uwsgi + cd uwsgi*/apache2 + sudo $apxs -i -c mod_proxy_uwsgi.c + popd + # delete the temp directory + sudo rm -rf $dir + + if is_ubuntu; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + elif is_fedora; then + # redhat is missing a nice way to turn on/off modules + echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ + | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf + fi + restart_apache_server +} + # install_apache_wsgi() - Install Apache server and wsgi module function install_apache_wsgi { # Apache installation, because we mark it NOPRIME diff --git a/stack.sh b/stack.sh index 635a328190..e12b3fe06e 100755 --- a/stack.sh +++ b/stack.sh @@ -780,6 +780,9 @@ echo_summary "Installing OpenStack project source" # Install Oslo libraries install_oslo +# Install uwsgi +install_apache_uwsgi + # Install client libraries install_keystoneauth install_keystoneclient From 64ffff9b7d79b9e75616cf43f9f7b31c89026f30 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 13:36:42 -0400 Subject: [PATCH 0459/1936] Convert placement to new uwsgi mode This converts the placement API to use the new WSGI_MODE variable (which is not introduced until the next changeset). We do this so that placement and keystone patches can be reviewed independently, but there are some hidden coupling of mod_wsgi setup which happens only in keystone, so if we do keystone first, it breaks placement. 
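As a usage sketch (WSGI_MODE itself is only defined in the follow-on change), selecting the deployment style from local.conf would look like:

    [[local|localrc]]
    WSGI_MODE=uwsgi        # or mod_wsgi to keep the previous behaviour

In uwsgi mode placement runs as its own uwsgi process and Apache only proxies /placement to it.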
Change-Id: Id5b2c67701bcc7b12c8e3764c7199d10f85df80f --- lib/placement | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/lib/placement b/lib/placement index a29784b93c..fd09cd8c4e 100644 --- a/lib/placement +++ b/lib/placement @@ -32,7 +32,15 @@ set +o xtrace PLACEMENT_CONF_DIR=/etc/nova PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement} - +# Nova virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["nova"]=${NOVA_DIR}.venv + PLACEMENT_BIN_DIR=${PROJECT_VENV["nova"]}/bin +else + PLACEMENT_BIN_DIR=$(get_python_exec_prefix) +fi +PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/nova-placement-api +PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini # The placement service can optionally use a separate database # connection. Set PLACEMENT_DB_ENABLED to True to use it. @@ -114,7 +122,13 @@ function configure_placement { if [ "$PLACEMENT_DB_ENABLED" != False ]; then iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` fi - _config_placement_apache_wsgi + # TODO(sdague): this really should flag off of something else, but + # it won't really work without systemd today. + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" + else + _config_placement_apache_wsgi + fi } # create_placement_accounts() - Set up required placement accounts @@ -147,9 +161,13 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - enable_apache_site placement-api - restart_apache_server - tail_log placement-api /var/log/$APACHE_NAME/placement-api.log + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --ini $PLACEMENT_UWSGI_CONF" + else + enable_apache_site placement-api + restart_apache_server + tail_log placement-api /var/log/$APACHE_NAME/placement-api.log + fi echo "Waiting for placement-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then @@ -163,8 +181,13 @@ function start_placement { # stop_placement() - Disable the api service and stop it. function stop_placement { - disable_apache_site placement-api - restart_apache_server + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + stop_process "placement-api" + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" + else + disable_apache_site placement-api + restart_apache_server + fi } # Restore xtrace From 6ed53156b6198e69d59d1cf3a3497e96f5b7a870 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Apr 2017 13:33:16 -0400 Subject: [PATCH 0460/1936] Convert keystone to use uwsgi with the proxy This makes keystone use the proxy uwsgi module when running in uwsgi mode. It also introduces a new stackrc variable which is WSGI_MODE that we can use to control the conditionals in services that current work with mod_wsgi. Also update retry timeouts on proxy pass so that workers don't disable their connections during polling for initial activity. 
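The net effect for keystone is an Apache site that does nothing but proxy the /identity and /identity_admin paths to the uwsgi sockets. A rough sketch of the generated public-side line (socket path as written by write_uwsgi_config at this point in the series; a later change moves the sockets out of /tmp):

    ProxyPass "/identity" "unix:/tmp/keystone-wsgi-public.socket|uwsgi://uwsgi-uds-keystone-wsgi-public/" retry=0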
Change-Id: I46294fb24e3c23fa19fcfd7d6c9ee8a932354702 --- lib/apache | 2 +- lib/keystone | 56 +++++++++++++++++++++------------------------------- openrc | 4 +--- stackrc | 6 ++++++ 4 files changed, 30 insertions(+), 38 deletions(-) diff --git a/lib/apache b/lib/apache index 9fed1003a4..20700d802f 100644 --- a/lib/apache +++ b/lib/apache @@ -260,7 +260,7 @@ function write_uwsgi_config { else local apache_conf="" apache_conf=$(apache_site_config_for $name) - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\"" | sudo tee $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf enable_apache_site $name reload_apache_server fi diff --git a/lib/keystone b/lib/keystone index 45ba2c5352..a26ef8afd2 100644 --- a/lib/keystone +++ b/lib/keystone @@ -55,21 +55,13 @@ KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin -# Toggle for deploying Keystone under HTTPD + mod_wsgi -# Deprecated in Mitaka, use KEYSTONE_DEPLOY instead. -KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} - # KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: # - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi # - uwsgi : Run keystone under uwsgi -if [ -z "$KEYSTONE_DEPLOY" ]; then - if [ -z "$KEYSTONE_USE_MOD_WSGI" ]; then - KEYSTONE_DEPLOY=mod_wsgi - elif [ "$KEYSTONE_USE_MOD_WSGI" == True ]; then - KEYSTONE_DEPLOY=mod_wsgi - else - KEYSTONE_DEPLOY=uwsgi - fi +if [[ "$WSGI_MODE" == "uwsgi" ]]; then + KEYSTONE_DEPLOY=uwsgi +else + KEYSTONE_DEPLOY=mod_wsgi fi # Select the token persistence backend driver @@ -121,15 +113,8 @@ if is_service_enabled tls-proxy; then KEYSTONE_SERVICE_PROTOCOL="https" fi -# complete URIs -if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # If running in Apache, use path access rather than port. 
- KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin - KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity -else - KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT} - KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT} -fi +KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity # V3 URIs KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3 @@ -155,8 +140,15 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) + sudo rm -f $(apache_site_config_for keystone-wsgi-admin) + else + disable_apache_site keystone + sudo rm -f $(apache_site_config_for keystone) + fi } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone @@ -256,10 +248,8 @@ function configure_keystone { # work when you want to use a different port (in the case of proxy), or you # don't want the port (in the case of putting keystone on a path in # apache). - if is_service_enabled tls-proxy || [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI - fi + iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI + iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT @@ -285,9 +275,8 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" _config_keystone_apache_wsgi else # uwsgi - # iniset creates these files when it's called if they don't exist. - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "$KEYSTONE_SERVICE_HOST:$service_port" - write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" "$KEYSTONE_ADMIN_BIND_HOST:$auth_port" + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" + write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" fi iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -568,10 +557,7 @@ function start_keystone { # unencryted traffic at this point. # If running in Apache, use the path rather than port. - local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ - fi + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ if ! 
wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -595,6 +581,8 @@ function stop_keystone { else stop_process key-p stop_process key-a + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" fi # Kill the Keystone screen window stop_process key diff --git a/openrc b/openrc index 483b5af387..4cdb50efef 100644 --- a/openrc +++ b/openrc @@ -73,8 +73,6 @@ else fi SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} @@ -84,7 +82,7 @@ export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} # the user/project has access to - including nova, glance, keystone, swift, ... # We currently recommend using the version 3 *identity api*. # -export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION} +export OS_AUTH_URL=$KEYSTONE_AUTH_URI # Currently, in order to use openstackclient with Identity API v3, # we need to set the domain which the user and project belong to. diff --git a/stackrc b/stackrc index 3ceb78c0d6..adcb9426ea 100644 --- a/stackrc +++ b/stackrc @@ -225,6 +225,12 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} +# How should we be handling WSGI deployments. By default we're going +# to allow for 2 modes, which is "uwsgi" which runs with an apache +# proxy uwsgi in front of it, or "mod_wsgi", which runs in +# apache. mod_wsgi is deprecated, don't use it. +WSGI_MODE=${WSGI_MODE:-"uwsgi"} + # Repositories # ------------ From 921da2654bfece3d5352d42e8f92b57cac21b9cd Mon Sep 17 00:00:00 2001 From: Prabhuraj Kamaraj Date: Tue, 18 Apr 2017 05:11:52 +0000 Subject: [PATCH 0461/1936] Adding placement-client to compute node local.conf for multinode setup Change-Id: Ie98f908d5a932da259ae13934af45d535fcffb82 Closes-Bug: #1682362 --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 484ebba571..1a8ddbc194 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -175,7 +175,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol + ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" VNCSERVER_LISTEN=$HOST_IP From e0be9e3a2e527754a72d6282883370c09159fac1 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 18 Apr 2017 16:52:25 +0100 Subject: [PATCH 0462/1936] Remove a TODO that no longer applies The removed TODO was talking about USE_SYSTEMD, not WSGI_MODE. WSGI_MODE makes sense, so the TODO has been done. 
Change-Id: Ib574ef123ea4c82d4d88012c990cd1ad660d7879 --- lib/placement | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/placement b/lib/placement index fd09cd8c4e..4755a58bf2 100644 --- a/lib/placement +++ b/lib/placement @@ -122,8 +122,7 @@ function configure_placement { if [ "$PLACEMENT_DB_ENABLED" != False ]; then iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` fi - # TODO(sdague): this really should flag off of something else, but - # it won't really work without systemd today. + if [[ "$WSGI_MODE" == "uwsgi" ]]; then write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" else From 2fcdaac56e20f1002ae76c6ae1b30a093452d21b Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 18 Apr 2017 16:54:12 +0100 Subject: [PATCH 0463/1936] Make a2dissite fail softly if the site is not enabled a2dissite will return a non-zero error code if the site that is being disabled is not currently enabled (that is, if the conf file for it does not exist). This can happen during development if you've been messing with files by hand. Rather than exploding out of a ./stack.sh, accept the missing file as meaning "it's disabled" and carry one. The rpm version of disable, which does not use a2dissite, does this already. Change-Id: Ie5dfd42efdff4bdba5ffaa765af000dd8e1d596e --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 20700d802f..f88f1d3504 100644 --- a/lib/apache +++ b/lib/apache @@ -186,7 +186,7 @@ function enable_apache_site { function disable_apache_site { local site=$@ if is_ubuntu; then - sudo a2dissite ${site} + sudo a2dissite ${site} || true elif is_fedora || is_suse; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if no site config exists From 0effa1a6cb6960b0d1cf82087e6d60c066b7a5f6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 18 Apr 2017 15:16:37 -0400 Subject: [PATCH 0464/1936] remove some unused *_PROTOCOL from export Things like SERVICE_PROTOCOL and KEYSTONE_AUTH_PROTOCOL shouldn't really be exported in openrc as they encourage using them directly to build up keystone urls instead of actually using the OS_AUTH_URL. Remove them. Change-Id: I4b7cc680f7f14dae29b706a227be540c9e212cad --- functions-common | 4 ++-- openrc | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index 90c92000b3..56dd20307f 100644 --- a/functions-common +++ b/functions-common @@ -48,8 +48,8 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ - KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ - LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP \ + KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ + LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ HOST_IPV6 SERVICE_IP_VERSION" diff --git a/openrc b/openrc index 4cdb50efef..828cab3c6d 100644 --- a/openrc +++ b/openrc @@ -72,8 +72,6 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} - # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} From f43ea4776668e259b3053e0c1d4b7d13685f424d Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 18 Apr 2017 21:51:57 -0700 Subject: [PATCH 0465/1936] Send useful auth_port and auth_prefix to swift's test.conf Until we can test with a version of swiftclient that knows how to eat auth_uri, swift still needs a working gate. 
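Concretely, the func_test section of test.conf ends up with values along these lines (shown for plain-HTTP v3 auth; the https and v2 cases switch to 443 and /identity/v2.0/ respectively):

    [func_test]
    auth_port = 80
    auth_prefix = /identity/v3/

so swiftclient can keep building its auth URL from host, port and prefix until it learns to consume auth_uri directly.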
Change-Id: I09f9ad5c87b542df962a79898e06fbf1e968b1e3 Related-Change: I46294fb24e3c23fa19fcfd7d6c9ee8a932354702 Related-Change: Ie427f3b0b9eb834ff940fa5d52444a5a6cdcab15 --- lib/swift | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index d764b25fdb..8b75fcc505 100644 --- a/lib/swift +++ b/lib/swift @@ -530,12 +530,16 @@ EOF local auth_vers auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} + if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then + iniset ${testfile} func_test auth_port 443 + else + iniset ${testfile} func_test auth_port 80 + fi iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI} - if [[ $auth_vers == "3" ]]; then - iniset ${testfile} func_test auth_prefix /v3/ + if [[ "$auth_vers" == "3" ]]; then + iniset ${testfile} func_test auth_prefix /identity/v3/ else - iniset ${testfile} func_test auth_prefix /v2.0/ + iniset ${testfile} func_test auth_prefix /identity/v2.0/ fi fi From 09a08aacf4235b673de948278aa3cbfdc6cbbdc0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 19 Apr 2017 09:24:43 +0000 Subject: [PATCH 0466/1936] Updated from generate-devstack-plugins-list Change-Id: If6c07fd6b56d776a5548564b72e637f6bd3dfbfc --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index beb6abbb9e..96a2733690 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -44,6 +44,7 @@ devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devsta devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 `__ devstack-plugin-bdd `git://git.openstack.org/openstack/devstack-plugin-bdd `__ devstack-plugin-ceph `git://git.openstack.org/openstack/devstack-plugin-ceph `__ +devstack-plugin-container `git://git.openstack.org/openstack/devstack-plugin-container `__ devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs `__ devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs `__ devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka `__ @@ -86,6 +87,7 @@ meteos-ui `git://git.openstack.org/openstack/meteos mistral `git://git.openstack.org/openstack/mistral `__ mixmatch `git://git.openstack.org/openstack/mixmatch `__ mogan `git://git.openstack.org/openstack/mogan `__ +mogan-ui `git://git.openstack.org/openstack/mogan-ui `__ monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ From 37c7843aad745fcde7584777e8a7ec896bb154e2 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Wed, 19 Apr 2017 13:19:21 -0400 Subject: [PATCH 0467/1936] Increase rsyslog buffer sizes. Swift proxy logs to syslog during the devstack-gate tempest runs. To better capture the swift logs increase the rsyslog buffer size to 6k bytes allowing for longer messages like tracebacks. This was setup by openstack-infra previous during our diskimage builds. 
I03e42964e14d9f930c07ed047851bdf775639c59 Change-Id: Iaa232335865410600c93f47d4777ed4f1bce08e2 Signed-off-by: Paul Belanger --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index 8b75fcc505..16f398a613 100644 --- a/lib/swift +++ b/lib/swift @@ -554,6 +554,7 @@ EOF if [[ $SYSLOG != "False" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf + echo "MaxMessageSize 6k" | sudo tee /etc/rsyslog.d/99-maxsize.conf # restart syslog to take the changes sudo killall -HUP rsyslogd fi From 1b245cef7f078139f95260b080558422063f363f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Apr 2017 15:51:52 -0400 Subject: [PATCH 0468/1936] Cleanup duplicate get_or_add_user_domain_role It turns out that we ended up with duplicate versions of this function merging on top of each other within 3 days, and gerrit didn't catch it. Boo gerrit. Boo bash. Change-Id: Ic6aa2f9bafdec906de2bc51d5929beeec48a6a40 --- functions-common | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/functions-common b/functions-common index 90c92000b3..35b48603c5 100644 --- a/functions-common +++ b/functions-common @@ -905,34 +905,6 @@ function get_or_add_user_domain_role { echo $user_role_id } -# Gets or adds user role to domain -# Usage: get_or_add_user_domain_role -function get_or_add_user_domain_role { - local user_role_id - # Gets user role id - user_role_id=$(openstack role assignment list \ - --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - --domain $3 \ - | grep " $1 " | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack role add $1 \ - --user $2 \ - --domain $3 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 - user_role_id=$(openstack role assignment list \ - --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - --domain $3 \ - | grep " $1 " | get_field 1) - fi - echo $user_role_id -} - # Gets or adds group role to project # Usage: get_or_add_group_project_role function get_or_add_group_project_role { From 9bfabc68c4ff2094ed8463b11382d6982743b768 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 20 Apr 2017 15:11:43 -0400 Subject: [PATCH 0469/1936] Make openrc more robust for grenade scenarios This makes openrc more robust for the grenade scenarios by having a sane fallback when stackrc is not found. Change-Id: I297ba519d581d2b6fb4d80d59434acace054bada --- openrc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/openrc b/openrc index 4cdb50efef..b748055be1 100644 --- a/openrc +++ b/openrc @@ -82,7 +82,12 @@ export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} # the user/project has access to - including nova, glance, keystone, swift, ... # We currently recommend using the version 3 *identity api*. # -export OS_AUTH_URL=$KEYSTONE_AUTH_URI + +# If you don't have a working .stackenv, this is the backup possition +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 +KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP} + +export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_AUTH_URI} # Currently, in order to use openstackclient with Identity API v3, # we need to set the domain which the user and project belong to. 
From ef1e88ec09bda30cad19589629744b59e12b4093 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 20 Apr 2017 23:37:32 +0100 Subject: [PATCH 0470/1936] Enable ssh validation by default The -ssh job with ssh validation enabled has been quite stable for a while now [0] so I think it's time to add ssh validation to the integration gate to prevent regressions from lurking in. Doing this in devstack ensures that the change only affects master as we didn't test ssh validation on on stable branches. [0] http://status.openstack.org/openstack-health/#/g/build_name/gate-tempest-dsvm-neutron-full-ssh?duration=P3M Change-Id: I187e560911f5d5d482eb7959e5174068c4c9a801 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index cf7eb6f722..1640f2c21a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -425,7 +425,7 @@ function configure_tempest { TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK fi # Validation - iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} + iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True} iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} From aa26baacb8206967d61aef74686be94e3f8c90b8 Mon Sep 17 00:00:00 2001 From: rabi Date: Thu, 20 Apr 2017 10:55:16 +0530 Subject: [PATCH 0471/1936] Create custom dir for uwsgi domain sockets On Centos, apache has a private view of /tmp and thus can't see this socket, causing keystone to fail. This happened after I46294fb24e3c23fa19fcfd7d6c9ee8a932354702. Move it to /var/run. Closes-Bug: #1684360 Change-Id: I47f091656802719c259752454ec88bf50760b967 --- lib/apache | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index f88f1d3504..afeac158ca 100644 --- a/lib/apache +++ b/lib/apache @@ -230,7 +230,12 @@ function write_uwsgi_config { local http=$4 local name="" name=$(basename $wsgi) - local socket="/tmp/${name}.socket" + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. + local socket_dir='/var/run/uwsgi' + sudo install -d -o $STACK_USER -m 755 $socket_dir + local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here rm -rf $file From 26e431dbd76dff958f65871c852eb9d61d8d00d8 Mon Sep 17 00:00:00 2001 From: Thomas Herve Date: Thu, 13 Apr 2017 14:27:35 +0200 Subject: [PATCH 0472/1936] Define a new function for notifications URL This defines a new function get_notification_url, which returns the URL of RabbitMQ when you want connect to it, and uses in ceilometermiddleware. This fixes an issue when we try to use AMQP for RPC, but not for notifications. Change-Id: I14450b2440806a17a90e5ddefc243868fdbe4f2c --- lib/rpc_backend | 9 +++++++++ lib/swift | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 3c1404e998..3177e88ef2 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -122,6 +122,15 @@ function get_transport_url { fi } +# Repeat the definition, in case get_transport_url is overriden for RPC purpose. +# get_notification_url can then be used to talk to rabbit for notifications. 
+function get_notification_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +} + # iniset configuration function iniset_rpc_backend { local package=$1 diff --git a/lib/swift b/lib/swift index 96e2f03e5f..f3ff2412bd 100644 --- a/lib/swift +++ b/lib/swift @@ -419,7 +419,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_transport_url) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_notification_url) iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications" SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" From f6a2d2cd4edd06408690081d6207ff73b76f543a Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Apr 2017 10:50:29 +1000 Subject: [PATCH 0473/1936] Always restart apache As described in [1], it seems that mod_wsgi is not "graceful" reload safe. Upon re-init, it can end up in a segfault loop. The "reload" (not *restart*) after setting up uwsgi was added with I1d89be1f1b36f26eaf543b99bde6fdc5701474fe but not causing an issue until uwsgi was enabled. We do not notice in the gate, because the TLS setup ends up doing a restart after this setup. In the period between the write_uwsgi_config and that restart, Apache is sitting in a segfault loop, but we never noticed because we don't try talking to it. Other jobs that don't do any further apache configuration have started failing, however. Looking at the original comments around "reload_apache_server" I'm not sure if it is still necessary. [2] shows it is not used outside these two calls. [1] https://bugzilla.redhat.com/show_bug.cgi?id=1445540 [2] http://codesearch.openstack.org/?q=reload_apache_server&i=nope&files=&repos= Closes-Bug: #1686210 Change-Id: I5234bae0595efdcd30305a32bf9c121072a3625e --- lib/apache | 7 +------ lib/tls | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/lib/apache b/lib/apache index afeac158ca..34ac660266 100644 --- a/lib/apache +++ b/lib/apache @@ -218,11 +218,6 @@ function restart_apache_server { restart_service $APACHE_NAME } -# reload_apache_server -function reload_apache_server { - reload_service $APACHE_NAME -} - function write_uwsgi_config { local file=$1 local wsgi=$2 @@ -267,7 +262,7 @@ function write_uwsgi_config { apache_conf=$(apache_site_config_for $name) echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf enable_apache_site $name - reload_apache_server + restart_apache_server fi } diff --git a/lib/tls b/lib/tls index 7a7b10454e..238687c5dd 100644 --- a/lib/tls +++ b/lib/tls @@ -526,12 +526,7 @@ EOF enable_apache_mod $mod done enable_apache_site $b_service - # Only a reload is required to pull in new vhosts - # Note that a restart reliably fails on centos7 and trusty - # because apache can't open port 80 because the old apache - # still has it open. Using reload fixes trusty but centos7 - # still doesn't work. 
- reload_apache_server + restart_apache_server } # Follow TLS proxy From 92e6b1a0e83cbfeeb1d29bbe2b8c71e212f885fd Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Wed, 26 Apr 2017 13:40:42 +0200 Subject: [PATCH 0474/1936] default gateway regex: use exact match for iface name If the current interface has a default gateway configured is determined by the regex default.+ If for example 'enc1' is used, but also an interface 'enc1800' is present, the regex will also match the 'enc1800' default gateway. This patch fixes this by looking for . This way 'enc1800' is not matched. Change-Id: Id1d58f5be6296c3a37aef788359ae8fe0fe11d8b --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 1dfd5fec7d..a409164386 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -570,7 +570,7 @@ function _move_neutron_addresses_route { local IP_DEL="" local IP_UP="" local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }") + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") local ADD_OVS_PORT="" local DEL_OVS_PORT="" local ARP_CMD="" From befe0925e7cb3d3b34ee9e856c4bc822a499975f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 16 Feb 2017 15:45:11 -0500 Subject: [PATCH 0475/1936] Differentiate between DEFAULT_IMAGE_NAME and filename The DEFAULT_IMAGE_NAME variable is used to reference the name of the default image in glance after it has been uploaded by devstack. It is used both inside and outside of devstack for that purpose. However, when configuring tempest there are some tests which also do image uploads and need a filename for specifying which file they should upload into glance for testing purposes. Previously we were just using DEFAULT_IMAGE_NAME for both purposes, but this causes a conflict if the name of the image we upload into glance does not have a file extension. So instead of conflating the things this commit differentiates between them and adds a new DEFAULT_IMAGE_FILE_NAME variable to use for this purpose. 
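For illustration (the CirrOS version here is hypothetical; the real values come from CIRROS_VERSION and CIRROS_ARCH), the two variables now differ as:

    DEFAULT_IMAGE_NAME=cirros-0.3.5-x86_64-disk             # name of the image in glance
    DEFAULT_IMAGE_FILE_NAME=cirros-0.3.5-x86_64-disk.img    # file tempest uploads in image tests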
Change-Id: Icf74badcf2093d8c75db538232b10b3ac7b86eb8 --- lib/tempest | 3 ++- stackrc | 12 ++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 128e9728fe..69a2500a5d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -11,6 +11,7 @@ # - ``DEST``, ``FILES`` # - ``ADMIN_PASSWORD`` # - ``DEFAULT_IMAGE_NAME`` +# - ``DEFAULT_IMAGE_FILE_NAME`` # - ``S3_SERVICE_PORT`` # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares @@ -412,7 +413,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario img_container_format ovf else SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_NAME + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME fi iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE diff --git a/stackrc b/stackrc index 97819d712b..399e45ee28 100644 --- a/stackrc +++ b/stackrc @@ -640,17 +640,21 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz";; + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img";; + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} - IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk";; + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} + IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; xenserver) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk.vhd.tgz} IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; esac From 71d20e6582e07a4567abc785e7237872574b6ac8 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Apr 2017 11:48:12 +1000 Subject: [PATCH 0476/1936] Always disable use of libvirt wheel with UCA It's not only using our upstream caches that you might get an old libvirt-python wheel that is incompatible with UCA. Move the ignore out of the mirror check to apply it globally. 
This is an alternative to Iba301a8c80c9ed584f5fb5a816f3d2cf5f5f0e77 Change-Id: I588b1e8e49aa60f3ce976dc1b6c8013ba1d88079 --- tools/fixup_stuff.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f3ba702a3b..d07d2675c6 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -82,15 +82,17 @@ if [[ "$DISTRO" = "xenial" ]]; then source /etc/ci/mirror_info.sh sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main" - - # Disable use of libvirt wheel here as presence of mirror implies - # presence of cached wheel build against older libvirt binary. - # TODO(clarkb) figure out how to use wheel again. - sudo bash -c 'echo "no-binary = libvirt-python" >> /etc/pip.conf' else # Otherwise use upstream UCA sudo add-apt-repository -y cloud-archive:ocata fi + + # Disable use of libvirt wheel since a cached wheel build might be + # against older libvirt binary. Particularly a problem if using + # the openstack wheel mirrors, but can hit locally too. + # TODO(clarkb) figure out how to use upstream wheel again. + iniset -sudo /etc/pip.conf "global" "no-binary" "libvirt-python" + # Force update our APT repos, since we added UCA above. REPOS_UPDATED=False apt_get_update From a881b887211a6bc9ce24a9b42400dffe88abdb67 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 19 Apr 2017 15:42:34 +1000 Subject: [PATCH 0477/1936] Switch to lioadm for centos cinder Centos/RHEL 7 doesn't support tgtd. While the packages are still in EPEL, there's no point in testing because nobody runs like this. Switch cinder to use lioadm which uses LIO, and update package installations. Depends-On: I964917d13d9415223845ac17eb804ee7faceaf6f Change-Id: Idc5a4e856bfc93e9dc650d565a98a8e9b3df3481 --- files/rpms/cinder | 3 ++- lib/cinder | 21 ++++++++++++++------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 0274642fd6..2c7b45baaf 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,4 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # NOPRIME +scsi-target-utils # not:rhel7,f24,f25 NOPRIME +targetcli # dist:rhel7,f24,f25 NOPRIME \ No newline at end of file diff --git a/lib/cinder b/lib/cinder index 9fc25c75bb..3c182ccb26 100644 --- a/lib/cinder +++ b/lib/cinder @@ -109,7 +109,16 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # https://bugs.launchpad.net/cinder/+bug/1180976 CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} -CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} +# Centos7 switched to using LIO and that's all that's supported, +# although the tgt bits are in EPEL we don't want that for CI +if is_fedora; then + CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} + if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then + die "lioadm is the only valid Cinder iscsi_helper config on this platform" + fi +else + CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} +fi # Toggle for deploying Cinder under HTTPD + mod_wsgi CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False} @@ -443,12 +452,10 @@ function init_cinder { function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then - if is_fedora; then - install_package scsi-target-utils - else - install_package tgt - fi + if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then + install_package tgt + elif [[ "$CINDER_ISCI_HELPER" == "lioadm" ]]; then + install_package targetcli fi if [ 
"$CINDER_USE_MOD_WSGI" == "True" ]; then From bc6c992e3c5cf7ae48e64203cc83cb7665c5050e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 27 Apr 2017 16:07:51 +1000 Subject: [PATCH 0478/1936] Remove fping requirement The os-fping API was deprecated in nova in I92064cbcb5f6414da0c9d294f912a860428af698. I can't see anything obviously using it on codesearch. This is only in EPEL for centos, which I'm trying to remove. But I think less dependencies is always better than more in general hence the removal. This is essentially a revert of Ibdc7479a9038321e4fc3953774a6f3e1dac90530 Change-Id: I163fc48c860bae2a92c83cfdaed26b2e54630e20 --- files/debs/n-api | 1 - files/rpms-suse/n-api | 1 - files/rpms/n-api | 1 - 3 files changed, 3 deletions(-) delete mode 100644 files/debs/n-api delete mode 100644 files/rpms/n-api diff --git a/files/debs/n-api b/files/debs/n-api deleted file mode 100644 index 0928cd56b9..0000000000 --- a/files/debs/n-api +++ /dev/null @@ -1 +0,0 @@ -fping diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api index af5ac2fc54..0f08daace3 100644 --- a/files/rpms-suse/n-api +++ b/files/rpms-suse/n-api @@ -1,2 +1 @@ -fping python-dateutil diff --git a/files/rpms/n-api b/files/rpms/n-api deleted file mode 100644 index 0928cd56b9..0000000000 --- a/files/rpms/n-api +++ /dev/null @@ -1 +0,0 @@ -fping From faffde1f970c0786d0256e4d51725fbe2ceda063 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 27 Apr 2017 09:54:27 -0700 Subject: [PATCH 0479/1936] Use string cert CA defaults Switch from sha1 to sha256 and from 1024 bits to 2048 bits. Do this because things don't like the old inseucre sha1+1024bits combo. Change-Id: Iae2958969aed0cd880844e19e8055c8bdc7d064d --- lib/tls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tls b/lib/tls index 238687c5dd..7c6b967bc4 100644 --- a/lib/tls +++ b/lib/tls @@ -113,11 +113,11 @@ new_certs_dir = \$dir/newcerts certificate = \$dir/cacert.pem private_key = \$dir/private/cacert.key RANDFILE = \$dir/private/.rand -default_md = default +default_md = sha256 [ req ] -default_bits = 1024 -default_md = sha1 +default_bits = 2048 +default_md = sha256 prompt = no distinguished_name = ca_distinguished_name From e43f60ba2a3a227918e5fdb652a0adc2643f618d Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 27 Apr 2017 18:27:36 -0500 Subject: [PATCH 0480/1936] Use the proper keystone endpoints in clouds.yaml KEYSTONE_SERVICE_API is the keystone endpoint and it is what we should use. The admin url should DIAF - but until it does, it CERTAINLY should not be the thing we put into clouds.yaml. 
Change-Id: If8196a04f852f633e0b7548793f68c92376aa6da --- functions-common | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 35b48603c5..be34bd6ee5 100644 --- a/functions-common +++ b/functions-common @@ -93,7 +93,7 @@ function write_clouds_yaml { --os-region-name $REGION_NAME \ --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ + --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username demo \ --os-password $ADMIN_PASSWORD \ --os-project-name demo @@ -105,7 +105,7 @@ function write_clouds_yaml { --os-region-name $REGION_NAME \ --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ + --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username alt_demo \ --os-password $ADMIN_PASSWORD \ --os-project-name alt_demo @@ -117,7 +117,7 @@ function write_clouds_yaml { --os-region-name $REGION_NAME \ --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ + --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username admin \ --os-password $ADMIN_PASSWORD \ --os-project-name admin From c006bbdeb26df2c60f43d222bdf918f9e24d551f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 26 Apr 2017 06:57:58 -0400 Subject: [PATCH 0481/1936] make USE_SCREEN=False imply USE_SYSTEMD=True In order to start making the transition in the gate make USE_SCREEN=False also mean USE_SYSTEMD=True. We'll never actually declare USE_SYSTEMD=True in the gate (as that doesn't exist for stable branches), but this will let us roll over the existing transition. We also have to install systemd-python 234 because we are recording exception info in the journal, and all versions before that had a bug in processing that. Remove the somewhat pointless screen following journalctl commands. We really don't want or need those, and they tend to build up over time. 
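For anyone who was relying on the removed follow processes, the same
live view is available straight from the journal. A sketch (n-cpu is
only an example unit name)::

    # follow one service's output in the same short-precise format
    # the removed helper used
    journalctl -f -o short-precise --unit devstack@n-cpu.service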
Depends-On: I24513f5cbac2c34cf0130bf812ff2df6ad76657c Change-Id: I6af6d1857effaf662a9d72bd394864934eacbe70 --- files/debs/general | 3 +-- files/rpms/general | 2 +- functions-common | 22 ++++------------------ stack.sh | 3 +++ stackrc | 9 +++++++++ 5 files changed, 18 insertions(+), 21 deletions(-) diff --git a/files/debs/general b/files/debs/general index 20490c6072..1dde03b7fe 100644 --- a/files/debs/general +++ b/files/debs/general @@ -17,6 +17,7 @@ libjpeg-dev # Pillow 3.0.0 libmysqlclient-dev # MySQL-python libpq-dev # psycopg2 libssl-dev # for pyOpenSSL +libsystemd-dev # for systemd-python libxml2-dev # lxml libxslt1-dev # lxml libyaml-dev @@ -26,10 +27,8 @@ openssl pkg-config psmisc python2.7 -python3-systemd python-dev python-gdbm # needed for testr -python-systemd screen tar tcpdump diff --git a/files/rpms/general b/files/rpms/general index 106aa6ae88..1393d18328 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -29,7 +29,7 @@ pyOpenSSL # version in pip uses too much memory python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 screen -systemd-python +systemd-devel # for systemd-python tar tcpdump unzip diff --git a/functions-common b/functions-common index 35b48603c5..65c38a58eb 100644 --- a/functions-common +++ b/functions-common @@ -1495,22 +1495,6 @@ function _run_under_systemd { $SYSTEMCTL enable $systemd_service $SYSTEMCTL start $systemd_service - _journal_log $service $systemd_service -} - -function _journal_log { - local service=$1 - local unit=$2 - local logfile="${service}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - if [[ -n ${LOGDIR} ]]; then - $JOURNALCTL_F $2 > "$real_logfile" & - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log - fi - fi } # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. @@ -1700,8 +1684,10 @@ function stop_process { # Only do this for units which appear enabled, this also # catches units that don't really exist for cases like # keystone without a failure. - $SYSTEMCTL stop devstack@$service.service - $SYSTEMCTL disable devstack@$service.service + if $SYSTEMCTL is-enabled devstack@$service.service; then + $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service + fi fi if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then diff --git a/stack.sh b/stack.sh index 31ea2e1112..8864a18d4f 100755 --- a/stack.sh +++ b/stack.sh @@ -746,6 +746,9 @@ fi # Do the ugly hacks for broken packages and distros source $TOP_DIR/tools/fixup_stuff.sh +if [[ "$USE_SYSTEMD" == "True" ]]; then + pip_install_gr systemd-python +fi # Virtual Environment # ------------------- diff --git a/stackrc b/stackrc index ed1cf6e074..6ff9585984 100644 --- a/stackrc +++ b/stackrc @@ -157,6 +157,15 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# if we are forcing off USE_SCREEN (as we do in the gate), force on +# systemd. This allows us to drop one of 3 paths through the code. +if [[ "$USE_SCREEN" == "False" ]]; then + # Remove in Pike: this gets us through grenade upgrade + if [[ "$GRENADE_PHASE" != "target" ]]; then + USE_SYSTEMD="True" + fi +fi + # Default for log coloring is based on interactive-or-not. 
# Baseline assumption is that non-interactive invocations are for CI, # where logs are to be presented as browsable text files; hence color From fa898f5174e3aff9c65366024c170ce3cdda306d Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 25 Apr 2017 01:30:10 -0400 Subject: [PATCH 0482/1936] Add external lock dir to glance api config As we move to enabling glance-api to use a wsgi script that might be run as multiple processes, there are a couple places where external synchronization is necessary. To use this we need to set the lock_path config option from oslo.concurrency so external locks will work. Change-Id: I9a66a8636d12037ff9aa4fb73cc3f9b9343dd7e9 --- lib/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 23a1cbf2c7..57f07f5aba 100644 --- a/lib/glance +++ b/lib/glance @@ -43,6 +43,7 @@ fi GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} +GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} @@ -116,6 +117,7 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 From 38d4782c9f6b8558fea2c5268b9bf5de5070a0fe Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Apr 2017 16:12:00 -0400 Subject: [PATCH 0483/1936] remove auth_uri from keystonemiddleware config auth_uri is not a keystonemiddleware option, and it's use in config files is confusing at best. Remove it for clarity. Change-Id: Ie3a9ab30d81809363444d5f3b41588b3889dc185 --- lib/keystone | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index a26ef8afd2..75a06791de 100644 --- a/lib/keystone +++ b/lib/keystone @@ -434,14 +434,13 @@ function configure_auth_token_middleware { local section=${4:-keystone_authtoken} iniset $conf_file $section auth_type password - iniset $conf_file $section auth_url $KEYSTONE_AUTH_URI + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI iniset $conf_file $section username $admin_user iniset $conf_file $section password $SERVICE_PASSWORD iniset $conf_file $section user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section project_name $SERVICE_PROJECT_NAME iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_URI iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section signing_dir $signing_dir iniset $conf_file $section memcached_servers $SERVICE_HOST:11211 From bb4431126b80be4805f1072442676797e6019576 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Apr 2017 16:22:42 -0400 Subject: [PATCH 0484/1936] Try to remove /identity_admin We should be able to operate without the identity admin endpoint, given that in v3 it's all the same. This floats that out there to see if we can or not. 
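A rough smoke test of the idea, assuming the default http deployment
with keystone served under /identity on $HOST_IP::

    # with identity v3 there is no separate admin API, so the single
    # public endpoint should answer version discovery on its own
    curl -s http://$HOST_IP/identity/v3 | python -m json.tool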
Change-Id: Ic233f6b43dd1e3cfdadff0f18aba4ea78825a996 --- lib/keystone | 14 ++++++-------- lib/tempest | 4 ++++ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/keystone b/lib/keystone index 75a06791de..fd1e29ce28 100644 --- a/lib/keystone +++ b/lib/keystone @@ -113,8 +113,9 @@ if is_service_enabled tls-proxy; then KEYSTONE_SERVICE_PROTOCOL="https" fi -KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity +# for compat +KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI # V3 URIs KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3 @@ -141,6 +142,7 @@ function is_keystone_enabled { # runs that a clean run would need to clean up function cleanup_keystone { if [[ "$WSGI_MODE" == "uwsgi" ]]; then + # TODO: remove admin at pike-2 remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" sudo rm -f $(apache_site_config_for keystone-wsgi-public) @@ -543,11 +545,7 @@ function start_keystone { tail_log key /var/log/$APACHE_NAME/keystone.log tail_log key-access /var/log/$APACHE_NAME/keystone_access.log else # uwsgi - # TODO(sdague): we should really get down to a single keystone here - enable_service key-p - enable_service key-a - run_process key-p "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" - run_process key-a "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_ADMIN_UWSGI_CONF" "" + run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" fi echo "Waiting for keystone to start..." @@ -578,9 +576,9 @@ function stop_keystone { disable_apache_site keystone restart_apache_server else - stop_process key-p - stop_process key-a + stop_process keystone remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + # TODO(remove in at pike-2) remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" fi # Kill the Keystone screen window diff --git a/lib/tempest b/lib/tempest index f19686a862..c5853c9a83 100644 --- a/lib/tempest +++ b/lib/tempest @@ -278,6 +278,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} + if [[ "$TEMPEST_AUTH_VERSION" != "v2.0" ]]; then + # we're going to disable v2 admin unless we're using v2.0 by default. 
+ iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False + fi if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE From c13b8a1f33a3ea78388794896a3e7f3256c89ec9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 20 Apr 2017 06:54:51 -0400 Subject: [PATCH 0485/1936] try to use unversioned keystone endpoints everywhere Change-Id: Iad2a3654d8ba181a7ad452d8aba872a8313d4ece --- lib/glance | 2 +- lib/neutron | 4 ++-- lib/neutron-legacy | 2 +- lib/nova_plugins/hypervisor-ironic | 2 +- lib/placement | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/glance b/lib/glance index 57f07f5aba..d6438a6b48 100644 --- a/lib/glance +++ b/lib/glance @@ -207,7 +207,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v3 + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user diff --git a/lib/neutron b/lib/neutron index 492a0ee8fb..c7f62ed4a9 100644 --- a/lib/neutron +++ b/lib/neutron @@ -226,7 +226,7 @@ function configure_neutron_new { iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI/v2.0 + iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT fi @@ -284,7 +284,7 @@ function configure_neutron_rootwrap { function configure_neutron_nova_new { iniset $NOVA_CONF DEFAULT use_neutron True iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_URI/v3" + iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_URI" iniset $NOVA_CONF neutron username neutron iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" iniset $NOVA_CONF neutron user_domain_name "Default" diff --git a/lib/neutron-legacy b/lib/neutron-legacy index af9a3d25b9..fa508f8180 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -368,7 +368,7 @@ function configure_mutnauq { function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT use_neutron True iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI/v3" + iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI" iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" iniset $NOVA_CONF neutron user_domain_name "$SERVICE_DOMAIN_NAME" diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index c9544fe6c7..7d47ef070c 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -49,7 +49,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic auth_type password iniset $NOVA_CONF ironic username admin iniset $NOVA_CONF ironic password $ADMIN_PASSWORD - iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI/v3 + iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic project_name demo diff --git a/lib/placement b/lib/placement index 
4755a58bf2..2c51ed69b4 100644 --- a/lib/placement +++ b/lib/placement @@ -102,7 +102,7 @@ function _config_placement_apache_wsgi { function configure_placement_nova_compute { iniset $NOVA_CONF placement auth_type "password" - iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI/v3" + iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI" iniset $NOVA_CONF placement username placement iniset $NOVA_CONF placement password "$SERVICE_PASSWORD" iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" From 803acffcf9254e328426ad67380a99f4f5b164ec Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 1 May 2017 10:52:38 -0400 Subject: [PATCH 0486/1936] Make ./clean.sh work in more situations When transitioning between different wsgi modes, or service modes, we should really safely stop and cleanup things that are started in any service mode, which makes it easier to ensure that we don't leave things around from past runs. Change-Id: I33acbee39e1a2da2bfd79a5dd54b84a12a778be1 --- functions-common | 15 ++++++--------- lib/keystone | 20 ++++++++++---------- lib/placement | 1 + 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/functions-common b/functions-common index 65c38a58eb..90f540017c 100644 --- a/functions-common +++ b/functions-common @@ -1679,15 +1679,12 @@ function stop_process { SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} if is_service_enabled $service; then - # Kill via pid if we have one available - if [[ "$USE_SYSTEMD" == "True" ]]; then - # Only do this for units which appear enabled, this also - # catches units that don't really exist for cases like - # keystone without a failure. - if $SYSTEMCTL is-enabled devstack@$service.service; then - $SYSTEMCTL stop devstack@$service.service - $SYSTEMCTL disable devstack@$service.service - fi + # Only do this for units which appear enabled, this also + # catches units that don't really exist for cases like + # keystone without a failure. 
+ if $SYSTEMCTL is-enabled devstack@$service.service; then + $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service fi if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then diff --git a/lib/keystone b/lib/keystone index fd1e29ce28..61b0a0c287 100644 --- a/lib/keystone +++ b/lib/keystone @@ -141,16 +141,16 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - # TODO: remove admin at pike-2 - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - sudo rm -f $(apache_site_config_for keystone-wsgi-admin) - else - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) - fi + # TODO: remove admin at pike-2 + # These files will be created if we are running WSGI_MODE="uwsgi" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) + sudo rm -f $(apache_site_config_for keystone-wsgi-admin) + + # These files will be created if we are running WSGI_MODE="mod_wsgi" + disable_apache_site keystone + sudo rm -f $(apache_site_config_for keystone) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone diff --git a/lib/placement b/lib/placement index 2c51ed69b4..ad12824a38 100644 --- a/lib/placement +++ b/lib/placement @@ -69,6 +69,7 @@ function is_placement_enabled { # cleanup_placement() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_placement { + sudo rm -f $(apache_site_config_for nova-placement-api) sudo rm -f $(apache_site_config_for placement-api) } From 8b8441f3becbae2e704932569bff384dcc5c6713 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 2 May 2017 06:14:11 -0400 Subject: [PATCH 0487/1936] Default developer use case to systemd This moves the developer use case over to systemd, and updates all the relevant docs to discuss the systemd workflow instead of screen. It does so by defaulting USE_SCREEN=False, so will not impact people that set it explicitly. Change-Id: I6d664612bc2b850eb7f56852afbc841867223ab7 --- doc/source/configuration.rst | 42 +++----------- doc/source/development.rst | 81 ++++++++++----------------- doc/source/faq.rst | 5 +- doc/source/site-map.rst | 1 + SYSTEMD.rst => doc/source/systemd.rst | 36 +++--------- stack.sh | 7 +++ stackrc | 16 ++++-- 7 files changed, 68 insertions(+), 120 deletions(-) rename SYSTEMD.rst => doc/source/systemd.rst (85%) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 53ae82ff8a..318e044909 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -278,43 +278,22 @@ number of days of old log files to keep. LOGDAYS=1 -The some of the project logs (Nova, Cinder, etc) will be colorized by -default (if ``SYSLOG`` is not set below); this can be turned off by -setting ``LOG_COLOR`` to ``False``. - - :: +Some coloring is used during the DevStack runs to make it easier to +see what is going on. This can be disabled with:: LOG_COLOR=False Logging the Service Output ~~~~~~~~~~~~~~~~~~~~~~~~~~ -DevStack will log the ``stdout`` output of the services it starts. 
-When using ``screen`` this logs the output in the screen windows to a -file. Without ``screen`` this simply redirects stdout of the service -process to a file in ``LOGDIR``. - - :: - - LOGDIR=$DEST/logs +By default, services run under ``systemd`` and are natively logging to +the systemd journal. -Note the use of ``DEST`` to locate the main install directory; this -is why we suggest setting it in ``local.conf``. - -Enabling Syslog -~~~~~~~~~~~~~~~ - -Logging all services to a single syslog can be convenient. Enable -syslogging by setting ``SYSLOG`` to ``True``. If the destination log -host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be used -to direct the message stream to the log host. - - :: +To query the logs use the ``journalctl`` command, such as:: - SYSLOG=True - SYSLOG_HOST=$HOST_IP - SYSLOG_PORT=516 + journalctl --unit devstack@* +More examples can be found in :ref:`journalctl-examples`. Example Logging Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -326,7 +305,6 @@ a file, keep service logs and disable color in the stored files. [[local|localrc]] DEST=/opt/stack/ - LOGDIR=$DEST/logs LOGFILE=$LOGDIR/stack.sh.log LOG_COLOR=False @@ -587,9 +565,7 @@ Swift Swift is disabled by default. When enabled, it is configured with only one replica to avoid being IO/memory intensive on a small -VM. When running with only one replica the account, container and -object services will run directly in screen. The others services like -replicator, updaters or auditor runs in background. +VM. If you would like to enable Swift you can add this to your ``localrc`` section: @@ -630,7 +606,7 @@ install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the ``nova-objectstore``. -Only Swift proxy server is launched in the screen session all other +Only Swift proxy server is launched in the systemd system all other services are started in background and managed by ``swift-init`` tool. Heat diff --git a/doc/source/development.rst b/doc/source/development.rst index 776ac6cb64..a3f2747604 100644 --- a/doc/source/development.rst +++ b/doc/source/development.rst @@ -8,56 +8,33 @@ with it? Inspecting Services =================== -By default most services in DevStack are running in a `screen -`_ -session. +By default most services in DevStack are running as `systemd` units +named `devstack@$servicename.service`. You can see running services +with. .. code-block:: bash - os3:~> screen -list - There is a screen on: - 28994.stack (08/10/2016 09:01:33 PM) (Detached) - 1 Socket in /var/run/screen/S-sdague. + sudo systemctl status --unit="devstack@*" -You can attach to this screen session using ``screen -r`` which gives -you a view of the services in action. - -.. image:: assets/images/screen_session_1.png - :width: 100% - -Basic Screen Commands ---------------------- - -The following minimal commands will be useful to using screen: - -* ``ctrl-a n`` - go to next window. Next is assumed to be right of - current window. -* ``ctrl-a p`` - go to previous window. Previous is assumed to be left - of current window. -* ``ctrl-a [`` - entry copy/scrollback mode. This allows you to - navigate back through the logs with the up arrow. -* ``ctrl-a d`` - detach from screen. Gets you back to a normal - terminal, while leaving everything running. - -For more about using screen, see the excellent `screen manual -`_. 
+To learn more about the basics of systemd, see :doc:`/systemd` Patching a Service ================== If you want to make a quick change to a running service the easiest -way to do this is: +way to do that is to change the code directly in /opt/stack/$service +and then restart the affected daemons. + +.. code-block:: bash + + sudo systemctl restart --unit=devstack@n-cpu.service + +If your change impacts more than one daemon you can restart by +wildcard as well. -* attach to screen -* navigate to the window in question -* ``ctrl-c`` to kill the service -* make appropriate changes to the code -* ``up arrow`` in the screen window to display the command used to run - that service -* ``enter`` to restart the service +.. code-block:: bash -This works for services, except those running under Apache (currently -just ``keystone`` by default). + sudo systemctl restart --unit="devstack@n-*" .. warning:: @@ -102,14 +79,6 @@ in gerrit by using the ref name that gerrit assigns to each change. NOVA_BRANCH=refs/changes/10/353710/1 -Testing Changes to Apache Based Services -======================================== - -When testing changes to Apache based services, such as ``keystone``, -you can either use the Testing a Patch Series approach above, or make -changes in the code tree and issue an apache restart. - - Testing Changes to Libraries ============================ @@ -132,9 +101,17 @@ your changes instead of just upstream master. OSLOPOLICY_REPO=/home/sdague/oslo.policy OSLOPOLICY_BRANCH=better_exception -Because libraries are used by many services, library changes really -need to go through a full ``./unstack.sh && ./stack.sh`` to see your -changes in action. +As libraries are not installed `editable` by pip, after you make any +local changes you will need to: + +* cd to top of library path +* sudo pip install -U . +* restart all services you want to use the new library + +You can do that with wildcards such as + +.. code-block:: bash + + sudo systemctl restart --unit="devstack@n-*" -To figure out the repo / branch names for every library that's -supported, you'll need to read the devstack source. +which will restart all nova services. diff --git a/doc/source/faq.rst b/doc/source/faq.rst index f03304f37f..cb2f3281f6 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -41,8 +41,9 @@ Why not use packages? ~~~~~~~~~~~~~~~~~~~~~ Unlike packages, DevStack leaves your cloud ready to develop - -checkouts of the code and services running in screen. However, many -people are doing the hard work of packaging and recipes for production +checkouts of the code and services running locally under systemd, +making it easy to hack on and test new patches. However, many people +are doing the hard work of packaging and recipes for production deployments. Why isn't $MY\_FAVORITE\_DISTRO supported? diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst index 801fc66b80..022cc73959 100644 --- a/doc/source/site-map.rst +++ b/doc/source/site-map.rst @@ -21,3 +21,4 @@ development hacking guides + systemd diff --git a/SYSTEMD.rst b/doc/source/systemd.rst similarity index 85% rename from SYSTEMD.rst rename to doc/source/systemd.rst index 729fdf47b6..efe79e4b0c 100644 --- a/SYSTEMD.rst +++ b/doc/source/systemd.rst @@ -2,12 +2,7 @@ Using Systemd in DevStack =========================== -.. note:: - - This is an in progress document as we work out the way forward here - with DevStack and systemd. 
- -DevStack can be run with all the services as systemd unit +By default DevStack is run with all the services as systemd unit files. Systemd is now the default init system for nearly every Linux distro, and systemd encodes and solves many of the problems related to poorly running processes. @@ -25,12 +20,6 @@ There is also a common developer workflow of changing code in more than one service, and needing to restart a bunch of services for that to take effect. -To enable this add the following to your local.conf:: - - USE_SYSTEMD=True - - - Unit Structure ============== @@ -42,8 +31,9 @@ Unit Structure code is left in place in case we can switch back later. All DevStack user units are created as a part of the DevStack slice -given the name ``devstack@$servicename.service``. This lets us do -certain operations at the slice level. +given the name ``devstack@$servicename.service``. This makes it easy +to understand which services are part of the devstack run, and lets us +disable / stop them in a single command. Manipulating Units ================== @@ -89,6 +79,8 @@ Or to see the status of all Nova processes you can do:: We'll eventually make the unit names a bit more meaningful so that it's easier to understand what you are restarting. +.. _journalctl-examples: + Querying Logs ============= @@ -126,7 +118,7 @@ pypi, and they are all very different. They unfortunately all install into the ``systemd`` namespace, which can cause some issues. - ``systemd-python`` - this is the upstream maintained library, it has - a version number like systemd itself (currently ``233``). This is + a version number like systemd itself (currently ``234``). This is the one you want. - ``systemd`` - a python 3 only library, not what you want. - ``python-systemd`` - another library you don't want. Installing it @@ -145,20 +137,6 @@ system units. Future Work =========== -oslo.log journald ------------------ - -Journald has an extremely rich mechanism for direct logging including -structured metadata. We should enhance oslo.log to take advantage of -that. It would let us do things like:: - - journalctl REQUEST_ID=...... - - journalctl INSTANCE_ID=...... - -And get all lines related to the request id or instance id. (Note: -this work has been started at https://review.openstack.org/#/c/451525/) - log colorizing -------------- diff --git a/stack.sh b/stack.sh index 40ebf5a853..2c10200f75 100755 --- a/stack.sh +++ b/stack.sh @@ -1474,6 +1474,13 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# If USE_SYSTEMD is enabled, tell the user about using it. +if [[ "$USE_SYSTEMD" == "True" ]]; then + echo "Services are running under systemd unit files." + echo "For more information see: " + echo "https://docs.openstack.org/developer/devstack/systemd.html" +fi + # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." diff --git a/stackrc b/stackrc index d5c0d74e25..1300f45cda 100644 --- a/stackrc +++ b/stackrc @@ -80,12 +80,19 @@ NOVA_ENABLED_APIS=osapi_compute,metadata # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" +# TODO(sdague): Queens +# +# All the non systemd paths should be removed in queens, they only +# exist in Pike to support testing from grenade. Ensure that all this +# is cleaned up and purged, which should dramatically simplify the +# devstack codebase. + # Whether to use 'dev mode' for screen windows. 
Dev mode works by # stuffing text into the screen windows so that a developer can use # ctrl-c, up-arrow, enter to restart the service. Starting services # this way is slightly unreliable, and a bit slower, so this can # be disabled for automated testing by setting this value to False. -USE_SCREEN=$(trueorfalse True USE_SCREEN) +USE_SCREEN=$(trueorfalse False USE_SCREEN) # Whether to use SYSTEMD to manage services USE_SYSTEMD=$(trueorfalse False USE_SYSTEMD) @@ -100,9 +107,6 @@ else JOURNALCTL_F="journalctl -f -o short-precise --unit" fi -if [[ "$USE_SYSTEMD" == "True" ]]; then - USE_SCREEN=False -fi # Whether or not to enable Kernel Samepage Merging (KSM) if available. # This allows programs that mark their memory as mergeable to share @@ -157,6 +161,10 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# TODO(sdague): Delete all this in Queens. +if [[ "$USE_SYSTEMD" == "True" ]]; then + USE_SCREEN=False +fi # if we are forcing off USE_SCREEN (as we do in the gate), force on # systemd. This allows us to drop one of 3 paths through the code. if [[ "$USE_SCREEN" == "False" ]]; then From 27f66e9830727addd3d84b7a23eee907210540bb Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 2 May 2017 09:08:17 -0400 Subject: [PATCH 0488/1936] Fix logging when under systemd Both keystone and neutron didn't yet have systemd awareness for setting up logging (i.e. drop the extra date / time stamps) Change-Id: Ib442c603c9afb679676976c37c2c6122201ae846 --- lib/keystone | 4 +--- lib/neutron | 7 +------ 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/lib/keystone b/lib/keystone index 61b0a0c287..4bb6893089 100644 --- a/lib/keystone +++ b/lib/keystone @@ -267,9 +267,7 @@ function configure_keystone { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$KEYSTONE_DEPLOY" != "mod_wsgi" ] ; then - setup_colorized_logging $KEYSTONE_CONF - fi + setup_logging $KEYSTONE_CONF iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL diff --git a/lib/neutron b/lib/neutron index c7f62ed4a9..941a697816 100644 --- a/lib/neutron +++ b/lib/neutron @@ -231,12 +231,7 @@ function configure_neutron_new { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id - else - # Show user_name and project_name by default - iniset $NEUTRON_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" - fi + setup_logging $NEUTRON_CONF if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original From 29c1f981feded19f81f873e5147428ec87cd7c52 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 2 May 2017 08:40:26 -0400 Subject: [PATCH 0489/1936] convert README to rst Everything else in our documentation is RST, we should convert the Readme to RST as well (as github supports this, so it's fine to read it there) Change-Id: If2aabf629affc09b5daa570f4ca3bdf268cb53b0 --- README.md => README.rst | 26 ++++++++++++++++---------- setup.cfg | 2 +- 2 files changed, 17 insertions(+), 11 deletions(-) rename README.md => README.rst (89%) diff --git a/README.md b/README.rst similarity index 89% rename from README.md rename to README.rst index ff5598b0c5..dfa68b927f 100644 --- a/README.md +++ b/README.rst @@ -1,6 +1,7 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. 
-# Goals +Goals +===== * To quickly build dev OpenStack environments in a clean Ubuntu or Fedora environment @@ -20,12 +21,13 @@ execute before you run them, as they install software and will alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. -# Versions +Versions +======== The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the -following to create a Newton OpenStack cloud: +following to create a Newton OpenStack cloud:: git checkout stable/newton ./stack.sh @@ -38,7 +40,8 @@ milestone-proposed branches that need to be tested:: GLANCE_REPO=git://git.openstack.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed -# Start A Dev Cloud +Start A Dev Cloud +================= Installing in a dedicated disposable VM is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for @@ -54,14 +57,15 @@ endpoints, like so: * Keystone: http://myhost:5000/v2.0/ We also provide an environment file that you can use to interact with your -cloud via CLI: +cloud via CLI:: # source openrc file to load your environment with OpenStack CLI creds . openrc # list instances - nova list + openstack server list -# DevStack Execution Environment +DevStack Execution Environment +============================== DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe @@ -81,10 +85,12 @@ check it out to see what DevStack's expectations are for the account it runs under. Many people simply use their usual login (the default 'ubuntu' login on a UEC image for example). -# Customizing +Customizing +=========== DevStack can be extensively configured via the configuration file `local.conf`. It is likely that you will need to provide and modify this file if you want anything other than the most basic setup. Start -by reading the [configuration guide](doc/source/configuration.rst) for -details of the configuration file and the many available options. +by reading the `configuration guide +_` +for details of the configuration file and the many available options. 
diff --git a/setup.cfg b/setup.cfg index e4b2888dcb..3487f6541b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ name = DevStack summary = OpenStack DevStack description-file = - README.md + README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/devstack From 2686b36c32cf99e3fad5e55466d8c582b58f82cb Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 2 May 2017 08:49:17 -0400 Subject: [PATCH 0490/1936] Correct various inaccuracies in overview.rst This still was referencing nova-network Change-Id: Id584d3fd26482330f42319aee4cb36981066db55 --- doc/source/overview.rst | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/doc/source/overview.rst b/doc/source/overview.rst index d245035a1a..e64819372e 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -20,11 +20,11 @@ Base OS *The OpenStack Technical Committee (TC) has defined the current CI strategy to include the latest Ubuntu release and the latest RHEL -release (for Python 2.6 testing).* +release.* - Ubuntu: current LTS release plus current development release - Fedora: current release plus previous release -- RHEL: current major release +- RHEL/Centos: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly @@ -38,7 +38,6 @@ Databases *As packaged by the host OS* - MySQL -- PostgreSQL Queues ------ @@ -46,7 +45,6 @@ Queues *As packaged by the host OS* - Rabbit -- Qpid Web Server ---------- @@ -58,9 +56,6 @@ Web Server OpenStack Network ----------------- -*Defaults to nova network, optionally use neutron* - -- Nova Network: FlatDHCP - Neutron: A basic configuration approximating the original FlatDHCP mode using linuxbridge or OpenVSwitch. @@ -68,9 +63,9 @@ Services -------- The default services configured by DevStack are Identity (keystone), -Object Storage (swift), Image Service (glance), Block Storage (cinder), -Compute (nova), Networking (nova), Dashboard (horizon), Orchestration -(heat) +Object Storage (swift), Image Service (glance), Block Storage +(cinder), Compute (nova), Networking (neutron), Dashboard (horizon), +Orchestration (heat) Additional services not included directly in DevStack can be tied in to ``stack.sh`` using the :doc:`plugin mechanism ` to call @@ -80,8 +75,7 @@ Node Configurations ------------------- - single node -- multi-node is not tested regularly by the core team, and even then - only minimal configurations are reviewed +- multi-node configurations as are tested by the gate Exercises --------- From 3336b4bed6455e9e77a7696b353d8798270c5b2e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 2 May 2017 08:45:34 -0400 Subject: [PATCH 0491/1936] remove out of date heat references This removes some remnant heat code and references now that heat is running in a plugin. Before merging this patch the heat team should verify they got everything they needed into their heat plugin, as there were more parts left than I was expecting. 
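For anyone who still wants heat in their environment, the plugin
route looks roughly like this in local.conf (a sketch only; the exact
repository URL and branch are owned by the heat project, not
devstack)::

    [[local|localrc]]
    # heat now ships its own devstack plugin; enabling it replaces
    # the old "enable_service heat h-api h-api-cfn h-api-cw h-eng"
    enable_plugin heat git://git.openstack.org/openstack/heat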
Change-Id: I477e3a6e75591aa8ff836c28f7ef56aa1b5f8727 --- doc/source/configuration.rst | 23 ----------------------- doc/source/faq.rst | 10 ---------- doc/source/overview.rst | 3 +-- lib/tempest | 18 ------------------ stack.sh | 6 +----- stackrc | 8 -------- 6 files changed, 2 insertions(+), 66 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 318e044909..66b8702f76 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -609,29 +609,6 @@ act as a S3 endpoint for Keystone so effectively replacing the Only Swift proxy server is launched in the systemd system all other services are started in background and managed by ``swift-init`` tool. -Heat -~~~~ - -Heat is disabled by default (see ``stackrc`` file). To enable it -explicitly you'll need the following settings in your ``localrc`` -section - -:: - - enable_service heat h-api h-api-cfn h-api-cw h-eng - -Heat can also run in standalone mode, and be configured to orchestrate -on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your ``localrc`` section - -:: - - disable_all_services - enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng - HEAT_STANDALONE=True - KEYSTONE_SERVICE_HOST=... - KEYSTONE_AUTH_HOST=... - Tempest ~~~~~~~ diff --git a/doc/source/faq.rst b/doc/source/faq.rst index cb2f3281f6..a186336f54 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -159,16 +159,6 @@ to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is often good enough for a single-node installation. And in an extreme case, use ``clean.sh`` to eradicate it and try again. -Configure ``local.conf`` thusly: - - :: - - [[local|localrc]] - HEAT_STANDALONE=True - ENABLED_SERVICES=rabbit,mysql,heat,h-api,h-api-cfn,h-api-cw,h-eng - KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= - Why are my configuration changes ignored? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/overview.rst b/doc/source/overview.rst index e64819372e..c07a8e6d67 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -64,8 +64,7 @@ Services The default services configured by DevStack are Identity (keystone), Object Storage (swift), Image Service (glance), Block Storage -(cinder), Compute (nova), Networking (neutron), Dashboard (horizon), -Orchestration (heat) +(cinder), Compute (nova), Networking (neutron), Dashboard (horizon) Additional services not included directly in DevStack can be tied in to ``stack.sh`` using the :doc:`plugin mechanism ` to call diff --git a/lib/tempest b/lib/tempest index fd8e3e7e14..47785ec06b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -394,24 +394,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY - # Orchestration Tests - if is_service_enabled heat; then - if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then - iniset $TEMPEST_CONFIG orchestration image_ref $(basename "${HEAT_CFN_IMAGE_URL%.*}") - fi - # Nova might not be enabled, especially when we want to test tempest scenario/API that only create Neutron resources - if is_service_enabled nova; then - # build a specialized heat flavor - available_flavors=$(nova flavor-list) - if [[ ! 
( $available_flavors =~ 'm1.heat' ) ]]; then - openstack flavor create --id 451 --ram 512 --disk 0 --vcpus 1 m1.heat - fi - iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" - fi - iniset $TEMPEST_CONFIG orchestration build_timeout 900 - iniset $TEMPEST_CONFIG orchestration stack_owner_role Member - fi - # Scenario if [ "$VIRT_DRIVER" = "xenserver" ]; then SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} diff --git a/stack.sh b/stack.sh index 2c10200f75..bddf11f830 100755 --- a/stack.sh +++ b/stack.sh @@ -2,7 +2,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Cinder**, **Glance**, -# **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** +# **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** # This script's options can be changed by setting appropriate environment # variables. You can configure things like which git repositories to use, @@ -1328,10 +1328,6 @@ if is_service_enabled nova && is_service_enabled keystone; then USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" fi - if [[ "$HEAT_STANDALONE" = "True" ]]; then - USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" - fi - $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS fi diff --git a/stackrc b/stackrc index 1300f45cda..35856ca73e 100644 --- a/stackrc +++ b/stackrc @@ -518,14 +518,6 @@ GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master} GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master} -# heat-cfntools server agent -HEAT_CFNTOOLS_REPO=${HEAT_CFNTOOLS_REPO:-${GIT_BASE}/openstack/heat-cfntools.git} -HEAT_CFNTOOLS_BRANCH=${HEAT_CFNTOOLS_BRANCH:-master} - -# heat example templates and elements -HEAT_TEMPLATES_REPO=${HEAT_TEMPLATES_REPO:-${GIT_BASE}/openstack/heat-templates.git} -HEAT_TEMPLATES_BRANCH=${HEAT_TEMPLATES_BRANCH:-master} - # django openstack_auth library GITREPO["django_openstack_auth"]=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git} GITBRANCH["django_openstack_auth"]=${HORIZONAUTH_BRANCH:-master} From b2bfe5617ac43a678a9113d5c955f5456c151319 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 3 May 2017 09:58:21 -0400 Subject: [PATCH 0492/1936] Turn off use_journal because of eventlet concerns use_journal uses the systemd native path for logging, however there are concerns that this might be negatively interacting with eventlet. To be on the safe side fall back to stdout. This introduces a USE_JOURNAL option which will let folks turn this back on for testing. This also adjusts the debug lines. When using the journal the pid reported by systemd is correct. When using stdout, it will be the parent process id, so we need to keep it to see which child each thing is coming from. Change-Id: Id7891c532bf99c099252e82d511a37a49506fea9 --- functions | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/functions b/functions index c99e435175..8968a1861b 100644 --- a/functions +++ b/functions @@ -606,13 +606,25 @@ function setup_colorized_logging { function setup_systemd_logging { local conf_file=$1 local conf_section="DEFAULT" - iniset $conf_file $conf_section use_journal "True" + # NOTE(sdague): this is a nice to have, and means we're using the + # native systemd path, which provides for things like search on + # request-id. However, there may be an eventlet interaction here, + # so going off for now. 
+ USE_JOURNAL=$(trueorfalse USE_JOURNAL False) + if [[ "$USE_JOURNAL" == "True" ]]; then + iniset $conf_file $conf_section use_journal "True" + # if we are using the journal directly, our process id is already correct + iniset $conf_file $conf_section logging_debug_format_suffix \ + "{{%(funcName)s %(pathname)s:%(lineno)d}}" + else + iniset $conf_file $conf_section logging_debug_format_suffix \ + "{{(pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d}}" + fi + iniset $conf_file $conf_section logging_context_format_string \ "%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s] %(instance)s%(message)s" iniset $conf_file $conf_section logging_default_format_string \ "%(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_debug_format_suffix \ - "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" } From 401de4d20cb204b811dd5877e9e7688d72eb6e39 Mon Sep 17 00:00:00 2001 From: TommyLike Date: Thu, 4 May 2017 17:56:22 +0800 Subject: [PATCH 0493/1936] Copy 'resource_filters.json' file to cinder config folder Cinder use 'resource_filters.json' to config allowed filters, copy the new added json file when set config files. Change-Id: I397cb5859e2b3349af3cb07ee02b6463c6eccc35 Depends-On: 27aeba0b5d3cf64286125937e8336ba1d3b26b16 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index 9fc25c75bb..e3a687b4f2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -244,6 +244,10 @@ function configure_cinder { configure_rootwrap cinder + if [[ -f "$CINDER_DIR/etc/cinder/resource_filters.json" ]]; then + cp -p "$CINDER_DIR/etc/cinder/resource_filters.json" "$CINDER_CONF_DIR/resource_filters.json" + fi + cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host From 148d58c3519aca4723edd6eb59fc7150b92e74d7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 4 May 2017 13:08:25 -0400 Subject: [PATCH 0494/1936] Make devstack fail early for common systemd pitfalls There are a couple of issues that have ended up being hit by devstack plugin authors which we can detect and error in a much nicer way instead of them having a cryptic systemd failure. Change-Id: I45e4ac363aeefb4503015f9e1b57c58f79b58f40 --- functions-common | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 90f540017c..fe608b4a48 100644 --- a/functions-common +++ b/functions-common @@ -1480,10 +1480,41 @@ function write_uwsgi_user_unit_file { $SYSTEMCTL daemon-reload } +function _common_systemd_pitfalls { + local cmd=$1 + # do some sanity checks on $cmd to see things we don't expect to work + + if [[ "$cmd" =~ "sudo" ]]; then + local msg=< Date: Thu, 4 May 2017 21:13:29 +0200 Subject: [PATCH 0495/1936] Fix systemctl option: removes --unit option in doc --unit option is for journalctl, not systemctl. Just executing systemctl without "--unit=" works. Change-Id: I9752561332e62ec6327b17b12d2d868892718041 --- doc/source/development.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/development.rst b/doc/source/development.rst index a3f2747604..957de9b0e1 100644 --- a/doc/source/development.rst +++ b/doc/source/development.rst @@ -14,7 +14,7 @@ with. .. 
code-block:: bash - sudo systemctl status --unit="devstack@*" + sudo systemctl status "devstack@*" To learn more about the basics of systemd, see :doc:`/systemd` @@ -27,14 +27,14 @@ and then restart the affected daemons. .. code-block:: bash - sudo systemctl restart --unit=devstack@n-cpu.service + sudo systemctl restart devstack@n-cpu.service If your change impacts more than one daemon you can restart by wildcard as well. .. code-block:: bash - sudo systemctl restart --unit="devstack@n-*" + sudo systemctl restart "devstack@n-*" .. warning:: @@ -112,6 +112,6 @@ You can do that with wildcards such as .. code-block:: bash - sudo systemctl restart --unit="devstack@n-*" + sudo systemctl restart "devstack@n-*" which will restart all nova services. From e264b4ab323385e8c861da79c96cf9d643ca395d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 4 May 2017 15:56:37 -0400 Subject: [PATCH 0496/1936] disable journald rate limiting systemd-journald has rate limiting built in, but that's not sufficient for the level of logging of OpenStack services during test runs. Disable the rate limiting so that no log messages are lost. Change-Id: I64599aba74c5a39276bb8f946cd236600b9cc81b --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 2c10200f75..9fdb47ecbd 100755 --- a/stack.sh +++ b/stack.sh @@ -748,6 +748,10 @@ source $TOP_DIR/tools/fixup_stuff.sh if [[ "$USE_SYSTEMD" == "True" ]]; then pip_install_gr systemd-python + # the default rate limit of 1000 messages / 30 seconds is not + # sufficient given how verbose our logging is. + iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" + sudo systemctl restart systemd-journald fi # Virtual Environment From eaadffe07b0516f103321c84013b755061ba7fb5 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 4 May 2017 16:05:19 -0400 Subject: [PATCH 0497/1936] Always have ./clean.sh run ./unstack.sh There was detection code in clean.sh to only run it if a screen session was found, but in systemd world, that's obviously not true. This was causing me (and others) substantial confusion. Change-Id: I204e94cd86b8c67012aabfca74796e593151c3a4 --- clean.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/clean.sh b/clean.sh index 90b21eb353..ef38fbf4f8 100755 --- a/clean.sh +++ b/clean.sh @@ -64,13 +64,8 @@ if [[ -d $TOP_DIR/extras.d ]]; then done fi -# See if there is anything running... -# need to adapt when run_service is merged -SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') -if [[ -n "$SESSION" ]]; then - # Let unstack.sh do its thing first - $TOP_DIR/unstack.sh --all -fi +# Let unstack.sh do its thing first +$TOP_DIR/unstack.sh --all # Run extras # ========== From f41bf2a92fab6ffcea48d8b295c04ace4d2fa7ff Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 5 May 2017 07:50:52 -0400 Subject: [PATCH 0498/1936] Set SyslogIdentifier for uwsgi processes Now that we aren't using native journal messages by default, the syslog identifier of all the uwsgi processes is the same. We should be more explicit with those. 
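With the identifier set, each uwsgi-backed service can be pulled out
of the journal on its own. A sketch (keystone is just an example
service name)::

    # filter journal output by the per-service syslog identifier
    journalctl SYSLOG_IDENTIFIER=keystone -o short-precise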
Change-Id: Id5406d02407b022d4016517c2e18890973876d88 --- functions-common | 1 + 1 file changed, 1 insertion(+) diff --git a/functions-common b/functions-common index 90f540017c..c3f37b9493 100644 --- a/functions-common +++ b/functions-common @@ -1463,6 +1463,7 @@ function write_uwsgi_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "Type" "notify" From 621704f4b3503b2509db18a9a0db134d54789fab Mon Sep 17 00:00:00 2001 From: Mike Perez Date: Fri, 5 May 2017 08:59:25 -0700 Subject: [PATCH 0499/1936] Stop setting osapi_volume_base_URL This option is a duplicate of public_endpoint for Cinder. Change-Id: I8aee1b9f93a09d2e92bde80c0e413e1540723bac Depends-On: I2a74af7906d14cbc49b8cf0a88c344ca30fcbd26 --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 9fc25c75bb..b3de1915e4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -319,7 +319,6 @@ function configure_cinder { # Set the service port for a proxy to take the original iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT fi if [ "$SYSLOG" != "False" ]; then From 1df17c94f512f0d1a084ba70cc0d4feb05feb8fb Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 1 May 2017 17:00:42 +0900 Subject: [PATCH 0500/1936] lib/neutron: Don't assume plugin config is provided by neutron repo It isn't the case for plugins provided by subprojects. Change-Id: Ic28a9da9f11355e5e1eae5db9b2d4ee6ed081b55 --- lib/neutron | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 492a0ee8fb..fd08a125be 100644 --- a/lib/neutron +++ b/lib/neutron @@ -135,7 +135,11 @@ function configure_neutron_new { mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF + # NOTE(yamamoto): A decomposed plugin should prepare the config file in + # its devstack plugin. + if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then + cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF + fi iniset $NEUTRON_CONF database connection `database_connection_url neutron` iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH From f28e7ef6ba48030d7bc5c6ea27f28b8a1fc281e9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 7 May 2017 22:02:10 -0400 Subject: [PATCH 0501/1936] uninstall libvirt-python and reinstall libvirt-python compiles against the currently installed libvirt. If you upgrade that, it needs to rebuild, however it won't change versions, so pip install just noops. Force an uninstall / reinstall of it every time to handle potential upgrades of libvirt. 
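The rough manual equivalent on an already-stacked host, assuming a
system-wide pip install (add whatever constraints file you normally
pass)::

    # drop the binding built against the old libvirt, then rebuild it
    # against the currently installed library and headers
    sudo pip uninstall -y libvirt-python
    sudo pip install --force-reinstall libvirt-python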
Change-Id: If34541b34aa6d55eedaf6c603fd1fe92eb887308 --- inc/python | 14 ++++++++++++++ lib/nova_plugins/functions-libvirt | 3 +++ 2 files changed, 17 insertions(+) diff --git a/inc/python b/inc/python index 2443c4d465..a004217b4a 100644 --- a/inc/python +++ b/inc/python @@ -351,6 +351,20 @@ function pip_install { return $result } +function pip_uninstall { + local name=$1 + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip + local sudo_pip="env" + else + local cmd_pip + cmd_pip=$(get_pip_command $PYTHON2_VERSION) + local sudo_pip="sudo -H" + fi + # don't error if we can't uninstall, it might not be there + $sudo_pip $cmd_pip uninstall $name || /bin/true +} + # get version of a package from global requirements file # get_from_global_requirements function get_from_global_requirements { diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 47605af991..326c8bf4ae 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -67,6 +67,8 @@ function install_libvirt { else install_package libvirt-clients libvirt-daemon-system libvirt-dev fi + # uninstall in case the libvirt version changed + pip_uninstall libvirt-python pip_install_gr libvirt-python #pip_install_gr elif is_fedora || is_suse; then @@ -84,6 +86,7 @@ function install_libvirt { fi install_package libvirt libvirt-devel + pip_uninstall libvirt-python pip_install_gr libvirt-python fi From db8a199b309e22f1341186517c00ec875ae421bb Mon Sep 17 00:00:00 2001 From: vmud213 Date: Thu, 11 May 2017 09:33:43 +0000 Subject: [PATCH 0502/1936] Add files/ironic-inspector* to .gitignore When ironic-inspector is enabled through devstack, these two files are created which needs to be masked. Change-Id: I7a3db6fd6197da20cca1e938727d54195957ac18 Closes-Bug: #1690105 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d1781bc730..7967e14a19 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ files/images files/pip-* files/get-pip.py* files/ir-deploy* +files/ironic-inspector* local.conf local.sh localrc From f24e29920b9ec40e924c71bbe1999e027d61390e Mon Sep 17 00:00:00 2001 From: Angel Noam Date: Thu, 11 May 2017 15:13:29 +0300 Subject: [PATCH 0503/1936] Add support for image name end with .raw Change-Id: If3bd42825a29755fa8e68fa5661d068c6f0019d2 --- functions | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/functions b/functions index 8968a1861b..f842f63038 100644 --- a/functions +++ b/functions @@ -310,6 +310,11 @@ function upload_image { disk_format=qcow2 container_format=bare ;; + *.raw) + image_name=$(basename "$image" ".raw") + disk_format=raw + container_format=bare + ;; *.iso) image_name=$(basename "$image" ".iso") disk_format=iso From 2a6112ea9ab698714c6b4e92ddb829e2d4574a7c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 12 May 2017 10:16:33 -0700 Subject: [PATCH 0504/1936] Document testing of new devstack features At the Boston 2017 Summit I had mentioned that the pattern of using non voting/experimental jobs was not working for getting new features into Devstack. It is slow and leads people to being too conservative when it comes to pushing new things in. Instead I suggested that since Devstack changes are self testing we add the features, have change that enables the feature, and if that changes passes we move forward with merging (assuming code review is fine and necessary communication is done). 
Document this process in the HACKING file so that we have something we can point to when people want to add a new experimental job for every new little thing (ipv6, tls, systemd, etc). Change-Id: I5190cc3d3de4e81d52748347306133b5034d5531 --- HACKING.rst | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/HACKING.rst b/HACKING.rst index b76cb6c4a7..fc67f09a7b 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -322,7 +322,7 @@ Variables and Functions Review Criteria -=============== +--------------- There are some broad criteria that will be followed when reviewing your change @@ -364,3 +364,26 @@ your change * **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people that should be added to reviews of various sub-systems. + + +Making Changes, Testing, and CI +------------------------------- + +Changes to Devstack are tested by automated continuous integration jobs +that run on a variety of Linux Distros using a handful of common +configurations. What this means is that every change to Devstack is +self testing. One major benefit of this is that developers do not +typically need to add new non voting test jobs to add features to +Devstack. Instead the features can be added, then if testing passes +with the feature enabled the change is ready to merge (pending code +review). + +A concrete example of this was the switch from screen based service +management to systemd based service management. No new jobs were +created for this. Instead the features were added to devstack, tested +locally and in CI using a change that enabled the feature, then once +the enabling change was passing and the new behavior communicated and +documented it was merged. + +Using this process has been proven to be effective and leads to +quicker implementation of desired features. From 84ce2f1c9b0e2aaba95015aea8ed9acaacb62e1b Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 9 May 2017 07:35:08 -0400 Subject: [PATCH 0505/1936] add a table of contents Add a table of contents help readers find the information they need without having to read all of the prose on every page. Remove the site-map file, which doesn't appear to be linked anywhere. Change-Id: Ib5761c9cfdd5a083df562413d727cb4ac7547c9e Signed-off-by: Doug Hellmann --- doc/source/index.rst | 9 +++++++++ doc/source/site-map.rst | 24 ------------------------ 2 files changed, 9 insertions(+), 24 deletions(-) delete mode 100644 doc/source/site-map.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index cbd697112a..47087c5a0b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -142,3 +142,12 @@ services, features, and configuration not present in base devstack. Get :doc:`the big picture ` of what we are trying to do with devstack, and help us by :doc:`contributing to the project `. + +Contents +-------- + +.. toctree:: + :glob: + :maxdepth: 2 + + * diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst deleted file mode 100644 index 022cc73959..0000000000 --- a/doc/source/site-map.rst +++ /dev/null @@ -1,24 +0,0 @@ -:orphan: - -.. the TOC on the front page actually makes the document a lot more - confusing. This lets us bury a toc which we can link in when - appropriate. - -========== - Site Map -========== - -.. 
toctree:: - :glob: - :maxdepth: 3 - - overview - configuration - networking - plugins - plugin-registry - faq - development - hacking - guides - systemd From 1f55d38911cec5e597e7b10cc7324804c2ba063f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 16 May 2017 08:50:53 -0700 Subject: [PATCH 0506/1936] Add systemd ulimit override function and set an override for NOFILE This sets our default ulimit NOFILE to 2048, which is double what we set things like mysql'd max_connections to. Change-Id: I5126bed1e6b9f8c64db00eae4151ac61e47b1bf8 --- functions | 16 ++++++++++++++++ stack.sh | 3 +++ stackrc | 3 +++ 3 files changed, 22 insertions(+) diff --git a/functions b/functions index f842f63038..bc2d26b496 100644 --- a/functions +++ b/functions @@ -720,6 +720,22 @@ function enable_kernel_bridge_firewall { } +# Set a systemd system override +# +# This sets a system-side override in system.conf. A per-service +# override would be /etc/systemd/system/${service}.service/override.conf +function set_systemd_override { + local key="$1" + local value="$2" + + local sysconf="/etc/systemd/system.conf" + iniset -sudo "${sysconf}" "Manager" "$key" "$value" + echo "Set systemd system override for ${key}=${value}" + + sudo systemctl daemon-reload +} + + # Restore xtrace $_XTRACE_FUNCTIONS diff --git a/stack.sh b/stack.sh index d6837ef5d0..e83eaea326 100755 --- a/stack.sh +++ b/stack.sh @@ -765,6 +765,9 @@ install_infra # Phase: pre-install run_phase stack pre-install +# NOTE(danms): Set global limits before installing anything +set_systemd_override DefaultLimitNOFILE ${ULIMIT_NOFILE} + install_rpc_backend restart_rpc_backend diff --git a/stackrc b/stackrc index 35856ca73e..ea1bcdf3f0 100644 --- a/stackrc +++ b/stackrc @@ -925,6 +925,9 @@ fi # ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs # ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks +# System-wide ulimit file descriptors override +ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048} + # Local variables: # mode: shell-script # End: From 954fd1b729f792dc6cbc470285a5e147053147a8 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 16 May 2017 12:24:45 -0400 Subject: [PATCH 0507/1936] Use -y with 'pip uninstall' 'pip uninstall' will hang running stack.sh if it has to prompt the user for input, use -y. Change-Id: Ic94639e444b87fd3538463d5a51c01a0208a2ab2 Closes-bug: #1691172 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index a004217b4a..4c443d6a00 100644 --- a/inc/python +++ b/inc/python @@ -362,7 +362,7 @@ function pip_uninstall { local sudo_pip="sudo -H" fi # don't error if we can't uninstall, it might not be there - $sudo_pip $cmd_pip uninstall $name || /bin/true + $sudo_pip $cmd_pip uninstall -y $name || /bin/true } # get version of a package from global requirements file From 8cd310d7638843f07f4c88adaf3aed9db7668073 Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Tue, 16 May 2017 13:52:03 -0500 Subject: [PATCH 0508/1936] Restore log colors under devstack/systemd One of the pending issues with the conversion to systemd was the loss of log coloring. It turns out that journalctl by default strips out characters it considers "unprintable" - including the color codes emitted by the old-style logging. However, journalctl can be made to print them by adding the `-a` flag. 
This change makes devstack's log formatter conf settings include color codes like the old screen-based setup used to We also remove stackrc's setting of JOURNALCTL_F, whose usage was removed via I6af6d1857effaf662a9d72bd394864934eacbe70. Change-Id: I2401e267913a24d18dae355aa933072dbbdab1d8 --- doc/source/systemd.rst | 18 +++++++++++------- functions | 15 ++++++--------- stackrc | 2 -- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index efe79e4b0c..60a7719262 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -109,6 +109,17 @@ Use higher precision time stamps:: journalctl -f -o short-precise --unit devstack@n-cpu.service +By default, journalctl strips out "unprintable" characters, including +ASCII color codes. To keep the color codes (which can be interpreted by +an appropriate terminal/pager - e.g. ``less``, the default):: + + journalctl -a --unit devstack@n-cpu.service + +When outputting to the terminal using the default pager, long lines +appear to be truncated, but horizontal scrolling is supported via the +left/right arrow keys. + +See ``man 1 journalctl`` for more. Known Issues ============ @@ -137,13 +148,6 @@ system units. Future Work =========== -log colorizing --------------- - -We lose log colorization through this process. We might want to build -a custom colorizer that we could run journalctl output through -optionally for people. - user units ---------- diff --git a/functions b/functions index f842f63038..9bb64cc3c4 100644 --- a/functions +++ b/functions @@ -616,21 +616,18 @@ function setup_systemd_logging { # request-id. However, there may be an eventlet interaction here, # so going off for now. USE_JOURNAL=$(trueorfalse USE_JOURNAL False) + local pidstr="" if [[ "$USE_JOURNAL" == "True" ]]; then iniset $conf_file $conf_section use_journal "True" # if we are using the journal directly, our process id is already correct - iniset $conf_file $conf_section logging_debug_format_suffix \ - "{{%(funcName)s %(pathname)s:%(lineno)d}}" else - iniset $conf_file $conf_section logging_debug_format_suffix \ - "{{(pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d}}" + pidstr="(pid=%(process)d) " fi + iniset $conf_file $conf_section logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" - iniset $conf_file $conf_section logging_context_format_string \ - "%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s] %(instance)s%(message)s" - iniset $conf_file $conf_section logging_default_format_string \ - "%(levelname)s %(name)s [-] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" + iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" } function setup_standard_logging_identity { diff --git a/stackrc b/stackrc index 35856ca73e..2a2007f581 100644 --- a/stackrc +++ b/stackrc @@ -100,11 +100,9 @@ USER_UNITS=$(trueorfalse False USER_UNITS) if [[ "$USER_UNITS" == "True" ]]; then SYSTEMD_DIR="$HOME/.local/share/systemd/user" SYSTEMCTL="systemctl --user" - JOURNALCTL_F="journalctl -f -o short-precise --user-unit" else 
SYSTEMD_DIR="/etc/systemd/system" SYSTEMCTL="sudo systemctl" - JOURNALCTL_F="journalctl -f -o short-precise --unit" fi From d1fe0e62e77b2eaf711e0b4c157dc571be9ad13e Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 16 May 2017 22:27:58 -0700 Subject: [PATCH 0509/1936] Always setup libvirt for tap devices when using Neutron This logic has been tied to OVS since it was introduced in [1] and revised in [2]. However, many other backends may use tap devices that aren't related to OVS, such as Calico[3] and Linux Bridge after [4] merges. This patch just removes the dependency on OVS specifically so /dev/net/tun is added to cgroups whenever any Neutron backend is used. This is done in other deployment tools like Juju[5] so it's not unprecedented. 1. Ifab268f739b004db13024633e8abeb17691b9e46 2. Ic1da132fa421f1c70c10a319ee3239831b0f956f 3. http://docs.projectcalico.org/master/getting-started/openstack/installation/ubuntu#compute-node-install 4. I23c5faaeab69aede1fd038a36f4a0b8f928498ce 5. https://github.com/openstack/charm-nova-compute/blob/2790f81ecd32d9962617c4c3126621fffdc318a0/templates/qemu.conf Change-Id: I075595158d8f3b5a6811c4794aa7b91912940db5 Partial-Bug: #1675343 --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 326c8bf4ae..3e38b898ec 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -98,7 +98,7 @@ function install_libvirt { # Configures the installed libvirt system so that is accessible by # STACK_USER via qemu:///system with management capabilities. function configure_libvirt { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + if is_service_enabled neutron && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat < Date: Wed, 17 May 2017 14:18:18 +0300 Subject: [PATCH 0510/1936] cleanup: remove DEVSTACK_CINDER_SECURE_DELETE DEVSTACK_CINDER_SECURE_DELETE is deprecated from liberty release. This should have been removed after kilo-eol Change-Id: I82c15a19f8fe0326d4a5c2a076baa6d3e53fcf32 --- lib/cinder | 14 -------------- lib/cinder_backends/lvm | 3 --- 2 files changed, 17 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9fc25c75bb..60379475f7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -84,20 +84,6 @@ CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} # CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2} CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} - -# Should cinder perform secure deletion of volumes? -# Defaults to zero. Can also be set to none or shred. -# This was previously CINDER_SECURE_DELETE (True or False). -# Equivalents using CINDER_VOLUME_CLEAR are zero and none, respectively. -# Set to none to avoid this bug when testing: -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 -if [[ -n $CINDER_SECURE_DELETE ]]; then - CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE) - if [[ $CINDER_SECURE_DELETE == "False" ]]; then - CINDER_VOLUME_CLEAR_DEFAULT="none" - fi - deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE." 
-fi CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index d927f9cd6b..03e188029f 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -53,9 +53,6 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" - if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then - iniset $CINDER_CONF $be_name volume_clear none - fi } # init_cinder_backend_lvm - Initialize volume group From fc572a5da0cad4f0f6f247abfe835a5948d3aa47 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Tue, 16 May 2017 23:04:46 +0000 Subject: [PATCH 0511/1936] Add --tcp option to dstat command for connection stats This enables tcp stats (listen, established, syn, time_wait, close) in dstat to allow us to get a high-level view of performance changes in the system during gate runs. Change-Id: Ifbffbed22446e7e6a3b825c18266b63d2f2e7718 --- tools/dstat.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/dstat.sh b/tools/dstat.sh index ae7306ecb7..01c6d9b7e9 100755 --- a/tools/dstat.sh +++ b/tools/dstat.sh @@ -13,10 +13,10 @@ LOGDIR=$1 # Command line arguments for primary DStat process. -DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap" +DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap --tcp" # Command-line arguments for secondary background DStat process. -DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log" +DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log" # Execute and background the secondary dstat process and discard its output. dstat $DSTAT_CSV_OPTS >& /dev/null & From 00ff904b09e07880a274cc9b0cf35b6433dc5877 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 19 May 2017 13:40:56 +0100 Subject: [PATCH 0512/1936] stack: Workaround libvirt issue with multiple of 16 byte fixed_key values Unable to use LUKS passphrase that is exactly 16 bytes long https://bugzilla.redhat.com/show_bug.cgi?id=1447297 Change-Id: I565339762549b076119ffedb6b83abfa12977f5e --- stack.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 9fdb47ecbd..5b73291bcf 100755 --- a/stack.sh +++ b/stack.sh @@ -1253,8 +1253,13 @@ if is_service_enabled g-reg; then fi # Create a randomized default value for the key manager's fixed_key +# NOTE(lyarwood): This is currently set to 36 as a workaround to the following +# libvirt bug that incorrectly pads passphrases that are a multiple of 16 bytes +# in length. +# Unable to use LUKS passphrase that is exactly 16 bytes long +# https://bugzilla.redhat.com/show_bug.cgi?id=1447297 if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 32) + iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 36) fi # Launch the nova-api and wait for it to answer before continuing From 4db30f9f05ad8aba9001d444313ee2915f86ecd1 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 9 May 2017 07:35:45 -0400 Subject: [PATCH 0513/1936] update sphinx Update sphinx to the version used to build the documentation elsewhere and turn on the option to treat warnings as errors to ensure that no poorly constructed rst is introduced. Cap sphinx<1.6.1, since that version has a conflict with pbr right now. 
Change-Id: I19b3332229e2094988cbf8968c42a0323194a209 Signed-off-by: Doug Hellmann --- setup.cfg | 1 + tox.ini | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index 3487f6541b..73d22b5268 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,6 +15,7 @@ classifier = all_files = 1 build-dir = doc/build source-dir = doc/source +warning-is-error = 1 [pbr] warnerrors = True diff --git a/tox.ini b/tox.ini index 55a06d0cae..cc7c5444a8 100644 --- a/tox.ini +++ b/tox.ini @@ -37,8 +37,8 @@ commands = bash -c "find {toxinidir} \ deps = Pygments docutils - sphinx>=1.1.2,<1.2 - pbr>=0.6,!=0.7,<1.0 + sphinx>=1.5.1,<1.6.1 + pbr>=2.0.0,!=2.1.0 oslosphinx nwdiag blockdiag @@ -52,8 +52,8 @@ commands = [testenv:venv] deps = - pbr>=0.6,!=0.7,<1.0 - sphinx>=1.1.2,<1.2 + pbr>=2.0.0,!=2.1.0 + sphinx>=1.5.1,<1.6.1 oslosphinx blockdiag sphinxcontrib-blockdiag From ac2ae8c04779e4ad78d3813b8bc4bc4200b6d686 Mon Sep 17 00:00:00 2001 From: Nir Magnezi Date: Mon, 22 May 2017 12:40:57 +0300 Subject: [PATCH 0514/1936] Remove nova cert from devstack As a followup to I2c78a0c6599b92040146cf9f0042cff8fd2509c3, the nova cert service should be removed from devstack. Without this fix, stacking will fail is USE_SCREEN=True Change-Id: I115580352fa380b896bae290f9a4efbfe4ff0dfd --- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- lib/nova | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 21bea9973b..4ed64bf12f 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -45,7 +45,7 @@ Edit your ``local.conf`` to look like # Horizon ENABLED_SERVICES+=,horizon # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch # Glance ENABLED_SERVICES+=,g-api,g-reg # Neutron diff --git a/lib/nova b/lib/nova index de053ab389..e580abb4b0 100644 --- a/lib/nova +++ b/lib/nova @@ -878,7 +878,6 @@ function start_nova_rest { run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" - run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" if is_service_enabled n-net; then if ! running_in_container; then @@ -929,7 +928,7 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. - for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do + for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do stop_process $serv done } From 546656fc0543ec2bc5b422fd9eee17f1b8122758 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 14 Mar 2017 07:05:19 -0400 Subject: [PATCH 0515/1936] etcd3 as a base service ETCD_DOWNLOAD_URL is set to github url, in our CI, we can point ETCD_DOWNLOAD_URL to a url in tarballs.openstack.org possibly in devstack-gate Download the etcd binaries and drop them into /opt/stack/bin and use it from there. Cache the tgz for subsequent use (local workflow) daemon-reload is called twice once from inside the write_user_unit_file and then when we adjust the entries with additional things recommended by the etcd team. 
We need a better way to do this in the future. Added a TODO to verify the downloaded artifact later. The etcd team posts gpg signature, we could verify that or run sha256sum and hard code that in lib/etcd3 file. We would have to update it whenever we bump the etcd3 version. We use the public key "CoreOS Application Signing Key " with ID FC8A365E to verify the integrity of the downloaded file Any jobs that need to be run on architectures where v3.1.7 is not available should rey the v3.2.0-rcX release candidates. We can switch to v3.2.0 when it gets released. Initial version of this code was borrowed from the dragonflow repo: http://git.openstack.org/cgit/openstack/dragonflow/tree/devstack Change-Id: Ibbb430fb1dbf66942168e0cb52d990ab6a2eb8d7 --- lib/etcd3 | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 6 +++ stackrc | 2 +- unstack.sh | 6 +++ 4 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 lib/etcd3 diff --git a/lib/etcd3 b/lib/etcd3 new file mode 100644 index 0000000000..fa60a392c9 --- /dev/null +++ b/lib/etcd3 @@ -0,0 +1,126 @@ +#!/bin/bash +# +# lib/etcd3 +# +# Functions to control the installation and configuration of etcd 3.x +# that provides a key-value store (and possibly other functions). + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_etcd3 +# - stop_etcd3 +# - cleanup_etcd3 + +# Save trace setting +_XTRACE_ETCD3=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default values for etcd +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} +ETCD_VERSION=${ETCD_VERSION:-v3.1.7} +ETCD_DATA_DIR="$DEST/data/etcd" +ETCD_SYSTEMD_SERVICE="devstack@etcd.service" +ETCD_BIN_DIR="$DEST/bin" + +if is_ubuntu ; then + UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' 
-f 1` +fi + +# start_etcd3() - Starts to run the etcd process +function start_etcd3 { + _install_etcd + + local cmd="$ETCD_BIN_DIR/etcd" + cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" + cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" + cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380" + cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:2379" + cmd+=" --listen-peer-urls http://0.0.0.0:2380 " + cmd+=" --listen-client-urls http://$SERVICE_HOST:2379" + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" + + iniset -sudo $unitfile "Unit" "After" "network.target" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "Restart" "on-failure" + iniset -sudo $unitfile "Service" "LimitNOFILE" "65536" + + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE + $SYSTEMCTL start $ETCD_SYSTEMD_SERVICE +} + +# stop_etcd3() stops the etcd3 process +function stop_etcd3 { + $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE +} + +function cleanup_etcd { + $SYSTEMCTL disable $ETCD_SYSTEMD_SERVICE + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + sudo rm -f $unitfile + + $SYSTEMCTL daemon-reload + + sudo rm -rf $ETCD_DATA_DIR +} + +function _install_etcd { + echo "Installing etcd" + + # Make sure etcd3 downloads the correct architecture + if is_arch "x86_64"; then + ETCD_ARCH="amd64" + elif is_arch "aarch64"; then + ETCD_ARCH="arm64" + elif is_arch "ppc64le"; then + ETCD_ARCH="ppc64le" + else + exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" + fi + + # Install the libraries needed. Note: tooz for example does not have a hard dependency on these libraries + pip_install etcd3 + pip_install etcd3gw + + # Create the necessary directories + sudo mkdir -p $ETCD_BIN_DIR + sudo mkdir -p $ETCD_DATA_DIR + + # Download and cache the etcd tgz for subsequent use + if [ ! -f "$DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + mkdir -p $DEST/etcd + ETCD_DOWNLOAD_FILE=etcd-$ETCD_VERSION-linux-$ETCD_ARCH.tar.gz + wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $DEST/etcd/$ETCD_DOWNLOAD_FILE + wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE.asc -O $DEST/etcd/$ETCD_DOWNLOAD_FILE.asc + + # use gpg to verify the artifact, use a backup key server in case the first one is down for some reason + gpg --keyserver hkps.pool.sks-keyservers.net --recv-key FC8A365E || gpg --keyserver pgpkeys.mit.edu --recv-key FC8A365E + gpg --verify $DEST/etcd/$ETCD_DOWNLOAD_FILE.asc $DEST/etcd/$ETCD_DOWNLOAD_FILE + + tar xzvf $DEST/etcd/$ETCD_DOWNLOAD_FILE -C $DEST/etcd + sudo cp $DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd $ETCD_BIN_DIR/etcd + fi + if [ ! 
-f "$ETCD_BIN_DIR/etcd" ]; then + sudo cp $DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd $ETCD_BIN_DIR/etcd + fi +} + +# Restore xtrace +$_XTRACE_ETCD3 + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/stack.sh b/stack.sh index e83eaea326..ecf068a4a9 100755 --- a/stack.sh +++ b/stack.sh @@ -574,6 +574,7 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/dlm +source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/os_brick # Extras Source @@ -1043,6 +1044,11 @@ fi # A better kind of sysstat, with the top process per time slice start_dstat +# Etcd +# ----- + +# etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines +start_etcd3 # Keystone # -------- diff --git a/stackrc b/stackrc index 41ff268bbe..9203f8b5b9 100644 --- a/stackrc +++ b/stackrc @@ -65,7 +65,7 @@ if ! isset ENABLED_SERVICES ; then # Dashboard ENABLED_SERVICES+=,horizon # Additional services - ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat + ENABLED_SERVICES+=,rabbit,tempest,mysql,etcd3,dstat fi # Global toggle for enabling services under mod_wsgi. If this is set to diff --git a/unstack.sh b/unstack.sh index 485fed7f80..a9826f5a97 100755 --- a/unstack.sh +++ b/unstack.sh @@ -69,6 +69,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/dlm # Extras Source @@ -162,6 +163,11 @@ if is_service_enabled neutron; then cleanup_neutron fi +if is_service_enabled etcd3; then + stop_etcd3 + cleanup_etcd3 +fi + if is_service_enabled dstat; then stop_dstat fi From 0d9fd60ad4a999ba3d57042b7bc861d86126651a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20J=C3=B3zefczyk?= Date: Tue, 23 May 2017 10:17:48 +0200 Subject: [PATCH 0516/1936] Change version of noVNC to stable In master branch of noVNC project file vnc_auto.html was renamed to vnc_lite.html Because of that nova-novncproxy looks for file that actually doesn't exist. We need to change branch of noVNC to latest stable, because other projects are not ready yet to rename the path. Those projects depends on noVNC package installed in system, but it is too old for now for both CentOS (version 0.5) and Ubuntu (version 0.4). The only way to make noVNC console working on Devstack is to change the branch to stable one. Unit test also has to be modified in order to ignore novnc repo from checking against cloning non-master branch. 
Change-Id: Iaf4761aedf93bc6b914a6a0c5cf1cfedcc29583c Closes-bug: #1692513 --- stackrc | 2 +- tests/test_refs.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index ea8b044faf..f96688bf6c 100644 --- a/stackrc +++ b/stackrc @@ -563,7 +563,7 @@ IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master} # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-master} +NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} diff --git a/tests/test_refs.sh b/tests/test_refs.sh index bccca5dff7..65848cdc72 100755 --- a/tests/test_refs.sh +++ b/tests/test_refs.sh @@ -15,7 +15,7 @@ echo "Ensuring we don't have crazy refs" -REFS=`grep BRANCH stackrc | grep -v -- '-master'` +REFS=`grep BRANCH stackrc | grep -v -- '-master' | grep -v 'NOVNC_BRANCH'` rc=$? if [[ $rc -eq 0 ]]; then echo "Branch defaults must be master. Found:" From d8283fde660bee2d4a5f639197a6a7010988b269 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 23 May 2017 22:12:39 -0400 Subject: [PATCH 0517/1936] Avoid installing etcd3 in subnodes We need to handle this better, for now, just don't install etcd in the sub nodes. We need to setup the proper clustering mechanism if we want to have etcd3 running in multiple nodes Change-Id: I8dd385e3c993942473e67d04367cdf74495dbeef --- lib/etcd3 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/etcd3 b/lib/etcd3 index fa60a392c9..2a4fa2a6af 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -36,6 +36,11 @@ fi # start_etcd3() - Starts to run the etcd process function start_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + _install_etcd local cmd="$ETCD_BIN_DIR/etcd" @@ -62,10 +67,20 @@ function start_etcd3 { # stop_etcd3() stops the etcd3 process function stop_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE } function cleanup_etcd { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + $SYSTEMCTL disable $ETCD_SYSTEMD_SERVICE local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" From 2715fd0b4acf44842ceb67760bbc348093e7d70d Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Wed, 24 May 2017 11:31:56 +0300 Subject: [PATCH 0518/1936] Do not upload vhdx images as vhd This change ensures that when uploading vhdx images, we use the proper format. At the moment, vhdx images are uploaded as vhd, which can be troublesome: first because this is misleading, second because the actual image format may be checked, having the image rejected. 
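For illustration, the Perl-regex extraction this change introduces (see the
functions hunk below) resolves the format like so; the file names are just
examples:

    for f in image.vhd image.vhdx image.vhd.gz image.vhdx.gz; do
        echo "$f -> $(echo $f | grep -oP '(?<=\.)vhdx?(?=\.|$)')"
    done
    # image.vhd -> vhd, image.vhdx -> vhdx, image.vhd.gz -> vhd, image.vhdx.gz -> vhdx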
Change-Id: I9578be41ea9dc252404b7553679ac527e08a0ff6 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 52a82faf0a..689aad074e 100644 --- a/functions +++ b/functions @@ -323,7 +323,7 @@ function upload_image { *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz) local extension="${image_fname#*.}" image_name=$(basename "$image" ".$extension") - disk_format=vhd + disk_format=$(echo $image_fname | grep -oP '(?<=\.)vhdx?(?=\.|$)') container_format=bare if [ "${image_fname##*.}" == "gz" ]; then unpack=zcat From bba924121c8ad5e95f5cf55ab489d4276dcc39b1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 24 May 2017 07:56:10 -0400 Subject: [PATCH 0519/1936] Use sha256sum instead of gpg for verification gpg verification requires network connectivity which is non mirrorable. We try to avoid that in devstack whenever possible. A sha256sum is a totally reasonable way of knowing if the downloaded package is valid. Closes-Bug: #1693092 Change-Id: Id496ab53f76444f08dc6961f1ecd25f450cc96d7 --- lib/etcd3 | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index fa60a392c9..15c29132be 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -29,6 +29,10 @@ ETCD_VERSION=${ETCD_VERSION:-v3.1.7} ETCD_DATA_DIR="$DEST/data/etcd" ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" +ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52" +# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. +ETCD_SHA256_ARM64="" +ETCD_SHA256_PPC64="" if is_ubuntu ; then UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` @@ -82,14 +86,19 @@ function _install_etcd { # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} elif is_arch "aarch64"; then ETCD_ARCH="arm64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi + ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH + # Install the libraries needed. Note: tooz for example does not have a hard dependency on these libraries pip_install etcd3 pip_install etcd3gw @@ -99,21 +108,18 @@ function _install_etcd { sudo mkdir -p $ETCD_DATA_DIR # Download and cache the etcd tgz for subsequent use - if [ ! -f "$DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then - mkdir -p $DEST/etcd - ETCD_DOWNLOAD_FILE=etcd-$ETCD_VERSION-linux-$ETCD_ARCH.tar.gz - wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $DEST/etcd/$ETCD_DOWNLOAD_FILE - wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE.asc -O $DEST/etcd/$ETCD_DOWNLOAD_FILE.asc - - # use gpg to verify the artifact, use a backup key server in case the first one is down for some reason - gpg --keyserver hkps.pool.sks-keyservers.net --recv-key FC8A365E || gpg --keyserver pgpkeys.mit.edu --recv-key FC8A365E - gpg --verify $DEST/etcd/$ETCD_DOWNLOAD_FILE.asc $DEST/etcd/$ETCD_DOWNLOAD_FILE - - tar xzvf $DEST/etcd/$ETCD_DOWNLOAD_FILE -C $DEST/etcd - sudo cp $DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd $ETCD_BIN_DIR/etcd + if [ ! 
-f "files/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz + wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O files/$ETCD_DOWNLOAD_FILE + echo "${ETCD_SHA256} files/${ETCD_DOWNLOAD_FILE}" > files/etcd.sha256sum + # NOTE(sdague): this should go fatal if this fails + sha256sum -c files/etcd.sha256sum + + tar xzvf files/$ETCD_DOWNLOAD_FILE -C files + sudo cp files/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd fi if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then - sudo cp $DEST/etcd/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd $ETCD_BIN_DIR/etcd + sudo cp files/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd fi } From 94b9fae4e9f43eb1329384cbeb1977baac60846e Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Wed, 24 May 2017 13:31:13 +0200 Subject: [PATCH 0520/1936] Allow disabling etcd3 Etcd3 was enabled recently as new service in devstack [1]. But there's no way to disable etcd3. This is required on architectures where no etcd binaries are available (e.g. s390x). The long term goal of course should be to have those binaries available. The short term circumvention is to allow disabling the service in local.conf: disable_service etcd3 [1] https://github.com/openstack-dev/devstack/commit/546656fc0543ec2bc5b422fd9eee17f1b8122758 Change-Id: I6184ed193482dad9643ccb2b97133d4957485408 Partial-Bug: #1693192 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ecf068a4a9..a63f6b9e7c 100755 --- a/stack.sh +++ b/stack.sh @@ -1048,7 +1048,9 @@ start_dstat # ----- # etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines -start_etcd3 +if is_service_enabled etcd3; then + start_etcd3 +fi # Keystone # -------- From c0d16c279ad2a24bd13e2dbd6ae6986c37c4f230 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 19 May 2017 10:23:46 -0400 Subject: [PATCH 0521/1936] Get rid of zookeeper from devstack In Ibbb430fb1dbf66942168e0cb52d990ab6a2eb8d7, we are adding etcd3 as a new base service. We should drop zookeeper and use etcd3 as the backend. Since cinder is the first service for this tooz+etcd3 DLM scenario and cinder uses eventlet we have cannnot use the grpc based driver in tooz. So new CINDER_COORDINATION_URL that defaults to the etcd3's grpc HTTP gateway based tooz backend. We need to hold this change until the tooz change (see Depends-On) is available in a tooz release. 
Depends-On: I6184ed193482dad9643ccb2b97133d4957485408 Change-Id: Ia187e1a86413edf25b909b6bb57e84fb4930a696 --- files/debs/zookeeper | 1 - files/rpms/zookeeper | 1 - files/zookeeper/environment | 36 ---------- files/zookeeper/log4j.properties | 69 ------------------- files/zookeeper/myid | 1 - files/zookeeper/zoo.cfg | 74 --------------------- lib/cinder | 9 ++- lib/dlm | 111 ------------------------------- stack.sh | 5 -- unstack.sh | 1 - 10 files changed, 4 insertions(+), 304 deletions(-) delete mode 100644 files/debs/zookeeper delete mode 100644 files/rpms/zookeeper delete mode 100644 files/zookeeper/environment delete mode 100644 files/zookeeper/log4j.properties delete mode 100644 files/zookeeper/myid delete mode 100644 files/zookeeper/zoo.cfg delete mode 100644 lib/dlm diff --git a/files/debs/zookeeper b/files/debs/zookeeper deleted file mode 100644 index f41b559007..0000000000 --- a/files/debs/zookeeper +++ /dev/null @@ -1 +0,0 @@ -zookeeperd diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper deleted file mode 100644 index 1bfac538a2..0000000000 --- a/files/rpms/zookeeper +++ /dev/null @@ -1 +0,0 @@ -zookeeper diff --git a/files/zookeeper/environment b/files/zookeeper/environment deleted file mode 100644 index afa2d2f89f..0000000000 --- a/files/zookeeper/environment +++ /dev/null @@ -1,36 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Modified from http://packages.ubuntu.com/saucy/zookeeperd -NAME=zookeeper -ZOOCFGDIR=/etc/zookeeper/conf - -# seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar" - -ZOOCFG="$ZOOCFGDIR/zoo.cfg" -ZOO_LOG_DIR=/var/log/zookeeper -USER=$NAME -GROUP=$NAME -PIDDIR=/var/run/$NAME -PIDFILE=$PIDDIR/$NAME.pid -SCRIPTNAME=/etc/init.d/$NAME -JAVA=/usr/bin/java -ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" -ZOO_LOG4J_PROP="INFO,ROLLINGFILE" -JMXLOCALONLY=false -JAVA_OPTS="" diff --git a/files/zookeeper/log4j.properties b/files/zookeeper/log4j.properties deleted file mode 100644 index 6c45a4aad9..0000000000 --- a/files/zookeeper/log4j.properties +++ /dev/null @@ -1,69 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -# From http://packages.ubuntu.com/saucy/zookeeperd - -# ZooKeeper Logging Configuration -# - -# Format is " (, )+ - -log4j.rootLogger=${zookeeper.root.logger} - -# Example: console appender only -# log4j.rootLogger=INFO, CONSOLE - -# Example with rolling log file -#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE - -# Example with rolling log file and tracing -#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE - -# -# Log INFO level and above messages to the console -# -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.Threshold=INFO -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n - -# -# Add ROLLINGFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender -log4j.appender.ROLLINGFILE.Threshold=WARN -log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log - -# Max log file size of 10MB -log4j.appender.ROLLINGFILE.MaxFileSize=10MB -# uncomment the next line to limit number of backup files -#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 - -log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout -log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n - - -# -# Add TRACEFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.TRACEFILE=org.apache.log4j.FileAppender -log4j.appender.TRACEFILE.Threshold=TRACE -log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log - -log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout -### Notice we are including log4j's NDC here (%x) -log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/files/zookeeper/myid b/files/zookeeper/myid deleted file mode 100644 index c227083464..0000000000 --- a/files/zookeeper/myid +++ /dev/null @@ -1 +0,0 @@ -0 \ No newline at end of file diff --git a/files/zookeeper/zoo.cfg b/files/zookeeper/zoo.cfg deleted file mode 100644 index b8f55827e3..0000000000 --- a/files/zookeeper/zoo.cfg +++ /dev/null @@ -1,74 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html - -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. 
-dataDir=/var/lib/zookeeper -# Place the dataLogDir to a separate physical disc for better performance -# dataLogDir=/disk2/zookeeper - -# the port at which the clients will connect -clientPort=2181 - -# Maximum number of clients that can connect from one client -maxClientCnxns=60 - -# specify all zookeeper servers -# The fist port is used by followers to connect to the leader -# The second one is used for leader election - -server.0=127.0.0.1:2888:3888 - -# To avoid seeks ZooKeeper allocates space in the transaction log file in -# blocks of preAllocSize kilobytes. The default block size is 64M. One reason -# for changing the size of the blocks is to reduce the block size if snapshots -# are taken more often. (Also, see snapCount). -#preAllocSize=65536 - -# Clients can submit requests faster than ZooKeeper can process them, -# especially if there are a lot of clients. To prevent ZooKeeper from running -# out of memory due to queued requests, ZooKeeper will throttle clients so that -# there is no more than globalOutstandingLimit outstanding requests in the -# system. The default limit is 1,000.ZooKeeper logs transactions to a -# transaction log. After snapCount transactions are written to a log file a -# snapshot is started and a new transaction log file is started. The default -# snapCount is 10,000. -#snapCount=1000 - -# If this option is defined, requests will be will logged to a trace file named -# traceFile.year.month.day. -#traceFile= - -# Leader accepts client connections. Default value is "yes". The leader machine -# coordinates updates. For higher update throughput at thes slight expense of -# read throughput the leader can be configured to not accept clients and focus -# on coordination. -#leaderServes=yes - -# Autopurge every hour to avoid using lots of disk in bursts -# Order of the next 2 properties matters. -# autopurge.snapRetainCount must be before autopurge.purgeInterval. -autopurge.snapRetainCount=3 -autopurge.purgeInterval=1 \ No newline at end of file diff --git a/lib/cinder b/lib/cinder index e3a687b4f2..1b4f4e6da2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -363,11 +363,10 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - # Set the backend url according to the configured dlm backend - if is_dlm_enabled; then - if [[ "$(dlm_backend)" == "zookeeper" ]]; then - iniset $CINDER_CONF coordination backend_url "zookeeper://${SERVICE_HOST}:2181" - fi + if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then + iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" + elif is_service_enabled etcd3; then + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:2379" fi } diff --git a/lib/dlm b/lib/dlm deleted file mode 100644 index b5ac0f5d33..0000000000 --- a/lib/dlm +++ /dev/null @@ -1,111 +0,0 @@ -#!/bin/bash -# -# lib/dlm -# -# Functions to control the installation and configuration of software -# that provides a dlm (and possibly other functions). The default is -# **zookeeper**, and is going to be the only backend supported in the -# devstack tree. 
- -# Dependencies: -# -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - is_dlm_enabled -# - install_dlm -# - configure_dlm -# - cleanup_dlm - -# Save trace setting -_XTRACE_DLM=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# - -# Set up default directories -ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper -ZOOKEEPER_CONF_DIR=/etc/zookeeper - - -# Entry Points -# ------------ -# -# NOTE(sdague): it is expected that when someone wants to implement -# another one of these out of tree, they'll implement the following -# functions: -# -# - dlm_backend -# - install_dlm -# - configure_dlm -# - cleanup_dlm - -# This should be declared in the settings file of any plugin or -# service that needs to have a dlm in their environment. -function use_dlm { - enable_service $(dlm_backend) -} - -# A function to return the name of the backend in question, some users -# are going to need to know this. -function dlm_backend { - echo "zookeeper" -} - -# Test if a dlm is enabled (defaults to a zookeeper specific check) -function is_dlm_enabled { - [[ ,${ENABLED_SERVICES}, =~ ,"$(dlm_backend)", ]] && return 0 - return 1 -} - -# cleanup_dlm() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_dlm { - # NOTE(sdague): we don't check for is_enabled here because we - # should just delete this regardless. Some times users updated - # their service list before they run cleanup. - sudo rm -rf $ZOOKEEPER_DATA_DIR -} - -# configure_dlm() - Set config files, create data dirs, etc -function configure_dlm { - if is_dlm_enabled; then - sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR - sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg - # clean up from previous (possibly aborted) runs - # create required data files - sudo rm -rf $ZOOKEEPER_DATA_DIR - sudo mkdir -p $ZOOKEEPER_DATA_DIR - # restart after configuration, there is no reason to make this - # another step, because having data files that don't match the - # zookeeper running is just going to cause tears. 
- restart_service zookeeper - fi -} - -# install_dlm() - Collect source and prepare -function install_dlm { - if is_dlm_enabled; then - pip_install_gr_extras tooz zookeeper - if is_ubuntu; then - install_package zookeeperd - elif is_fedora; then - install_package zookeeper - else - die $LINENO "Don't know how to install zookeeper on this platform" - fi - fi -} - -# Restore xtrace -$_XTRACE_DLM - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stack.sh b/stack.sh index ecf068a4a9..6ff2464fce 100755 --- a/stack.sh +++ b/stack.sh @@ -573,7 +573,6 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat -source $TOP_DIR/lib/dlm source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/os_brick @@ -772,10 +771,6 @@ set_systemd_override DefaultLimitNOFILE ${ULIMIT_NOFILE} install_rpc_backend restart_rpc_backend -# NOTE(sdague): dlm install is conditional on one being enabled by configuration -install_dlm -configure_dlm - if is_service_enabled $DATABASE_BACKENDS; then install_database fi diff --git a/unstack.sh b/unstack.sh index a9826f5a97..77a151f933 100755 --- a/unstack.sh +++ b/unstack.sh @@ -70,7 +70,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/etcd3 -source $TOP_DIR/lib/dlm # Extras Source # -------------- From c0644f39a47958a416a82f10f1091d07fec97896 Mon Sep 17 00:00:00 2001 From: Kirill Zaitsev Date: Wed, 24 May 2017 13:00:47 +0300 Subject: [PATCH 0522/1936] Use correct argument order in truorfalse for USE_JOURNAL truorfalse function from common-functions accepts default as the first parameter. The arguments for USE_JOURNAL were mixed up and this commit restores correct order. Change-Id: Id3621b0e1910a625d6cfb8e81bd27bea82543ae9 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 52a82faf0a..d3a2b5104f 100644 --- a/functions +++ b/functions @@ -615,7 +615,7 @@ function setup_systemd_logging { # native systemd path, which provides for things like search on # request-id. However, there may be an eventlet interaction here, # so going off for now. - USE_JOURNAL=$(trueorfalse USE_JOURNAL False) + USE_JOURNAL=$(trueorfalse False USE_JOURNAL) local pidstr="" if [[ "$USE_JOURNAL" == "True" ]]; then iniset $conf_file $conf_section use_journal "True" From bb436d3373715253a17b6e1fd9211c2bc110a96f Mon Sep 17 00:00:00 2001 From: Amrith Kumar Date: Wed, 10 May 2017 08:21:04 -0400 Subject: [PATCH 0523/1936] Make use of Ubuntu Cloud Archive switchable The trove development environment is typically a linux VM within which openstack is installed, and trove launches guest vm's within that environment. To make it possible for these vm's to launch in one human lifetime, one must set vt/x and enable nested hypervisors to use with kvm; qemu emulation will take way too long. The new libvirtd (v2.5.0) in Ubuntu Cloud Archive doesn't handle nested hypervisors well and if you use it, you end up with a guest hanging on the GRUB line. To enable that use-case, we provide ENABLE_UBUNTU_CLOUD_ARCHIVE which the trove developer can set (to False) before running devstack. 
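For illustration, the trove-style opt-out described above is a single localrc
line (the value shown is the non-default one this change makes possible):

    # stay on the stock Xenial libvirt, which handles nested KVM guests better
    ENABLE_UBUNTU_CLOUD_ARCHIVE=False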
Change-Id: Ia0265c67bb7d2a438575a03c0ddbf2d9c53266ed Closes-Bug: #1689370 --- stackrc | 2 ++ tools/fixup_stuff.sh | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 1300f45cda..f7a0c876f2 100644 --- a/stackrc +++ b/stackrc @@ -665,6 +665,8 @@ case "$VIRT_DRIVER" in ;; esac +# By default, devstack will use Ubuntu Cloud Archive. +ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE) # Images # ------ diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index d07d2675c6..0b78bdeb00 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -72,7 +72,10 @@ fi # We've found that Libvirt on Xenial is flaky and crashes enough to be # a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to # get newer Libvirt. -if [[ "$DISTRO" = "xenial" ]]; then +# Make it possible to switch this based on an environment variable as +# libvirt 2.5.0 doesn't handle nested virtualization quite well and this +# is required for the trove development environment. +if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; then # This pulls in apt-add-repository install_package "software-properties-common" # Use UCA for newer libvirt. Should give us libvirt 2.5.0. From 853b475bfd79621ce7189b133625cb404d618efb Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 25 May 2017 13:03:10 -0400 Subject: [PATCH 0524/1936] Fix typo should be etcd3 Change-Id: Icfa24654699a4e4e4be8a53f7bbe4634badbff7b --- lib/etcd3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index d62214c111..5cab3f556a 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -79,7 +79,7 @@ function stop_etcd3 { $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE } -function cleanup_etcd { +function cleanup_etcd3 { # Don't install in sub nodes (multinode scenario) if [ "$SERVICE_HOST" != "$HOST_IP" ]; then return From e123edeebf1559731b451cec0fd3c06ff974bbf2 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 23 May 2017 15:53:48 -0400 Subject: [PATCH 0525/1936] Add global_request_id to systemd logs With cinder supporting this now, start logging global_request_id in systemd logs. It will be None for all the services until the work starts coming together, but it is safe to do. Change-Id: Ic6ba1a42da88c03e43d89658b453f6a0b353e0db --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 52a82faf0a..ebc4c282d3 100644 --- a/functions +++ b/functions @@ -625,7 +625,7 @@ function setup_systemd_logging { fi iniset $conf_file $conf_section logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" - iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" } From d0db62a476e29355ca08db0237295139c8fce4f6 Mon Sep 17 00:00:00 2001 From: Kirill Zaitsev Date: Fri, 26 May 2017 19:02:52 +0300 Subject: [PATCH 0526/1936] Use systemd-tmpfiles to create /var/run/uwsgi On ubuntu contents of /var/run do not persist between reboots. 
Devstack uses /var/run/uwsgi as home for wsgi sockets. This means that after rebooting the machine services, that rely on uwsgi would fail to start. Currently it affects keystone.service and placement-api.service. This patch changes delegates directory creation to systemd-tmpfiles, which would run on startup. Change-Id: I27d168cea93698739ef08ac76c828695a49176c7 Closes-Bug: #1692767 --- lib/apache | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 34ac660266..3be6c1dbb1 100644 --- a/lib/apache +++ b/lib/apache @@ -229,7 +229,13 @@ function write_uwsgi_config { # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. local socket_dir='/var/run/uwsgi' - sudo install -d -o $STACK_USER -m 755 $socket_dir + + # /var/run will be empty on ubuntu after reboot, so we can use systemd-temptiles + # to automatically create $socket_dir. + sudo mkdir -p /etc/tmpfiles.d/ + echo "d $socket_dir 0755 $STACK_USER root" | sudo tee /etc/tmpfiles.d/uwsgi.conf + sudo systemd-tmpfiles --create /etc/tmpfiles.d/uwsgi.conf + local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here From de8580691d133fb8ad479ef53008d197e651332b Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Wed, 24 May 2017 18:42:33 +0000 Subject: [PATCH 0527/1936] Expose etcd port as a variable This allows devstack plugins to retrieve the etcd port from devstack instead of hard-coding it. Change-Id: I106b559b8ac0fb99a0426bce97a27f67e32d264d --- lib/etcd3 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index d62214c111..5472926f64 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -33,6 +33,7 @@ ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3 # NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. ETCD_SHA256_ARM64="" ETCD_SHA256_PPC64="" +ETCD_PORT=2379 if is_ubuntu ; then UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` @@ -52,9 +53,9 @@ function start_etcd3 { cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380" cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380" - cmd+=" --advertise-client-urls http://$SERVICE_HOST:2379" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" cmd+=" --listen-peer-urls http://0.0.0.0:2380 " - cmd+=" --listen-client-urls http://$SERVICE_HOST:2379" + cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" From fabc976e41ae1d036403403719aac42f111caa35 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 27 May 2017 15:01:16 +0800 Subject: [PATCH 0528/1936] Fix configuration link syntax error The configuration guide link has syntax problem in README.rst, and the click the link will lead to page 404. Fix the syntax problem Change-Id: I47a1641a6898930dca508cdac98b1b43c05dc446 --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index dfa68b927f..b4240bdc91 100644 --- a/README.rst +++ b/README.rst @@ -92,5 +92,5 @@ DevStack can be extensively configured via the configuration file `local.conf`. It is likely that you will need to provide and modify this file if you want anything other than the most basic setup. 
Start by reading the `configuration guide -_` +`_ for details of the configuration file and the many available options. From 4baac6572573945f49b3b3df7b7ea27f15955477 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Sat, 27 May 2017 20:53:20 -0700 Subject: [PATCH 0529/1936] Use proper python when configuring certs We have to do silly overrides of cert locations for requests for reasons. If we are running under python3 then we were previously looking in the wrong location for the requests certs. Update the cert fixing function to properly use python3 to find the certs if python3 is enabled. Change-Id: Id1369da0d812edcf9b1204e9c567f8bfe77c48b2 --- lib/tls | 3 ++- stack.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index 7c6b967bc4..65c2cba300 100644 --- a/lib/tls +++ b/lib/tls @@ -345,7 +345,8 @@ function make_root_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') + local python_cmd=${1:-python} + capath=$($python_cmd -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then diff --git a/stack.sh b/stack.sh index 7a24ed947f..1615b1c046 100755 --- a/stack.sh +++ b/stack.sh @@ -888,6 +888,9 @@ fi if is_service_enabled tls-proxy; then fix_system_ca_bundle_path + if python3_enabled ; then + fix_system_ca_bundle_path python3 + fi fi # Extras Install From 309b99ebcfa9d8bcf18c1d3eed1e52787c63f8c7 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 23 May 2017 15:18:31 -0400 Subject: [PATCH 0530/1936] Add a function to get an available random port This commit adds a new function get_random_port to return a randomly available port from the local port range. Change-Id: Icaed180cc14602a74cdb3fd3456b690d8a4c729c --- functions | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/functions b/functions index 52a82faf0a..4d2703f150 100644 --- a/functions +++ b/functions @@ -732,6 +732,24 @@ function set_systemd_override { sudo systemctl daemon-reload } +# Get a random port from the local port range +# +# This function returns an available port in the local port range. The search +# order is not truly random, but should be considered a random value by the +# user because it depends on the state of your local system. +function get_random_port { + read lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range + while true; do + for (( port = upper_port ; port >= lower_port ; port-- )); do + sudo lsof -i ":$port" &> /dev/null + if [[ $? > 0 ]] ; then + break 2 + fi + done + done + echo $port +} + # Restore xtrace $_XTRACE_FUNCTIONS From e6217a9719b88b550ccbbce7c9bc064cb746c1a3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 24 Apr 2017 16:49:04 -0400 Subject: [PATCH 0531/1936] Use uwsgi for glance-api This commit adds support for deploying glance as a wsgi script under uwsgi. To get around limitations in the uwsgi protocol when using python3 for chunked encoding we have to setup uwsgi in http mode on a random port listening on localhost and use mod_proxy to forward the incoming requests. The alternative approach of having apache buffer the requests locally with the send_cl option with mod_proxy_uwsgi only worked on python2 and also has the limitation that apache is buffering the entire chunked object, which could be several gigabytes in size. 
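As an illustrative sketch of the resulting proxy wiring (the port number here is made up, since the real one is chosen at random, and the exact site file location depends on the distro's apache layout), the generated site config ends up along these lines:

    # e.g. the glance-wsgi-api site written by write_local_uwsgi_http_config
    KeepAlive Off
    ProxyPass "/image" "http://127.0.0.1:60999" retry=0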
Depends-On: I089a22a4be4227a551c32442dba27c426f54c87d Change-Id: Ie98fb7da5e8ecfa49cd680b88139cb7034d5f88f --- lib/apache | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/cinder | 2 +- lib/glance | 39 ++++++++++++++++++++++++++++----------- lib/nova | 2 +- 4 files changed, 83 insertions(+), 14 deletions(-) diff --git a/lib/apache b/lib/apache index 34ac660266..f2de7f2ea9 100644 --- a/lib/apache +++ b/lib/apache @@ -260,12 +260,64 @@ function write_uwsgi_config { else local apache_conf="" apache_conf=$(apache_site_config_for $name) - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf + echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi } +# For services using chunked encoding, the only services known to use this +# currently are Glance and Swift, we need to use an http proxy instead of +# mod_proxy_uwsgi because the chunked encoding gets dropped. See: +# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 +# but that involves having apache buffer the request before sending it to +# uswgi. +function write_local_uwsgi_http_config { + local file=$1 + local wsgi=$2 + local url=$3 + name=$(basename $wsgi) + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. + + # always cleanup given that we are using iniset here + rm -rf $file + iniset "$file" uwsgi wsgi-file "$wsgi" + port=$(get_random_port) + iniset "$file" uwsgi http "127.0.0.1:$port" + iniset "$file" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$file" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$file" uwsgi die-on-term true + iniset "$file" uwsgi exit-on-reload true + iniset "$file" uwsgi enable-threads true + iniset "$file" uwsgi plugins python + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$file" uwsgi thunder-lock true + # Override the default size for headers from the 4k default. + iniset "$file" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$file" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. 
+ iniset "$file" uwsgi lazy-apps true + iniset "$file" uwsgi chmod-socket 666 + iniset "$file" uwsgi http-raw-body true + iniset "$file" uwsgi http-chunked-input true + iniset "$file" uwsgi http-auto-chunked true + + enable_apache_mod proxy + enable_apache_mod proxy_http + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "KeepAlive Off" | sudo tee $apache_conf + echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + function remove_uwsgi_config { local file=$1 local wsgi=$2 diff --git a/lib/cinder b/lib/cinder index e3a687b4f2..dc6f443051 100644 --- a/lib/cinder +++ b/lib/cinder @@ -347,7 +347,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" - iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" + iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE diff --git a/lib/glance b/lib/glance index d6438a6b48..19564183cd 100644 --- a/lib/glance +++ b/lib/glance @@ -71,6 +71,16 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} +GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uswgi.ini +# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet +# TODO(mtreinish): Remove the eventlet path here and in all the similar +# conditionals below after the Pike release +if [[ "$WSGI_MODE" == "uwsgi" ]]; then + GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" +else + GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" +fi # Functions # --------- @@ -103,16 +113,13 @@ function configure_glance { dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -140,8 +147,6 @@ function configure_glance { iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" @@ -198,7 +203,6 @@ function configure_glance { setup_logging $GLANCE_REGISTRY_CONF cp -p 
$GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI - cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF @@ -231,6 +235,13 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" fi + + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + else + iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS + iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + fi } # create_glance_accounts() - Set up common required glance accounts @@ -255,7 +266,7 @@ function create_glance_accounts { get_or_create_endpoint \ "image" \ "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" + "$GLANCE_URL" # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) @@ -320,15 +331,21 @@ function install_glance { function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT + if [[ "$WSGI_MODE" != "uwsgi" ]]; then + start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT + fi start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT fi run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + run_process g-api "$GLANCE_BIN_DIR/uwsgi --ini $GLANCE_UWSGI_CONF" + else + run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + fi - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." + if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then die $LINENO "g-api did not start" fi } diff --git a/lib/nova b/lib/nova index de053ab389..f9cad2551d 100644 --- a/lib/nova +++ b/lib/nova @@ -574,7 +574,7 @@ function create_nova_conf { # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset_rpc_backend nova $NOVA_CONF - iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" + iniset $NOVA_CONF glance api_servers "$GLANCE_URL" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" From 35649ae0d2a356c310fd92f16356bdd086cab290 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Sat, 27 May 2017 17:52:55 -0700 Subject: [PATCH 0532/1936] Make stack.sh work on SUSE This adds packages to suse for systemd python linkages as well as apache2 and which. And configures mod_proxy and mod_proxy_uwsgi with a2enmod. We also properly query if apache mods are enabled to avoid running into systemd service restart limits. Enable mod_version across the board as we use it and it may not be enabled by default (like in SUSE). 
Also in addition to enabling mod_ssl we enable the SSL flag so that TLS will work... Finally we tell the system to trust the devstack CA. Change-Id: I3442cebfb2e7c2550733eb95a12fab42e1229ce7 --- files/rpms-suse/general | 4 ++++ lib/apache | 15 ++++++++++++--- lib/tls | 12 ++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 1044c25288..370f2409f7 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,3 +1,5 @@ +apache2 +apache2-devel bc bridge-utils ca-certificates-mozilla @@ -23,9 +25,11 @@ python-cmd2 # dist:opensuse-12.3 python-devel # pyOpenSSL python-xml screen +systemd-devel # for systemd-python tar tcpdump unzip util-linux wget +which zlib-devel diff --git a/lib/apache b/lib/apache index 34ac660266..43d5000808 100644 --- a/lib/apache +++ b/lib/apache @@ -53,8 +53,15 @@ APACHE_LOG_DIR="/var/log/${APACHE_NAME}" function enable_apache_mod { local mod=$1 # Apache installation, because we mark it NOPRIME - if is_ubuntu || is_suse ; then - if ! a2query -m $mod ; then + if is_ubuntu; then + # Skip mod_version as it is not a valid mod to enable + # on debuntu, instead it is built in. + if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then + sudo a2enmod $mod + restart_apache_server + fi + elif is_suse; then + if ! a2enmod -q $mod ; then sudo a2enmod $mod restart_apache_server fi @@ -96,7 +103,7 @@ function install_apache_uwsgi { # delete the temp directory sudo rm -rf $dir - if is_ubuntu; then + if is_ubuntu || is_suse ; then # we've got to enable proxy and proxy_uwsgi for this to work sudo a2enmod proxy sudo a2enmod proxy_uwsgi @@ -171,6 +178,8 @@ function apache_site_config_for { # enable_apache_site() - Enable a particular apache site function enable_apache_site { local site=$@ + # Many of our sites use mod version. Just enable it. + enable_apache_mod version if is_ubuntu; then sudo a2ensite ${site} elif is_fedora || is_suse; then diff --git a/lib/tls b/lib/tls index 238687c5dd..de7a3affa1 100644 --- a/lib/tls +++ b/lib/tls @@ -212,6 +212,9 @@ function init_CA { if is_fedora; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem sudo update-ca-trust + elif is_suse; then + sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem + sudo update-ca-certificates elif is_ubuntu; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt @@ -354,6 +357,9 @@ function fix_system_ca_bundle_path { elif is_ubuntu; then sudo rm -f $capath sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath + elif is_suse; then + sudo rm -f $capath + sudo ln -s /etc/ssl/ca-bundle.pem $capath else echo "Don't know how to set the CA bundle, expect the install to fail." 
fi @@ -416,6 +422,9 @@ function enable_mod_ssl { if is_ubuntu; then sudo a2enmod ssl + elif is_suse; then + sudo a2enmod ssl + sudo a2enflag SSL elif is_fedora; then # Fedora enables mod_ssl by default : @@ -522,6 +531,9 @@ $listen_string LogFormat "%v %h %l %u %t \"%r\" %>s %b" EOF + if is_suse ; then + sudo a2enflag SSL + fi for mod in ssl proxy proxy_http; do enable_apache_mod $mod done From 14728c7a51c56141eafbf58617814680887c6690 Mon Sep 17 00:00:00 2001 From: Markus Zoeller Date: Mon, 29 May 2017 15:39:21 +0200 Subject: [PATCH 0533/1936] docs: add "kvm on s390x" specific configuration in `local.conf` The upstream CI runs exclusively on nodes with x86 architectures, but OpenStack supports even more platforms. One of them is the KVM on s390x (IBM z systems), which is supported since the *Kilo* release. This change describes the additional settings in the ``local.conf`` file to enable Devstack on that platform. This is useful for PoCs. Change-Id: I943b552ca2e36210ac57f36c16db930eb5e58623 --- doc/source/configuration.rst | 60 ++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 66b8702f76..064bf515e6 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -195,6 +195,9 @@ will not be set if there is no IPv6 address on the default Ethernet interface. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IPV6`` is not set by default. +For architecture specific configurations which differ from the x86 default +here, see `arch-configuration`_. + Historical Notes ================ @@ -749,3 +752,60 @@ overridden by setting them in the ``localrc`` section. :: TERMINATE_TIMEOUT=30 + + +.. _arch-configuration: + +Architectures +------------- + +The upstream CI runs exclusively on nodes with x86 architectures, but +OpenStack supports even more architectures. Some of them need to configure +Devstack in a certain way. + +KVM on s390x (IBM z Systems) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +KVM on s390x (IBM z Systems) is supported since the *Kilo* release. For +an all-in-one setup, these minimal settings in the ``local.conf`` file +are needed:: + + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + + DOWNLOAD_DEFAULT_IMAGES=False + IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img" + + enable_service n-sproxy + disable_service n-novnc + disable_service etcd3 # https://bugs.launchpad.net/devstack/+bug/1693192 + + [[post-config|$NOVA_CONF]] + + [serial_console] + base_url=ws://$HOST_IP:6083/ # optional + +Reasoning: + +* The default image of Devstack is x86 only, so we deactivate the download + with ``DOWNLOAD_DEFAULT_IMAGES``. The referenced guest image + in the code above (``IMAGE_URLS``) serves as an example. The list of + possible s390x guest images is not limited to that. + +* This platform doesn't support a graphical console like VNC or SPICE. + The technical reason is the missing framebuffer on the platform. This + means we rely on the substitute feature *serial console* which needs the + proxy service ``n-sproxy``. We also disable VNC's proxy ``n-novnc`` for + that reason . The configuration in the ``post-config`` section is only + needed if you want to use the *serial console* outside of the all-in-one + setup. 
+ +* The service ``etcd3`` needs to be disabled as long as bug report + https://bugs.launchpad.net/devstack/+bug/1693192 is not resolved. + +.. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need + to use a guest image which is smaller than 1GB when uncompressed. + The example image from above is bigger than that! From 2dfca047d97d4867048dbc9eec34b6f7ff54655e Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 25 May 2017 14:57:19 -0700 Subject: [PATCH 0534/1936] Don't run and check c-api if it is disabled Previously we didn't block out the c-api startup code because the devstack functions to start services check that for us. However, since the cinder devstack code checks the service is up and runs the tls proxy if tls is enabled we need to block it all off to avoid doing those things if c-api is disabled. Change-Id: I1c4f22f785af96caaf4baa21ff28714b9afd3458 --- lib/cinder | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/lib/cinder b/lib/cinder index e3a687b4f2..f36c179cf2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -512,17 +512,24 @@ function start_cinder { fi fi - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - enable_apache_site osapi-volume - restart_apache_server - tail_log c-api /var/log/$APACHE_NAME/c-api.log - else - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - fi + if is_service_enabled c-api ; then + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + enable_apache_site osapi-volume + restart_apache_server + tail_log c-api /var/log/$APACHE_NAME/c-api.log + else + run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" + fi - echo "Waiting for Cinder API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then - die $LINENO "c-api did not start" + echo "Waiting for Cinder API to start..." + if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then + die $LINENO "c-api did not start" + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT + fi fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" @@ -532,11 +539,6 @@ function start_cinder { # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler # has started. This is a quick fix for lp bug/1189595 - - # Start proxies if enabled - if is_service_enabled c-api && is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi } # stop_cinder() - Stop running processes From 29ec28216f91eab118134b5444e0276ef845e0d0 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 30 May 2017 14:21:39 -0500 Subject: [PATCH 0535/1936] Configure volume_clear setting per backend volume_clear is currently set in the DEFAULT section, but this is a backend specific setting, and therefore needs to be set in the backend config section. 
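As a sketch of the effect (assuming devstack's default lvm:lvmdriver-1 backend and a CINDER_VOLUME_CLEAR of zero), the option now lands in the backend section of cinder.conf instead of [DEFAULT]:

    [lvmdriver-1]
    volume_clear = zero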
Change-Id: Ifa3a659bb4768b8915a0f23e7f14b0f3348d93d2 --- lib/cinder | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2f302c0204..762edc4393 100644 --- a/lib/cinder +++ b/lib/cinder @@ -289,6 +289,9 @@ function configure_cinder { default_name=$be_name fi enabled_backends+=$be_name, + + iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR + done iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} if [[ -n "$default_name" ]]; then @@ -318,8 +321,6 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF - iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR - # Format logging setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI From 6f962a2ee5e131282b1e2bc117784ae41b083cad Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 31 May 2017 11:00:08 +0100 Subject: [PATCH 0536/1936] Change "files" directory in etcd project Function "_install_etcd" is trying to use "files" directory to download a file. Instead of this, this directory should be $FILES, which is defined previously in parent script. TrivialFix Change-Id: I643ce3b9aba1f65f03524430c748bf120d071509 --- lib/etcd3 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index 5cab3f556a..a1a2ac6a74 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -123,18 +123,18 @@ function _install_etcd { sudo mkdir -p $ETCD_DATA_DIR # Download and cache the etcd tgz for subsequent use - if [ ! -f "files/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz - wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O files/$ETCD_DOWNLOAD_FILE - echo "${ETCD_SHA256} files/${ETCD_DOWNLOAD_FILE}" > files/etcd.sha256sum + wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE + echo "${ETCD_SHA256} $FILES/${ETCD_DOWNLOAD_FILE}" > $FILES/etcd.sha256sum # NOTE(sdague): this should go fatal if this fails - sha256sum -c files/etcd.sha256sum + sha256sum -c $FILES/etcd.sha256sum - tar xzvf files/$ETCD_DOWNLOAD_FILE -C files - sudo cp files/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + tar xzvf $FILES/$ETCD_DOWNLOAD_FILE -C $FILES + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd fi if [ ! 
-f "$ETCD_BIN_DIR/etcd" ]; then - sudo cp files/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd fi } From 07b3bc24a3dc30cf4ec7ab2448128bf5a07fbfc0 Mon Sep 17 00:00:00 2001 From: gong yong sheng Date: Mon, 5 Jun 2017 14:02:28 +0800 Subject: [PATCH 0537/1936] Change restart Always to always Change-Id: I1cb00cc012eda72ff50e958ba1fb04daeac69e26 Closes-bug: #1695822 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 30933ea4c2..26d1c28cac 100644 --- a/functions-common +++ b/functions-common @@ -1468,7 +1468,7 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "Type" "notify" iniset -sudo $unitfile "Service" "KillSignal" "SIGQUIT" - iniset -sudo $unitfile "Service" "Restart" "Always" + iniset -sudo $unitfile "Service" "Restart" "always" iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" From 1ade00da55cfc383b4832f8da03df01001b40fcd Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 5 Jun 2017 11:01:45 -0400 Subject: [PATCH 0538/1936] Fix scheduler_default_filters usage The scheduler_default_filters config option moved out of the DEFAULT option group into a more specific group, and the old option is deprecated as a result so we need to update our usage. Change-Id: I5d6574d19c3f16abadddb19f34cb645dcdcc07f4 --- lib/nova | 2 +- lib/nova_plugins/hypervisor-fake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index e580abb4b0..9c3ba33013 100644 --- a/lib/nova +++ b/lib/nova @@ -454,7 +454,7 @@ function create_nova_conf { iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER" - iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS" + iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS" iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index f9b95c1816..49c8dee83a 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -49,7 +49,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT quota_security_groups -1 iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 iniset $NOVA_CONF DEFAULT quota_key_pairs -1 - iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter,RamFilter,DiskFilter" + iniset $NOVA_CONF filter_scheduler enabled_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter,RamFilter,DiskFilter" } # install_nova_hypervisor() - Install external components From 7a74c2ab24dc7083996df5c9ed7baf89cbf219f3 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Mon, 5 Jun 2017 16:06:06 +0000 Subject: [PATCH 0539/1936] Start placement before services that might like to use it Otherwise those services, notably n-cpu, will try to register resource providers before placement is ready. 
Change-Id: I89fd4fa42baf3d19ee209c59cd85b97adb97c58b Closes-Bug: #1695634 --- stack.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 048acf8e52..24f534d2a5 100755 --- a/stack.sh +++ b/stack.sh @@ -1300,6 +1300,13 @@ elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi +# Start placement before any of the service that are likely to want +# to use it to manage resource providers. +if is_service_enabled placement; then + echo_summary "Starting Placement" + start_placement +fi + if is_service_enabled neutron; then start_neutron fi @@ -1314,10 +1321,6 @@ if is_service_enabled nova; then start_nova create_flavors fi -if is_service_enabled placement; then - echo_summary "Starting Placement" - start_placement -fi if is_service_enabled cinder; then echo_summary "Starting Cinder" start_cinder From e28db4c2f297c8438fdc614954339f9178c07d54 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 1 Jun 2017 16:07:32 -0700 Subject: [PATCH 0540/1936] Set swift functest config when using tls Because the swift functests (which use test.conf) run out of a virtualenv they don't get access to the system wide trust of the devstack CA. Handle this by explicitly configuring the cafile to trust in the test.conf file. We also set the web_front_end to apache2 as that is what is terminating TLS for us. The tests handle different web server behaviors using this flag. Swift's functests will need to read these values in and properly configure things on its end. Change-Id: I4cdba36ccab6acd76205184882ee29e4f1e12333 --- lib/swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/swift b/lib/swift index 8fad6b8f97..79ead6c104 100644 --- a/lib/swift +++ b/lib/swift @@ -541,6 +541,10 @@ EOF else iniset ${testfile} func_test auth_prefix /identity/v2.0/ fi + if is_service_enabled tls-proxy; then + iniset ${testfile} func_test cafile ${SSL_BUNDLE_FILE} + iniset ${testfile} func_test web_front_end apache2 + fi fi local user_group From e61e19ee82b8b35decdaa4151aa9d4762b8153cb Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 27 May 2017 23:43:05 +0200 Subject: [PATCH 0541/1936] Enable opensuse-42.2 as a tested distro openSUSE 42.2 passes testing on the experimental gate and in order to add it as continuosly tested target we need to add it to the positive list of tested distributions. Change-Id: I46f94cfad828534f324994c3d21bddff40e8f9a2 --- stack.sh | 2 +- tools/install_prereqs.sh | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 83da1c61cb..8989ad6141 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|rhel7|kvmibm1) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|opensuse-42.2|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index da59093581..a77eae6fe7 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -60,6 +60,14 @@ export_proxy_variables # Install Packages # ================ +if [[ "${DISTRO}" == "opensuse-42.2" ]]; then + # temporary workaround until https://bugzilla.suse.com/show_bug.cgi?id=1041161 is fixed + sudo zypper ar -f http://download.opensuse.org/update/leap/42.2-test/ leap42.2-test-updates + sudo zypper --non-interactive --gpg-auto-import-keys --no-gpg-checks ref + sudo zypper --non-interactive install liberasurecode-devel + sudo zypper rr leap42.2-test-updates +fi + # Install package requirements PACKAGES=$(get_packages general,$ENABLED_SERVICES) PACKAGES="$PACKAGES $(get_plugin_packages)" From f4dbd12f78236c7c98b68d7841783ed29d6e77d7 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 31 May 2017 13:17:22 -0700 Subject: [PATCH 0542/1936] Set specified header size when enabling tls-proxy As part of getting swift's functional testing to work properly through the tls-proxy we need to increase the allowed request header size in apache. This was a non issue without tls proxy as requests hit the eventlet webserver directly which was configured via the swift config which sets this relatively large limit (by default devstack configures swift to have a header size limit of 16384). Now we pass in an optional parameter to start_tls_proxy that includes the desired header size. lib/swift then passes in the value it also configures in its swift.conf. If not explicitly set we default to 8190 which is apache2's default. Change-Id: Ib2811c8d3cbb49cf94b70294788526b15a798edd --- lib/swift | 2 +- lib/tls | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 8fad6b8f97..8769e98cd8 100644 --- a/lib/swift +++ b/lib/swift @@ -837,7 +837,7 @@ function start_swift { if is_service_enabled tls-proxy; then local proxy_port=${SWIFT_DEFAULT_BIND_PORT} - start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT + start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE fi run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" diff --git a/lib/tls b/lib/tls index 65c2cba300..3f59d5b8a3 100644 --- a/lib/tls +++ b/lib/tls @@ -485,6 +485,8 @@ function start_tls_proxy { local f_port=$3 local b_host=$4 local b_port=$5 + # 8190 is the default apache size. + local f_header_size=${6:-8190} tune_apache_connections @@ -512,6 +514,11 @@ $listen_string # ('Connection aborted.', BadStatusLine("''",)) error KeepAlive Off + # This increase in allowed request header sizes is required + # for swift functional testing to work with tls enabled. It is 2 bytes + # larger than the apache default of 8190. + LimitRequestFieldSize $f_header_size + ProxyPass http://$b_host:$b_port/ retry=0 nocanon ProxyPassReverse http://$b_host:$b_port/ From aefc926cd45b2dc74d98f89e3a3b4cc92f2090ff Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 31 May 2017 15:39:48 -0400 Subject: [PATCH 0543/1936] Support installing os-traits from git This will be used in a src job for running os-traits changes in a dsvm/tempest setup. 
Change-Id: I3c4433fb1ca2787e96b577a15d584b625c364ef3 --- lib/oslo | 6 ++++++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index 2895503de6..d15a3031a2 100644 --- a/lib/oslo +++ b/lib/oslo @@ -54,6 +54,11 @@ GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk GITDIR["stevedore"]=$DEST/stevedore GITDIR["taskflow"]=$DEST/taskflow GITDIR["tooz"]=$DEST/tooz +# TODO(mriedem): This is a common pattern so even though os-traits isn't +# officially an oslo library, it is nice to re-use this script for non-oslo +# things like os-traits. We should rename this script to be more generic +# and then fold os-brick into it also. +GITDIR["os-traits"]=$DEST/os-traits # Support entry points installation of console scripts OSLO_BIN_DIR=$(get_python_exec_prefix) @@ -104,6 +109,7 @@ function install_oslo { _do_install_oslo_lib "stevedore" _do_install_oslo_lib "taskflow" _do_install_oslo_lib "tooz" + _do_install_oslo_lib "os-traits" } # Restore xtrace diff --git a/stackrc b/stackrc index e9b8df2131..6d11589f11 100644 --- a/stackrc +++ b/stackrc @@ -570,6 +570,10 @@ GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master} GITDIR["neutron-lib"]=$DEST/neutron-lib +# os-traits library for resource provider traits in the placement service +GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} +GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-master} + ################## # # TripleO / Heat Agent Components diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 608ef6ae91..1f2d3c23b8 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -39,7 +39,7 @@ ALL_LIBS+=" oslo.serialization django_openstack_auth" ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap" ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" +ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" From bbf14db3a7cb72703ee19040e29242ed128ddf0f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 6 Jun 2017 23:28:26 +0200 Subject: [PATCH 0544/1936] Remove temporary openSUSE-42.2 workaround We required initially 42.2 test updates to be enabled as the liberasurecode-devel update wasn't released. It is now released so we can stop pulling that part in. 
Change-Id: I4e514e317da8a95809593a49c6dce619bc4c021f --- tools/install_prereqs.sh | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a77eae6fe7..da59093581 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -60,14 +60,6 @@ export_proxy_variables # Install Packages # ================ -if [[ "${DISTRO}" == "opensuse-42.2" ]]; then - # temporary workaround until https://bugzilla.suse.com/show_bug.cgi?id=1041161 is fixed - sudo zypper ar -f http://download.opensuse.org/update/leap/42.2-test/ leap42.2-test-updates - sudo zypper --non-interactive --gpg-auto-import-keys --no-gpg-checks ref - sudo zypper --non-interactive install liberasurecode-devel - sudo zypper rr leap42.2-test-updates -fi - # Install package requirements PACKAGES=$(get_packages general,$ENABLED_SERVICES) PACKAGES="$PACKAGES $(get_plugin_packages)" From a40e036d808a1db5961351c506bfb2bacc61cd2f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 7 Jun 2017 08:24:31 +0000 Subject: [PATCH 0545/1936] Updated from generate-devstack-plugins-list Change-Id: I5980980fe5071a781b5b95efd69f479359f8ee6e --- doc/source/plugin-registry.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 96a2733690..9bfedcf0ef 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -48,6 +48,7 @@ devstack-plugin-container `git://git.openstack.org/openstack/devsta devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs `__ devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs `__ devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka `__ +devstack-plugin-libvirt-qemu `git://git.openstack.org/openstack/devstack-plugin-libvirt-qemu `__ devstack-plugin-mariadb `git://git.openstack.org/openstack/devstack-plugin-mariadb `__ devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs `__ devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika `__ @@ -62,7 +63,6 @@ freezer-web-ui `git://git.openstack.org/openstack/freeze fuxi `git://git.openstack.org/openstack/fuxi `__ gce-api `git://git.openstack.org/openstack/gce-api `__ glare `git://git.openstack.org/openstack/glare `__ -gnocchi `git://git.openstack.org/openstack/gnocchi `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ heat `git://git.openstack.org/openstack/heat `__ horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ @@ -136,6 +136,7 @@ octavia-dashboard `git://git.openstack.org/openstack/octavi os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ +patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ @@ -145,6 +146,7 @@ searchlight `git://git.openstack.org/openstack/search searchlight-ui `git://git.openstack.org/openstack/searchlight-ui `__ senlin `git://git.openstack.org/openstack/senlin `__ solum `git://git.openstack.org/openstack/solum `__ +stackube `git://git.openstack.org/openstack/stackube `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tricircle 
`git://git.openstack.org/openstack/tricircle `__ From 886d7dbe12bb0f25a3612d54877a41a917dee1f0 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 5 Jun 2017 11:32:32 -0400 Subject: [PATCH 0546/1936] nova: fix usage of scheduler_driver config option The scheduler_driver option has been moved and deprecated. This change uses the new group and name for the option. Change-Id: I27aeff5911510c9f47191acaa0c0b5b71f977cd7 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 9c3ba33013..b7a03695df 100644 --- a/lib/nova +++ b/lib/nova @@ -453,7 +453,7 @@ function create_nova_conf { fi iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" - iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF scheduler driver "$SCHEDULER" iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS" iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then From 902158bb8fb8452d747225e5986fb789828e4e67 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 30 May 2017 14:11:09 -0700 Subject: [PATCH 0547/1936] Don't treat service as enabled if in disabled list The old implementation for is_$service_enabled simply checked if any of the subservices were enabled and if so the service was considered to be enabled. This makes disabling services complicated as it means you have to list every single subservice which can and do change over time. Instead also check if the generic service name is in the disabled services list and if so don't treat the service as enabled. Change-Id: I7fe4dfca2cd9c15069d50a04161a29c5638291cb --- lib/cinder | 1 + lib/glance | 1 + lib/keystone | 1 + lib/neutron | 2 ++ lib/nova | 1 + lib/swift | 1 + lib/template | 1 + 7 files changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 2cbab2030b..291eebee87 100644 --- a/lib/cinder +++ b/lib/cinder @@ -129,6 +129,7 @@ CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_E # Test if any Cinder services are enabled # is_cinder_enabled function is_cinder_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"cinder" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 return 1 } diff --git a/lib/glance b/lib/glance index d6438a6b48..57b5f45113 100644 --- a/lib/glance +++ b/lib/glance @@ -78,6 +78,7 @@ GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} # Test if any Glance services are enabled # is_glance_enabled function is_glance_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"glance" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 return 1 } diff --git a/lib/keystone b/lib/keystone index 4bb6893089..eaed937d2e 100644 --- a/lib/keystone +++ b/lib/keystone @@ -134,6 +134,7 @@ KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} # Test if Keystone is enabled # is_keystone_enabled function is_keystone_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"keystone" ]] && return 1 [[ ,${ENABLED_SERVICES}, =~ ,"key", ]] && return 0 return 1 } diff --git a/lib/neutron b/lib/neutron index efca88050b..5c88a50bad 100644 --- a/lib/neutron +++ b/lib/neutron @@ -91,6 +91,7 @@ declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Test if any Neutron services are enabled # is_neutron_enabled function is_neutron_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"neutron-" || ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 return 1 } @@ -98,6 +99,7 @@ function 
is_neutron_enabled { # Test if any Neutron services are enabled # is_neutron_enabled function is_neutron_legacy_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 return 1 } diff --git a/lib/nova b/lib/nova index 9c3ba33013..5832f11d67 100644 --- a/lib/nova +++ b/lib/nova @@ -175,6 +175,7 @@ NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) # Test if any Nova services are enabled # is_nova_enabled function is_nova_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"nova" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 return 1 } diff --git a/lib/swift b/lib/swift index 8fad6b8f97..1472e4407b 100644 --- a/lib/swift +++ b/lib/swift @@ -174,6 +174,7 @@ SWIFT_STORAGE_IPS=${SWIFT_STORAGE_IPS:-} # Test if any Swift services are enabled # is_swift_enabled function is_swift_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"swift" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 return 1 } diff --git a/lib/template b/lib/template index b92fb40483..25d653cb46 100644 --- a/lib/template +++ b/lib/template @@ -41,6 +41,7 @@ XXX_CONF_DIR=/etc/XXXX # Test if any XXXX services are enabled # is_XXXX_enabled function is_XXXX_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"XXXX" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 return 1 } From f15fd26943c0209859d2b6ee7c0d500134b34f3f Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Sat, 27 May 2017 01:30:52 -0700 Subject: [PATCH 0548/1936] XenAPI: Move dom0 related operations to os-xenapi devstack plugin When installing OpenStack via DevStack on XenServer, we need to some preparation operations in dom0 which will refer the function in devstack/tools/xen/functions file, but we are planning to move the whole folder of tools/xen from devstack to os-xenapi, so it this patch is to moving the dom0 related operation to os-xenapi repo first. Change-Id: Ib59d802a7a4eab4ccce0e29d80f29efa4655bc0b Depends-On: I712ee74ce945859ba5118e09b7d9436ca2686cb7 --- lib/nova_plugins/hypervisor-xenserver | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index 880b87f291..6f79e4ff7c 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -84,14 +84,6 @@ function configure_nova_hypervisor { * * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1 CRONTAB - # Create directories for kernels and images - { - echo "set -eux" - cat $TOP_DIR/tools/xen/functions - echo "create_directory_for_images" - echo "create_directory_for_kernels" - echo "install_conntrack_tools" - } | $ssh_dom0 } # install_nova_hypervisor() - Install external components From dcd4b64c990660f9b11b999a3b70e17c36323c4c Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Mon, 12 Jun 2017 14:41:42 +0000 Subject: [PATCH 0549/1936] Increase KEYSTONE_LOCKOUT_DURATION to 10 Transient failures were being reported because the current lockout period for users was too short. While this does increase the run time IdentityV3UsersTest.test_user_account_lockout, it allows for more flexibility if there is network latency or some other factor that cause the lockout to expired before the next authentication. 
Change-Id: I61bc39bbc35ac414b4a72929a90845956c99eb1a Closes-Bug: 1693917 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index eaed937d2e..b185ae63f2 100644 --- a/lib/keystone +++ b/lib/keystone @@ -124,7 +124,7 @@ KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 # Security compliance KEYSTONE_SECURITY_COMPLIANCE_ENABLED=${KEYSTONE_SECURITY_COMPLIANCE_ENABLED:-True} KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2} -KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-5} +KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-10} KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} From f63aa021cf996b500b6a570c0114c490bb57461a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 8 Jun 2017 08:22:38 -0400 Subject: [PATCH 0550/1936] Fleetify nova conductor for N cells This makes us start two levels of nova-conductor processes, and one per cell. Change-Id: Ice4aceac5dc44954db3661480b9365f54e47a4c9 --- lib/nova | 111 +++++++++++++++++++++++++++++++++++++++++++++++++------ stack.sh | 4 +- 2 files changed, 102 insertions(+), 13 deletions(-) diff --git a/lib/nova b/lib/nova index 5832f11d67..8eac254262 100644 --- a/lib/nova +++ b/lib/nova @@ -53,10 +53,18 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf +NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} +# The total number of cells we expect. Must be greater than one and doesn't +# count cell0. +NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} +# Our cell index, so we know what rabbit vhost to connect to. +# This should be in the range of 1-$NOVA_NUM_CELLS +NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} + NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} if is_suse; then @@ -479,7 +487,7 @@ function create_nova_conf { # require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. 
if is_service_enabled n-api n-cond n-sched; then - iniset $NOVA_CONF database connection `database_connection_url nova` + iniset $NOVA_CONF database connection `database_connection_url nova_cell0` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` fi @@ -614,6 +622,20 @@ function create_nova_conf { if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then init_nova_service_user_conf fi + + if is_service_enabled n-cond; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + local vhost + conf=$(conductor_conf $i) + vhost="nova_cell${i}" + iniset $conf database connection `database_connection_url nova_cell${i}` + iniset $conf conductor workers "$API_WORKERS" + iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + rpc_backend_add_vhost $vhost + iniset_rpc_backend nova $conf DEFAULT $vhost + done + fi } function init_nova_service_user_conf { @@ -628,6 +650,11 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user auth_strategy keystone } +function conductor_conf { + local cell="$1" + echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" +} + function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF @@ -692,8 +719,6 @@ function init_nova { recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - # (Re)create nova databases - recreate_database nova recreate_database nova_cell0 # map_cell0 will create the cell mapping record in the nova_api DB so @@ -705,6 +730,12 @@ function init_nova { # Migrate nova and nova_cell0 databases. $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + # (Re)create nova databases + for i in $(seq 1 $NOVA_NUM_CELLS); do + recreate_database nova_cell${i} + $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync + done + if is_service_enabled n-cell; then recreate_database $NOVA_CELLS_DB fi @@ -713,9 +744,13 @@ function init_nova { # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations + # FIXME(danms): Should this be configurable? + iniset $NOVA_CONF workarounds disable_group_policy_check_upcall True + # create the cell1 cell for the main nova db where the hosts live - nova-manage cell_v2 create_cell --transport-url $(get_transport_url) \ - --name 'cell1' + for i in $(seq 1 $NOVA_NUM_CELLS); do + nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" + done fi create_nova_cache_dir @@ -823,25 +858,38 @@ function start_nova_api { # start_nova_compute() - Start the compute process function start_nova_compute { + local nomulticellflag="$1" # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF + # NOTE(danms): Don't setup conductor fleet for cellsv1 + nomulticellflag='nomulticell' else local compute_cell_conf=$NOVA_CONF fi + if [ "$nomulticellflag" = 'nomulticell' ]; then + # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so + # skip these bits and use the normal config. + NOVA_CPU_CONF=$compute_cell_conf + echo "Skipping multi-cell conductor fleet setup" + else + cp $compute_cell_conf $NOVA_CPU_CONF + iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. 
- run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do @@ -850,13 +898,13 @@ function start_nova_compute { # gets its own configuration and own log file. local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" - run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" fi export PATH=$old_path @@ -876,7 +924,6 @@ function start_nova_rest { fi # ``run_process`` checks ``is_service_enabled``, it is not needed here - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" @@ -899,8 +946,38 @@ function start_nova_rest { export PATH=$old_path } +function enable_nova_fleet { + if is_service_enabled n-cond; then + enable_service n-super-cond + for i in $(seq 1 $NOVA_NUM_CELLS); do + enable_service n-cond-cell${i} + done + fi +} + +function start_nova_conductor { + if is_service_enabled n-cell; then + echo "Starting nova-conductor in a cellsv1-compatible way" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + return + fi + + enable_nova_fleet + if is_service_enabled n-super-cond; then + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF" + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + if is_service_enabled n-cond-cell${i}; then + local conf + conf=$(conductor_conf $i) + run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" + fi + done +} + function start_nova { start_nova_rest + start_nova_conductor start_nova_compute } @@ -929,14 +1006,24 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do + for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-api-meta n-sproxy; do stop_process $serv done } +function stop_nova_conductor { + enable_nova_fleet + for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do + if is_service_enabled $srv; then + stop_process $srv + fi + done +} + # stop_nova() - Stop running processes (non-screen) function stop_nova { stop_nova_rest + stop_nova_conductor stop_nova_compute } diff --git a/stack.sh b/stack.sh index 6793d45cc1..5148ae1d77 100755 --- a/stack.sh +++ b/stack.sh @@ -1268,7 +1268,9 @@ fi # Unable to use LUKS passphrase that is exactly 16 bytes long # https://bugzilla.redhat.com/show_bug.cgi?id=1447297 if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 36) + key=$(generate_hex_string 36) + iniset $NOVA_CONF key_manager fixed_key "$key" + iniset $NOVA_CPU_CONF key_manager fixed_key "$key" fi # Launch the nova-api and wait for it to answer before continuing From f266a2dc81be050fec41a180906723ab5cf61df5 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Mon, 12 Jun 2017 14:57:59 -0700 Subject: [PATCH 0551/1936] Install test-requirements with main install To reduce the total number of invocations necessary for pip, which isn't the quickest thing ever (due to needing to evaluate constraints and deps lists and what is currently installed), combine the main installation of software with its test-requirements.txt file, which should roughly halve our pip invocations. Change-Id: Ibcc3264136e66d34a879ad1c90a62e1bb6a84243 --- inc/python | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/inc/python b/inc/python index 4c443d6a00..718cbb23b1 100644 --- a/inc/python +++ b/inc/python @@ -320,6 +320,14 @@ function pip_install { fi $xtrace + + # Also install test requirements + local install_test_reqs="" + local test_req="${!#}/test-requirements.txt" + if [[ -e "$test_req" ]]; then + install_test_reqs="-r $test_req" + fi + # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep # the same behaviour of setuptools before version 25.0.0. # related issue: https://github.com/pypa/pip/issues/3874 @@ -329,24 +337,10 @@ function pip_install { no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ - $cmd_pip $upgrade \ + $cmd_pip $upgrade $install_test_reqs \ $@ result=$? - # Also install test requirements - local test_req="${!#}/test-requirements.txt" - if [[ $result == 0 ]] && [[ -e "$test_req" ]]; then - echo "Installing test-requirements for $test_req" - $sudo_pip \ - http_proxy=${http_proxy:-} \ - https_proxy=${https_proxy:-} \ - no_proxy=${no_proxy:-} \ - PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip $upgrade \ - -r $test_req - result=$? - fi - time_stop "pip_install" return $result } From 2b6e9ac47127453fdf907f3d9b56c71260ed0ba2 Mon Sep 17 00:00:00 2001 From: Boris Pavlovic Date: Mon, 12 Jun 2017 17:08:33 -0700 Subject: [PATCH 0552/1936] Improve OpenStack performance by reducing the number of bcrypt hashing rounds Reduce bcrypt hashing rounds from 12 to 4 (the minimum possible). This significantly improves the performance of OpenStack. Bcrypt is a hashing algorithm that is designed to use a lot of resources and in that way stop brute-force attacks. It is an exponential algorithm whose cost depends on the number of rounds.
By default bcrypt uses 12 rounds, which is quite a high value, good enough for real, secure production environments. In the case of DevStack it slows down all authentication considerably; Rally shows about a 5x slowdown (adding 2-5 seconds to every authenticate call). DevStack is meant for development & CI, so performance is way more important than security. Change-Id: Id8c763d63cb91f37a774f9400f35c309f37d6f12 --- lib/keystone | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/keystone b/lib/keystone index eaed937d2e..7994065bbd 100644 --- a/lib/keystone +++ b/lib/keystone @@ -127,6 +127,12 @@ KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2} KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-5} KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} +# Number of bcrypt hashing rounds, increasing number exponentially increases required +# resources to generate password hash. This is very effective way to protect from +# bruteforce attacks. 4 is minimal value that can be specified for bcrypt and +# it works way faster than default 12. Minimal value is great for CI and development +# however may not be suitable for real production. +KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Functions # --------- @@ -225,6 +231,7 @@ function configure_keystone { fi iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND" + iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND" iniset $KEYSTONE_CONF role driver "$KEYSTONE_ROLE_BACKEND" iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND" From 15b0a5f1eb849a30bcea9a1e437e9a88ae2c6f92 Mon Sep 17 00:00:00 2001 From: Markus Zoeller Date: Wed, 31 May 2017 11:21:22 +0200 Subject: [PATCH 0553/1936] systemd: Always create the systemd unit files Commit 5edae54 introduced the usage of systemd in Devstack. This allowed the transition away from 'screen'. Systemd needs "user unit files" to describe the services. Currently, those unit files only get created when an openstack service (n-cpu, c-sch, g-api, ...) is in the list of enabled services (`ENABLED_SERVICES`). This means, when Devstack is fully stacked, there is no way to start the systemd unit of an openstack service which is *not* in that list. This commit changes that behavior, and creates the systemd unit files independently of the list ENABLED_SERVICES. This means, when Devstack is fully stacked, I can start a systemd unit of an openstack service which wasn't in the ENABLED_SERVICES list. This allows more flexible lifecycle management of openstack services in the gate, which is useful for tests that exercise components which are not in the "default configuration" (e.g. the "nova-serialproxy" service). The `clean.sh` script purges all traces of systemd user unit files created by devstack. Change-Id: I0f7e1ee8723f4de47cbc56b727182f90a2b32bfb --- clean.sh | 7 +++++++ functions-common | 23 +++++++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/clean.sh b/clean.sh index ef38fbf4f8..9ffe3bee6b 100755 --- a/clean.sh +++ b/clean.sh @@ -125,6 +125,13 @@ if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then sudo rm -rf $SCREEN_LOGDIR fi +# Clean out the systemd user unit files if systemd was used. +if [[ "$USE_SYSTEMD" = "True" ]]; then + sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete + # Make systemd aware of the deletion.
+ $SYSTEMCTL daemon-reload +fi + # Clean up venvs DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" rm -rf $DIRS_TO_CLEAN diff --git a/functions-common b/functions-common index 30933ea4c2..48ce72524e 100644 --- a/functions-common +++ b/functions-common @@ -1508,8 +1508,13 @@ EOF } -# Helper function to build a basic unit file and run it under systemd. -function _run_under_systemd { +# Defines a systemd service which can be enabled and started later on. +# arg1: The openstack service name ('n-cpu', 'c-sch', ...). +# arg2: The command to start (e.g. path to service binary + config files). +# arg3: The group which owns the process. +# arg4: The user which owns the process. +# Returns: The systemd service name which got defined. +function _define_systemd_service { local service=$1 local command="$2" local cmd=$command @@ -1524,9 +1529,7 @@ function _run_under_systemd { else write_user_unit_file $systemd_service "$cmd" "$group" "$user" fi - - $SYSTEMCTL enable $systemd_service - $SYSTEMCTL start $systemd_service + echo $systemd_service } # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. @@ -1567,11 +1570,19 @@ function run_process { local user=$4 local name=$service + local systemd_service time_start "run_process" + # Note we deliberately make all service files, even if the service + # isn't enabled, so it can be enabled by a dev manually on command + # line. + if [[ "$USE_SYSTEMD" = "True" ]]; then + systemd_service=$(_define_systemd_service "$name" "$command" "$group" "$user") + fi if is_service_enabled $service; then if [[ "$USE_SYSTEMD" = "True" ]]; then - _run_under_systemd "$name" "$command" "$group" "$user" + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service elif [[ "$USE_SCREEN" = "True" ]]; then if [[ "$user" == "root" ]]; then command="sudo $command" From d095e97624467fb1e0fa38955b45960d3cbc5651 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 13 Jun 2017 10:18:36 -0700 Subject: [PATCH 0554/1936] Support unicode via en_US.utf8 Because C.utf8 is not everywhere and is sometimes called C.UTF-8 (just to confuse people) use en_US.utf8 which is in most places. This isn't language/region agnostic but gives a consistent unicode aware locale to devstack. Change-Id: I67a8c77a5041e9cee740adf0e02fdc9b183c5bc4 fixes-bug: 1697733 --- stack.sh | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 6793d45cc1..39f0d1080c 100755 --- a/stack.sh +++ b/stack.sh @@ -27,11 +27,37 @@ set -o xtrace # Make sure custom grep options don't get in the way unset GREP_OPTIONS -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. +# NOTE(sdague): why do we explicitly set locale when running stack.sh? +# +# Devstack is written in bash, and many functions used throughout +# devstack process text comming off a command (like the ip command) +# and do transforms using grep, sed, cut, awk on the strings that are +# returned. Many of these programs are interationalized, which is +# great for end users, but means that the strings that devstack +# functions depend upon might not be there in other locales. We thus +# need to pin the world to an english basis during the runs. +# +# Previously we used the C locale for this, every system has it, and +# it gives us a stable sort order. It does however mean that we +# effectively drop unicode support.... boo! 
:( +# +# With python3 being more unicode aware by default, that's not the +# right option. While there is a C.utf8 locale, some distros are +# shipping it as C.UTF8 for extra confusingness. And it's support +# isn't super clear across distros. This is made more challenging when +# trying to support both out of the box distros, and the gate which +# uses diskimage builder to build disk images in a different way than +# the distros do. +# +# So... en_US.utf8 it is. That's existed for a very long time. It is a +# compromise position, but it is the least worse idea at the time of +# this comment. +# +# We also have to unset other variables that might impact LC_ALL +# taking effect. unset LANG unset LANGUAGE -LC_ALL=C +LC_ALL=en_US.utf8 export LC_ALL # Make sure umask is sane From 633dbc3d8e7d3f06e13d532748c71ffba6cd21ea Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 14 Jun 2017 12:09:21 -0700 Subject: [PATCH 0555/1936] Track db sync command time useage We are trying to keep better track of what pieces of devstack consume the most time. Add the db sync commands to the time tracking as they run the database migrations which can take more time than expected. Change-Id: Ib92f2b8304ccf703712d45fd7207444de3599e2d --- lib/cinder | 2 ++ lib/glance | 2 ++ lib/keystone | 2 ++ lib/neutron | 2 ++ lib/neutron-legacy | 2 ++ lib/nova | 2 ++ lib/placement | 2 ++ 7 files changed, 14 insertions(+) diff --git a/lib/cinder b/lib/cinder index 291eebee87..20688129fd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -407,8 +407,10 @@ function init_cinder { # (Re)create cinder database recreate_database cinder + time_start "dbsync" # Migrate cinder database $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync + time_stop "dbsync" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then diff --git a/lib/glance b/lib/glance index 57b5f45113..baf8c6191c 100644 --- a/lib/glance +++ b/lib/glance @@ -285,11 +285,13 @@ function init_glance { # (Re)create glance database recreate_database glance + time_start "dbsync" # Migrate glance database $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync # Load metadata definitions $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs + time_stop "dbsync" create_glance_cache_dir } diff --git a/lib/keystone b/lib/keystone index eaed937d2e..9de660b4e2 100644 --- a/lib/keystone +++ b/lib/keystone @@ -458,8 +458,10 @@ function init_keystone { recreate_database keystone fi + time_start "dbsync" # Initialize keystone database $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync + time_stop "dbsync" if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then # Set up certificates diff --git a/lib/neutron b/lib/neutron index 5c88a50bad..2a660ec8e1 100644 --- a/lib/neutron +++ b/lib/neutron @@ -336,8 +336,10 @@ function init_neutron_new { recreate_database neutron + time_start "dbsync" # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads + time_stop "dbsync" create_neutron_cache_dir } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 67cf110644..784f3a8167 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -417,8 +417,10 @@ function create_mutnauq_accounts { # init_mutnauq() - Initialize databases, etc. 
function init_mutnauq { recreate_database $Q_DB_NAME + time_start "dbsync" # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + time_stop "dbsync" } # install_mutnauq() - Collect source and prepare diff --git a/lib/nova b/lib/nova index aae3108013..18715fc418 100644 --- a/lib/nova +++ b/lib/nova @@ -649,7 +649,9 @@ function init_nova_cells { iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata fi + time_start "dbsync" $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync + time_stop "dbsync" $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 diff --git a/lib/placement b/lib/placement index ad12824a38..8adbbdec68 100644 --- a/lib/placement +++ b/lib/placement @@ -149,7 +149,9 @@ function create_placement_accounts { function init_placement { if [ "$PLACEMENT_DB_ENABLED" != False ]; then recreate_database placement + time_start "dbsync" $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + time_stop "dbsync" fi create_placement_accounts } From 3e9b562d0b1f1815597daa5346ba32ef48f868af Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 14 Jun 2017 15:29:47 -0700 Subject: [PATCH 0556/1936] Actually check if roles are set In the helper functions that check if roles are set and, if not, add the role and return the id, we weren't actually checking if the role was set. The reason for this was that we grepped for name values while outputting only uuid values with OSC. The fix for this is straightforward: we just add the --role argument to OSC, which will filter for us, so we don't have to use a grep on the wrong value type.
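As a quick illustration of the resulting lookup pattern (a sketch only -- the role/user/project names below are made up; the real helpers pass their $1/$2/$3 arguments):

    user_role_id=$(openstack role assignment list \
        --role Member --user demo --project demo \
        | grep '^|\s[a-f0-9]\+' | get_field 1)

Because the listing is already filtered by --role, matching any data row on its leading uuid is enough to tell whether the assignment exists, and get_field then returns that uuid.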
Change-Id: I2691b347d2a6273100deb4a1750ab353a8e49673 --- functions-common | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index 30933ea4c2..35613ab7e9 100644 --- a/functions-common +++ b/functions-common @@ -864,10 +864,11 @@ function get_or_add_user_project_role { # Gets user role id user_role_id=$(openstack role assignment list \ + --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep " $1 " | get_field 1) + | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it openstack role add $1 \ @@ -875,10 +876,11 @@ function get_or_add_user_project_role { --project $3 \ $domain_args user_role_id=$(openstack role assignment list \ + --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep " $1 " | get_field 1) + | grep '^|\s[a-f0-9]\+' | get_field 1) fi echo $user_role_id } @@ -889,18 +891,20 @@ function get_or_add_user_domain_role { local user_role_id # Gets user role id user_role_id=$(openstack role assignment list \ + --role $1 \ --user $2 \ --domain $3 \ - | grep " $1 " | get_field 1) + | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it openstack role add $1 \ --user $2 \ --domain $3 user_role_id=$(openstack role assignment list \ + --role $1 \ --user $2 \ --domain $3 \ - | grep " $1 " | get_field 1) + | grep '^|\s[a-f0-9]\+' | get_field 1) fi echo $user_role_id } @@ -911,6 +915,7 @@ function get_or_add_group_project_role { local group_role_id # Gets group role id group_role_id=$(openstack role assignment list \ + --role $1 \ --group $2 \ --project $3 \ -f value) @@ -920,6 +925,7 @@ function get_or_add_group_project_role { --group $2 \ --project $3 group_role_id=$(openstack role assignment list \ + --role $1 \ --group $2 \ --project $3 \ -f value) From 85cf2933ccdd70f4f6d505c9aca36f063eddf229 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Mar 2017 15:35:13 -0400 Subject: [PATCH 0557/1936] Provide timings for OSC during devstack run The OSC number remain high, and it's useful to understand how much time we spend making OSC calls, especially to surface it relative to other items. The way we embed this in our code makes it hard to instrument. This patch creates a wrapper function for OSC which collects the timings then aliases `openstack` to that function. This means any invocations of the openstack utility goes through our function while devstack is running. Because this is an alias it only affects the stack.sh shell and any subshells. This also moves the time tracking infrastructure to count in ms, instead of s, because some of these operations are close enough to a second that rounding early is losing way to many significant digits. We divide by 1000 before reporting to the user. 
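Two shell details this relies on, sketched here for clarity (illustrative only; the actual wrapper is in the diff below):

    start=$(date +%s%3N)            # GNU date: epoch seconds plus a 3-digit fraction, i.e. milliseconds
    out=$(command openstack "$@")   # 'command' bypasses the openstack->oscwrap alias so the wrapper doesn't recurse
    end=$(date +%s%3N)
    echo $((end - start))           # elapsed wall time in ms; totals are divided by 1000 for the final summary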
Change-Id: Ic5f1844ce732d447ee980b3c9fdc417f72482609 --- functions-common | 51 ++++++++++++++++++++++++++++++++++++++++++++++-- stack.sh | 9 +++++++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index ec68644757..98e9a4bf66 100644 --- a/functions-common +++ b/functions-common @@ -2459,7 +2459,7 @@ function time_start { if [[ -n "$start_time" ]]; then die $LINENO "Trying to start the clock on $name, but it's already been started" fi - _TIME_START[$name]=$(date +%s) + _TIME_START[$name]=$(date +%s%3N) } # time_stop $name @@ -2480,7 +2480,7 @@ function time_stop { if [[ -z "$start_time" ]]; then die $LINENO "Trying to stop the clock on $name, but it was never started" fi - end_time=$(date +%s) + end_time=$(date +%s%3N) elapsed_time=$(($end_time - $start_time)) total=${_TIME_TOTAL[$name]:-0} # reset the clock so we can start it in the future @@ -2488,6 +2488,49 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function oscwrap { + local out + local rc + local start + local end + # Cannot use timer_start and timer_stop as we run in subshells + # and those rely on modifying vars in the same process (which cannot + # happen from a subshell. + start=$(date +%s%3N) + out=$(command openstack "$@") + rc=$? + end=$(date +%s%3N) + echo $((end - start)) >> $OSCWRAP_TIMER_FILE + + echo "$out" + return $rc +} + +function install_oscwrap { + # File to accumulate our timing data + OSCWRAP_TIMER_FILE=$(mktemp) + # Bash by default doesn't expand aliases, allow it for the aliases + # we want to whitelist. + shopt -s expand_aliases + # Remove all aliases that might be expanded to preserve old unexpanded + # behavior + unalias -a + # Add only the alias we want for openstack + alias openstack=oscwrap +} + +function cleanup_oscwrap { + local total=0 + if python3_enabled ; then + local python=python3 + else + local python=python + fi + total=$(cat $OSCWRAP_TIMER_FILE | $python -c "import sys; print(sum(int(l) for l in sys.stdin))") + _TIME_TOTAL["osc"]=$total + rm $OSCWRAP_TIMER_FILE +} + # time_totals # Print out total time summary function time_totals { @@ -2506,6 +2549,8 @@ function time_totals { fi done + cleanup_oscwrap + xtrace=$(set +o | grep xtrace) set +o xtrace @@ -2517,6 +2562,8 @@ function time_totals { echo for t in ${!_TIME_TOTAL[*]}; do local v=${_TIME_TOTAL[$t]} + # because we're recording in milliseconds + v=$(($v / 1000)) printf "%-${len}s %3d\n" "$t" "$v" done echo "=========================" diff --git a/stack.sh b/stack.sh index 20cdc1dfcc..89b61fc2c5 100755 --- a/stack.sh +++ b/stack.sh @@ -493,6 +493,11 @@ function exit_trap { kill 2>&1 $jobs fi + #Remove timing data file + if [ -f "$OSCWRAP_TIMER_FILE" ] ; then + rm "$OSCWRAP_TIMER_FILE" + fi + # Kill the last spinner process kill_spinner @@ -903,6 +908,10 @@ else pip_install_gr python-openstackclient fi +# Installs alias for osc so that we can collect timing for all +# osc commands. Alias dies with stack.sh. +install_oscwrap + if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then From 19279b0f87e2ab1c684d62078df296211d3a60cc Mon Sep 17 00:00:00 2001 From: Antoni Segura Puimedon Date: Fri, 16 Jun 2017 16:03:32 +0200 Subject: [PATCH 0558/1936] etcd3: Allow for multi-host deployments In Multi host deployments, it is possible to run ETCD in a different host than the SERVICE_HOST (where all the controllers run). 
This patch brings that distinction. Change-Id: I15fe6f25eedf1efebaab81cce26b080577b856cc Signed-off-by: Antoni Segura Puimedon --- lib/etcd3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index f8b113bae6..75c2ae93ae 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -53,9 +53,9 @@ function start_etcd3 { cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380" cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380" - cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" + cmd+=" --advertise-client-urls http://${HOST_IP}:$ETCD_PORT" cmd+=" --listen-peer-urls http://0.0.0.0:2380 " - cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" + cmd+=" --listen-client-urls http://${HOST_IP}:$ETCD_PORT" local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" From 41da1a9feb397d2536acd2f59daab55937a04513 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 19 Jun 2017 08:55:47 +0000 Subject: [PATCH 0559/1936] Updated from generate-devstack-plugins-list Change-Id: Ida1d6d012e1e05f35ba45670436acd6f920c9575 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9bfedcf0ef..c1c66b9bb6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -149,6 +149,7 @@ solum `git://git.openstack.org/openstack/solum stackube `git://git.openstack.org/openstack/stackube `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ +tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ tricircle `git://git.openstack.org/openstack/tricircle `__ trio2o `git://git.openstack.org/openstack/trio2o `__ trove `git://git.openstack.org/openstack/trove `__ From 62b56601a9c6d6e9ad573e87bdecf360a3dd915e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 19 Jun 2017 08:27:16 -0400 Subject: [PATCH 0560/1936] rework etcd3 install to be like other devstack services This separates out the install phase early from the start phase to make this mirror other services in devstack. Depends-On: I4124dc7e3fd3b4d973979da85209ec991c0f8c4b Change-Id: I76f8740448b25a48869ee80006e826baa6cafc2b --- lib/etcd3 | 13 +------------ lib/oslo | 7 +++++++ stack.sh | 4 ++++ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index f8b113bae6..25de238ff8 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -41,13 +41,6 @@ fi # start_etcd3() - Starts to run the etcd process function start_etcd3 { - # Don't install in sub nodes (multinode scenario) - if [ "$SERVICE_HOST" != "$HOST_IP" ]; then - return - fi - - _install_etcd - local cmd="$ETCD_BIN_DIR/etcd" cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" @@ -96,7 +89,7 @@ function cleanup_etcd3 { sudo rm -rf $ETCD_DATA_DIR } -function _install_etcd { +function install_etcd3 { echo "Installing etcd" # Make sure etcd3 downloads the correct architecture @@ -115,10 +108,6 @@ function _install_etcd { ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH - # Install the libraries needed. 
Note: tooz for example does not have a hard dependency on these libraries - pip_install etcd3 - pip_install etcd3gw - # Create the necessary directories sudo mkdir -p $ETCD_BIN_DIR sudo mkdir -p $ETCD_DATA_DIR diff --git a/lib/oslo b/lib/oslo index d15a3031a2..cbfd5fb513 100644 --- a/lib/oslo +++ b/lib/oslo @@ -109,7 +109,14 @@ function install_oslo { _do_install_oslo_lib "stevedore" _do_install_oslo_lib "taskflow" _do_install_oslo_lib "tooz" + # installation of additional libraries + # + # os-traits for nova _do_install_oslo_lib "os-traits" + + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3 + pip_install etcd3gw } # Restore xtrace diff --git a/stack.sh b/stack.sh index cfdc2c4c50..c46859567c 100755 --- a/stack.sh +++ b/stack.sh @@ -813,6 +813,10 @@ if is_service_enabled neutron; then install_neutron_agent_packages fi +if is_service_enabled etcd3; then + install_etcd3 +fi + # Check Out and Install Source # ---------------------------- From 3ed99c0b27122ff00e2d236086ab16b0cc1887c1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 20 Jun 2017 14:09:30 -0400 Subject: [PATCH 0561/1936] mv lib/oslo to lib/libraries There is confusion about where installation of new libraries should end up, to prevent lots of little files being added make a lib/libraries which is the old lib/oslo. Put compat functions and includes in place to help with transition. Change-Id: Ieeab605d187ef6aec571211ab235ea67fa95a607 --- lib/libraries | 135 ++++++++++++++++++++++++++++++++++++++++++++++++++ lib/oslo | 121 +------------------------------------------- stack.sh | 6 +-- 3 files changed, 140 insertions(+), 122 deletions(-) create mode 100644 lib/libraries diff --git a/lib/libraries b/lib/libraries new file mode 100644 index 0000000000..9feb503f1b --- /dev/null +++ b/lib/libraries @@ -0,0 +1,135 @@ +#!/bin/bash +# +# lib/oslo +# +# Functions to install **Oslo** libraries from git +# +# We need this to handle the fact that projects would like to use +# pre-released versions of oslo libraries. 
+ +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - install_oslo + +# Save trace setting +_XTRACE_LIB_OSLO=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +GITDIR["automaton"]=$DEST/automaton +GITDIR["castellan"]=$DEST/castellan +GITDIR["cliff"]=$DEST/cliff +GITDIR["cursive"]=$DEST/cursive +GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["futurist"]=$DEST/futurist +GITDIR["os-client-config"]=$DEST/os-client-config +GITDIR["osc-lib"]=$DEST/osc-lib +GITDIR["oslo.cache"]=$DEST/oslo.cache +GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency +GITDIR["oslo.config"]=$DEST/oslo.config +GITDIR["oslo.context"]=$DEST/oslo.context +GITDIR["oslo.db"]=$DEST/oslo.db +GITDIR["oslo.i18n"]=$DEST/oslo.i18n +GITDIR["oslo.log"]=$DEST/oslo.log +GITDIR["oslo.messaging"]=$DEST/oslo.messaging +GITDIR["oslo.middleware"]=$DEST/oslo.middleware +GITDIR["oslo.policy"]=$DEST/oslo.policy +GITDIR["oslo.privsep"]=$DEST/oslo.privsep +GITDIR["oslo.reports"]=$DEST/oslo.reports +GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap +GITDIR["oslo.serialization"]=$DEST/oslo.serialization +GITDIR["oslo.service"]=$DEST/oslo.service +GITDIR["oslo.utils"]=$DEST/oslo.utils +GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects +GITDIR["oslo.vmware"]=$DEST/oslo.vmware +GITDIR["osprofiler"]=$DEST/osprofiler +GITDIR["pycadf"]=$DEST/pycadf +GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk +GITDIR["stevedore"]=$DEST/stevedore +GITDIR["taskflow"]=$DEST/taskflow +GITDIR["tooz"]=$DEST/tooz +# TODO(mriedem): This is a common pattern so even though os-traits isn't +# officially an oslo library, it is nice to re-use this script for non-oslo +# things like os-traits. We should rename this script to be more generic +# and then fold os-brick into it also. +GITDIR["os-traits"]=$DEST/os-traits + +# Support entry points installation of console scripts +OSLO_BIN_DIR=$(get_python_exec_prefix) + + +# Functions +# --------- + +function _install_lib_from_source { + local name=$1 + if use_library_from_git "$name"; then + git_clone_by_name "$name" + setup_dev_lib "$name" + fi +} + +# install_oslo - install libraries that oslo needs +function install_oslo { + install_libs +} + +# install_libs() - Install additional libraries that we need and want +# on all environments. Some will only install here if from source, +# others will always install. 
+function install_libs { + _install_lib_from_source "automaton" + _install_lib_from_source "castellan" + _install_lib_from_source "cliff" + _install_lib_from_source "cursive" + _install_lib_from_source "debtcollector" + _install_lib_from_source "futurist" + _install_lib_from_source "osc-lib" + _install_lib_from_source "os-client-config" + _install_lib_from_source "oslo.cache" + _install_lib_from_source "oslo.concurrency" + _install_lib_from_source "oslo.config" + _install_lib_from_source "oslo.context" + _install_lib_from_source "oslo.db" + _install_lib_from_source "oslo.i18n" + _install_lib_from_source "oslo.log" + _install_lib_from_source "oslo.messaging" + _install_lib_from_source "oslo.middleware" + _install_lib_from_source "oslo.policy" + _install_lib_from_source "oslo.privsep" + _install_lib_from_source "oslo.reports" + _install_lib_from_source "oslo.rootwrap" + _install_lib_from_source "oslo.serialization" + _install_lib_from_source "oslo.service" + _install_lib_from_source "oslo.utils" + _install_lib_from_source "oslo.versionedobjects" + _install_lib_from_source "oslo.vmware" + _install_lib_from_source "osprofiler" + _install_lib_from_source "pycadf" + _install_lib_from_source "python-openstacksdk" + _install_lib_from_source "stevedore" + _install_lib_from_source "taskflow" + _install_lib_from_source "tooz" + # installation of additional libraries + # + # os-traits for nova + _install_lib_from_source "os-traits" + + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3 + pip_install etcd3gw +} + +# Restore xtrace +$_XTRACE_LIB_OSLO + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/oslo b/lib/oslo index cbfd5fb513..3ae64c8210 100644 --- a/lib/oslo +++ b/lib/oslo @@ -6,123 +6,6 @@ # # We need this to handle the fact that projects would like to use # pre-released versions of oslo libraries. 
- -# Dependencies: # -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - install_oslo - -# Save trace setting -_XTRACE_LIB_OSLO=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- -GITDIR["automaton"]=$DEST/automaton -GITDIR["castellan"]=$DEST/castellan -GITDIR["cliff"]=$DEST/cliff -GITDIR["cursive"]=$DEST/cursive -GITDIR["debtcollector"]=$DEST/debtcollector -GITDIR["futurist"]=$DEST/futurist -GITDIR["os-client-config"]=$DEST/os-client-config -GITDIR["osc-lib"]=$DEST/osc-lib -GITDIR["oslo.cache"]=$DEST/oslo.cache -GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency -GITDIR["oslo.config"]=$DEST/oslo.config -GITDIR["oslo.context"]=$DEST/oslo.context -GITDIR["oslo.db"]=$DEST/oslo.db -GITDIR["oslo.i18n"]=$DEST/oslo.i18n -GITDIR["oslo.log"]=$DEST/oslo.log -GITDIR["oslo.messaging"]=$DEST/oslo.messaging -GITDIR["oslo.middleware"]=$DEST/oslo.middleware -GITDIR["oslo.policy"]=$DEST/oslo.policy -GITDIR["oslo.privsep"]=$DEST/oslo.privsep -GITDIR["oslo.reports"]=$DEST/oslo.reports -GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap -GITDIR["oslo.serialization"]=$DEST/oslo.serialization -GITDIR["oslo.service"]=$DEST/oslo.service -GITDIR["oslo.utils"]=$DEST/oslo.utils -GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects -GITDIR["oslo.vmware"]=$DEST/oslo.vmware -GITDIR["osprofiler"]=$DEST/osprofiler -GITDIR["pycadf"]=$DEST/pycadf -GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk -GITDIR["stevedore"]=$DEST/stevedore -GITDIR["taskflow"]=$DEST/taskflow -GITDIR["tooz"]=$DEST/tooz -# TODO(mriedem): This is a common pattern so even though os-traits isn't -# officially an oslo library, it is nice to re-use this script for non-oslo -# things like os-traits. We should rename this script to be more generic -# and then fold os-brick into it also. 
-GITDIR["os-traits"]=$DEST/os-traits - -# Support entry points installation of console scripts -OSLO_BIN_DIR=$(get_python_exec_prefix) - - -# Functions -# --------- - -function _do_install_oslo_lib { - local name=$1 - if use_library_from_git "$name"; then - git_clone_by_name "$name" - setup_dev_lib "$name" - fi -} - -# install_oslo() - Collect source and prepare -function install_oslo { - _do_install_oslo_lib "automaton" - _do_install_oslo_lib "castellan" - _do_install_oslo_lib "cliff" - _do_install_oslo_lib "cursive" - _do_install_oslo_lib "debtcollector" - _do_install_oslo_lib "futurist" - _do_install_oslo_lib "osc-lib" - _do_install_oslo_lib "os-client-config" - _do_install_oslo_lib "oslo.cache" - _do_install_oslo_lib "oslo.concurrency" - _do_install_oslo_lib "oslo.config" - _do_install_oslo_lib "oslo.context" - _do_install_oslo_lib "oslo.db" - _do_install_oslo_lib "oslo.i18n" - _do_install_oslo_lib "oslo.log" - _do_install_oslo_lib "oslo.messaging" - _do_install_oslo_lib "oslo.middleware" - _do_install_oslo_lib "oslo.policy" - _do_install_oslo_lib "oslo.privsep" - _do_install_oslo_lib "oslo.reports" - _do_install_oslo_lib "oslo.rootwrap" - _do_install_oslo_lib "oslo.serialization" - _do_install_oslo_lib "oslo.service" - _do_install_oslo_lib "oslo.utils" - _do_install_oslo_lib "oslo.versionedobjects" - _do_install_oslo_lib "oslo.vmware" - _do_install_oslo_lib "osprofiler" - _do_install_oslo_lib "pycadf" - _do_install_oslo_lib "python-openstacksdk" - _do_install_oslo_lib "stevedore" - _do_install_oslo_lib "taskflow" - _do_install_oslo_lib "tooz" - # installation of additional libraries - # - # os-traits for nova - _do_install_oslo_lib "os-traits" - - # etcd (because tooz does not have a hard dependency on these) - pip_install etcd3 - pip_install etcd3gw -} - -# Restore xtrace -$_XTRACE_LIB_OSLO - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +# Included for compatibility with grenade, remove in Queens +source $TOP_DIR/lib/libraries diff --git a/stack.sh b/stack.sh index c46859567c..7034bc4ca2 100755 --- a/stack.sh +++ b/stack.sh @@ -592,7 +592,7 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -822,8 +822,8 @@ fi echo_summary "Installing OpenStack project source" -# Install Oslo libraries -install_oslo +# Install additional libraries +install_libs # Install uwsgi install_apache_uwsgi From d064acce4ceca5c897b31304439e235a230ac8f3 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Wed, 21 Jun 2017 09:06:37 +0000 Subject: [PATCH 0562/1936] Ignore etcd files downloaded to files/ dir Change-Id: Ic1458a612ccdefdcc34880529a259513135fa7a1 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 7967e14a19..d2c127d099 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ files/pip-* files/get-pip.py* files/ir-deploy* files/ironic-inspector* +files/etcd* local.conf local.sh localrc From 99a6477c498588b4639c8669eb1a04a128dc7f2b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 21 Jun 2017 10:46:12 -0400 Subject: [PATCH 0563/1936] remove lib/os_brick Fold this back into lib/libraries Change-Id: I2c4e3ebf1a1cc65841e4470d5cfe073f9b505d5f --- lib/libraries | 4 ++++ lib/os_brick | 32 -------------------------------- stack.sh | 6 ------ 3 files changed, 4 insertions(+), 38 deletions(-) delete mode 100644 lib/os_brick diff --git 
a/lib/libraries b/lib/libraries index 9feb503f1b..946872c3ad 100644 --- a/lib/libraries +++ b/lib/libraries @@ -59,6 +59,7 @@ GITDIR["tooz"]=$DEST/tooz # things like os-traits. We should rename this script to be more generic # and then fold os-brick into it also. GITDIR["os-traits"]=$DEST/os-traits +GITDIR["os-brick"]=$DEST/os-brick # Support entry points installation of console scripts OSLO_BIN_DIR=$(get_python_exec_prefix) @@ -120,6 +121,9 @@ function install_libs { # # os-traits for nova _install_lib_from_source "os-traits" + # os-brick for nova/cinder + _install_lib_from_source "os-brick" + # etcd (because tooz does not have a hard dependency on these) pip_install etcd3 diff --git a/lib/os_brick b/lib/os_brick deleted file mode 100644 index d1cca4af44..0000000000 --- a/lib/os_brick +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# -# lib/os_brick -# Install **os-brick** python module from source - -# Dependencies: -# -# - functions -# - DEST, DATA_DIR must be defined - -# stack.sh -# --------- -# - install_os_brick - -# Save trace setting -_XTRACE_OS_BRICK=$(set +o | grep xtrace) -set +o xtrace - - -GITDIR["os-brick"]=$DEST/os-brick - -# Install os_brick from git only if requested, otherwise it will be pulled from -# pip repositories by requirements of projects that need it. -function install_os_brick { - if use_library_from_git "os-brick"; then - git_clone_by_name "os-brick" - setup_dev_lib "os-brick" - fi -} - -# Restore xtrace -$_XTRACE_OS_BRICK \ No newline at end of file diff --git a/stack.sh b/stack.sh index 7034bc4ca2..59f25973ea 100755 --- a/stack.sh +++ b/stack.sh @@ -605,7 +605,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/etcd3 -source $TOP_DIR/lib/os_brick # Extras Source # -------------- @@ -841,11 +840,6 @@ if is_service_enabled neutron nova horizon; then install_neutronclient fi -# Install shared libraries -if is_service_enabled cinder nova; then - install_os_brick -fi - # Setup TLS certs if is_service_enabled tls-proxy; then configure_CA From 7e41c6ce669a082fcb5f50ef3503b8d66d19b35f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 21 Jun 2017 10:55:16 -0400 Subject: [PATCH 0564/1936] cleanup comments Change-Id: I685f726178722e58ccfd008bad16a671cc8b8e2a --- lib/libraries | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/libraries b/lib/libraries index 946872c3ad..761ce66694 100644 --- a/lib/libraries +++ b/lib/libraries @@ -2,7 +2,7 @@ # # lib/oslo # -# Functions to install **Oslo** libraries from git +# Functions to install libraries from git # # We need this to handle the fact that projects would like to use # pre-released versions of oslo libraries. @@ -13,10 +13,10 @@ # ``stack.sh`` calls the entry points in this order: # -# - install_oslo +# - install_libraries # Save trace setting -_XTRACE_LIB_OSLO=$(set +o | grep xtrace) +_XTRACE_LIB_LIBRARIES=$(set +o | grep xtrace) set +o xtrace @@ -54,12 +54,11 @@ GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk GITDIR["stevedore"]=$DEST/stevedore GITDIR["taskflow"]=$DEST/taskflow GITDIR["tooz"]=$DEST/tooz -# TODO(mriedem): This is a common pattern so even though os-traits isn't -# officially an oslo library, it is nice to re-use this script for non-oslo -# things like os-traits. We should rename this script to be more generic -# and then fold os-brick into it also. -GITDIR["os-traits"]=$DEST/os-traits + +# Non oslo libraries are welcomed below as well, this prevents +# duplication of this code. 
GITDIR["os-brick"]=$DEST/os-brick +GITDIR["os-traits"]=$DEST/os-traits # Support entry points installation of console scripts OSLO_BIN_DIR=$(get_python_exec_prefix) @@ -120,18 +119,20 @@ function install_libs { # installation of additional libraries # # os-traits for nova - _install_lib_from_source "os-traits" - # os-brick for nova/cinder _install_lib_from_source "os-brick" + _install_lib_from_source "os-traits" # etcd (because tooz does not have a hard dependency on these) + # + # NOTE(sdague): this is currently a work around because tooz + # doesn't pull in etcd3. pip_install etcd3 pip_install etcd3gw } # Restore xtrace -$_XTRACE_LIB_OSLO +$_XTRACE_LIB_LIBRARIES # Tell emacs to use shell-script-mode ## Local variables: From 43304849cfde41fa86a6fdd334ab6e703598ef8d Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 21 Jun 2017 21:45:14 +0100 Subject: [PATCH 0565/1936] Set public_endpoint to GLANCE_URL for glance-api We run glance behind uwsgi. This means that the URL glance knows about itself is wrong, and version discovery fails. Set the public endpoint to the value of GLANCE_URL which should always be correct. Change-Id: Ia7c69024a0ef6cc0fdc284ffcd06eee5678a1007 --- lib/glance | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 99941232f3..41145f980d 100644 --- a/lib/glance +++ b/lib/glance @@ -186,9 +186,12 @@ function configure_glance { inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi + # We need to tell glance what it's public endpoint is so that the version + # discovery document will be correct + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL + if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI From 3410e3e01bdbdbfd360d9baebeac081c33ee0821 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 21 Jun 2017 22:07:53 +1000 Subject: [PATCH 0566/1936] Revert "Use uwsgi for glance-api" This reverts commit e6217a9719b88b550ccbbce7c9bc064cb746c1a3. Unfortunately it seems to break glance; for examples see some of the nodepool dib jobs which have all failed to upload images into glance. Note this has a revert for 43304849cfde41fa86a6fdd334ab6e703598ef8d as well, since that uses GLANCE_URL defined here. 
[1] http://logs.openstack.org/51/475051/2/gate/gate-dsvm-nodepool-redhat-src/e1bdb34/ [2] http://logs.openstack.org/51/475051/2/gate/gate-dsvm-nodepool-ubuntu-src/6a7665b/ [3] http://logs.openstack.org/51/475051/2/gate/gate-dsvm-nodepool-opensuse-src/b357de1/ Change-Id: I2aea120e733b05e806801121ec994f4e31a337d9 --- lib/apache | 54 +----------------------------------------------------- lib/cinder | 2 +- lib/glance | 44 ++++++++++++-------------------------------- lib/nova | 2 +- 4 files changed, 15 insertions(+), 87 deletions(-) diff --git a/lib/apache b/lib/apache index 25c65fe672..c1b6bf8b41 100644 --- a/lib/apache +++ b/lib/apache @@ -275,64 +275,12 @@ function write_uwsgi_config { else local apache_conf="" apache_conf=$(apache_site_config_for $name) - echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf enable_apache_site $name restart_apache_server fi } -# For services using chunked encoding, the only services known to use this -# currently are Glance and Swift, we need to use an http proxy instead of -# mod_proxy_uwsgi because the chunked encoding gets dropped. See: -# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 -# but that involves having apache buffer the request before sending it to -# uswgi. -function write_local_uwsgi_http_config { - local file=$1 - local wsgi=$2 - local url=$3 - name=$(basename $wsgi) - - # create a home for the sockets; note don't use /tmp -- apache has - # a private view of it on some platforms. - - # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" - port=$(get_random_port) - iniset "$file" uwsgi http "127.0.0.1:$port" - iniset "$file" uwsgi processes $API_WORKERS - # This is running standalone - iniset "$file" uwsgi master true - # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload true - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins python - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true - # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 - # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" - # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 - iniset "$file" uwsgi http-raw-body true - iniset "$file" uwsgi http-chunked-input true - iniset "$file" uwsgi http-auto-chunked true - - enable_apache_mod proxy - enable_apache_mod proxy_http - local apache_conf="" - apache_conf=$(apache_site_config_for $name) - echo "KeepAlive Off" | sudo tee $apache_conf - echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf - enable_apache_site $name - restart_apache_server -} - function remove_uwsgi_config { local file=$1 local wsgi=$2 diff --git a/lib/cinder b/lib/cinder index 243b639a11..20688129fd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -335,7 +335,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" - iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" + iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE diff --git a/lib/glance b/lib/glance index 41145f980d..baf8c6191c 100644 --- a/lib/glance +++ b/lib/glance @@ -71,16 +71,6 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} -GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api -GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uswgi.ini -# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet -# TODO(mtreinish): Remove the eventlet path here and in all the similar -# conditionals below after the Pike release -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" -else - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" -fi # Functions # --------- @@ -114,13 +104,16 @@ function configure_glance { dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -148,6 +141,8 @@ function configure_glance { iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST + iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" @@ -186,12 +181,9 @@ function configure_glance { inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi - # 
We need to tell glance what it's public endpoint is so that the version - # discovery document will be correct - iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL - if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI @@ -207,6 +199,7 @@ function configure_glance { setup_logging $GLANCE_REGISTRY_CONF cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF @@ -239,13 +232,6 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" fi - - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" - else - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - fi } # create_glance_accounts() - Set up common required glance accounts @@ -270,7 +256,7 @@ function create_glance_accounts { get_or_create_endpoint \ "image" \ "$REGION_NAME" \ - "$GLANCE_URL" + "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) @@ -337,21 +323,15 @@ function install_glance { function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT - fi + start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT fi run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process g-api "$GLANCE_BIN_DIR/uwsgi --ini $GLANCE_UWSGI_CONF" - else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" - fi + run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" - echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then + echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." + if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then die $LINENO "g-api did not start" fi } diff --git a/lib/nova b/lib/nova index f56509ac1a..18715fc418 100644 --- a/lib/nova +++ b/lib/nova @@ -575,7 +575,7 @@ function create_nova_conf { # enable notifications, but it will allow them to function when enabled. 
iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset_rpc_backend nova $NOVA_CONF - iniset $NOVA_CONF glance api_servers "$GLANCE_URL" + iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" From 599ecfbd4c71de5f71663b1cadf2700b31a84ec2 Mon Sep 17 00:00:00 2001 From: TommyLike Date: Tue, 20 Jun 2017 11:32:25 +0800 Subject: [PATCH 0567/1936] Download etcd3 only zip file not exists Give a change to manually prepare the zip file for etcd. Change-Id: I54283cf2ed62bb716af34df21e8c168014264c8c --- lib/etcd3 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index ea58403b24..0e1fbd5bc2 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -115,7 +115,9 @@ function install_etcd3 { # Download and cache the etcd tgz for subsequent use if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz - wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE + if [ ! -f "$FILES/$ETCD_DOWNLOAD_FILE" ]; then + wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE + fi echo "${ETCD_SHA256} $FILES/${ETCD_DOWNLOAD_FILE}" > $FILES/etcd.sha256sum # NOTE(sdague): this should go fatal if this fails sha256sum -c $FILES/etcd.sha256sum From ed7dbe5754b95f54f663f695b27c7c14b1fdbd6c Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Mon, 19 Jun 2017 16:50:38 -0400 Subject: [PATCH 0568/1936] Add python-barbicanclient to LIBS_FROM_GIT Allow python-barbicanclient to be installed from git instead of pip. Change-Id: I99cd72724e11bab362bcaaeb773f33b2abfe815c --- lib/libraries | 3 +++ stackrc | 5 +++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/libraries b/lib/libraries index 761ce66694..4ceb80423c 100644 --- a/lib/libraries +++ b/lib/libraries @@ -121,6 +121,9 @@ function install_libs { # os-traits for nova _install_lib_from_source "os-brick" _install_lib_from_source "os-traits" + # + # python client libraries we might need from git can go here + _install_lib_from_source "python-barbicanclient" # etcd (because tooz does not have a hard dependency on these) diff --git a/stackrc b/stackrc index cfe2496240..50f7c89ba3 100644 --- a/stackrc +++ b/stackrc @@ -341,6 +341,11 @@ GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master} +# python barbican client library +GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git} +GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-master} +GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient + # python glance client library GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 1f2d3c23b8..5b4ff32f2a 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -43,7 +43,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib 
oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan" +ALL_LIBS+=" castellan python-barbicanclient" # Generate the above list with # echo ${!GITREPO[@]} From 13e81ad1cf6d652946b78082b280fb12d190f6f2 Mon Sep 17 00:00:00 2001 From: Yuval Brik Date: Fri, 23 Jun 2017 10:32:16 +0300 Subject: [PATCH 0569/1936] Fedora mariadb: disable cracklib In Fedora mariadb, cracklib has been enabled [0] in order to verify the password strength. Disable cracklib in Fedora devstack in order to allow simple passwords in dev environments. [0] https://src.fedoraproject.org/cgit/rpms/mariadb.git/ commit: 9442da192282aa74f43e86c96202109a173bbaba Change-Id: I2d5e965f0f19f86992794eec78134e862899c931 --- lib/databases/mysql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index 7bbcace399..a0cf7a4296 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -71,6 +71,10 @@ function configure_database_mysql { elif is_fedora; then mysql=mariadb my_conf=/etc/my.cnf + local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf + if [ -f "$cracklib_conf" ]; then + inicomment -sudo "$cracklib_conf" "mariadb" "plugin-load-add" + fi else exit_distro_not_supported "mysql configuration" fi From c087c71834a6eb1d44040142f888b33ffefb5192 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 15 Jun 2017 12:10:45 +0000 Subject: [PATCH 0570/1936] Revert "systemd: Always create the systemd unit files" This reverts commit 15b0a5f1eb849a30bcea9a1e437e9a88ae2c6f92. The change had an assumption that a service is properly configured even when it isn't enabled. The assumption is not true. Change-Id: Ib5a8ffe63eaec15bc29bfdd133db7169507bab82 Closes-Bug: #1698129 --- functions-common | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/functions-common b/functions-common index 48ce72524e..30933ea4c2 100644 --- a/functions-common +++ b/functions-common @@ -1508,13 +1508,8 @@ EOF } -# Defines a systemd service which can be enabled and started later on. -# arg1: The openstack service name ('n-cpu', 'c-sch', ...). -# arg2: The command to start (e.g. path to service binary + config files). -# arg3: The group which owns the process. -# arg4: The user which owns the process. -# Returns: The systemd service name which got defined. -function _define_systemd_service { +# Helper function to build a basic unit file and run it under systemd. +function _run_under_systemd { local service=$1 local command="$2" local cmd=$command @@ -1529,7 +1524,9 @@ function _define_systemd_service { else write_user_unit_file $systemd_service "$cmd" "$group" "$user" fi - echo $systemd_service + + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service } # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. @@ -1570,19 +1567,11 @@ function run_process { local user=$4 local name=$service - local systemd_service time_start "run_process" - # Note we deliberately make all service files, even if the service - # isn't enabled, so it can be enabled by a dev manually on command - # line. 
- if [[ "$USE_SYSTEMD" = "True" ]]; then - systemd_service=$(_define_systemd_service "$name" "$command" "$group" "$user") - fi if is_service_enabled $service; then if [[ "$USE_SYSTEMD" = "True" ]]; then - $SYSTEMCTL enable $systemd_service - $SYSTEMCTL start $systemd_service + _run_under_systemd "$name" "$command" "$group" "$user" elif [[ "$USE_SCREEN" = "True" ]]; then if [[ "$user" == "root" ]]; then command="sudo $command" From 0ffd5a944c371ae441c2c8d977486c5a20bc75fc Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jun 2017 11:28:20 -0600 Subject: [PATCH 0571/1936] remove unnecessary cleanup in nova startup nova's instances directory may be a shared directory (ie nfs), in these cases, we do not want to call nova_cleanup at startup since it deletes everything under $NOVA_INSTANCES_PATH. The nova_cleanup routine will still be called by the clean.sh script which is fine since we're presumably cleaning up the whole openstack cluster at that point. Change-Id: Ieb4e5d0508d4ed4c5349c497554c5da2993c9cb0 Closes-Bug: #1649389 --- stack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/stack.sh b/stack.sh index 326d186b19..2be4528991 100755 --- a/stack.sh +++ b/stack.sh @@ -893,7 +893,6 @@ fi if is_service_enabled nova; then # Compute service stack_install_service nova - cleanup_nova configure_nova fi From 1fa653635781cd975a1031e212b35b6c38196ba4 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 23 Jun 2017 22:32:37 +0000 Subject: [PATCH 0572/1936] Revert "Revert "Use uwsgi for glance-api"" This reverts commit 3410e3e01bdbdbfd360d9baebeac081c33ee0821. Change-Id: Ic58711311eb8534cb3c4b25c333197c412ffdce5 --- lib/apache | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/cinder | 2 +- lib/glance | 44 ++++++++++++++++++++++++++++++++------------ lib/nova | 2 +- 4 files changed, 87 insertions(+), 15 deletions(-) diff --git a/lib/apache b/lib/apache index c1b6bf8b41..25c65fe672 100644 --- a/lib/apache +++ b/lib/apache @@ -275,12 +275,64 @@ function write_uwsgi_config { else local apache_conf="" apache_conf=$(apache_site_config_for $name) - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf + echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi } +# For services using chunked encoding, the only services known to use this +# currently are Glance and Swift, we need to use an http proxy instead of +# mod_proxy_uwsgi because the chunked encoding gets dropped. See: +# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 +# but that involves having apache buffer the request before sending it to +# uswgi. +function write_local_uwsgi_http_config { + local file=$1 + local wsgi=$2 + local url=$3 + name=$(basename $wsgi) + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. 
+ + # always cleanup given that we are using iniset here + rm -rf $file + iniset "$file" uwsgi wsgi-file "$wsgi" + port=$(get_random_port) + iniset "$file" uwsgi http "127.0.0.1:$port" + iniset "$file" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$file" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$file" uwsgi die-on-term true + iniset "$file" uwsgi exit-on-reload true + iniset "$file" uwsgi enable-threads true + iniset "$file" uwsgi plugins python + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$file" uwsgi thunder-lock true + # Override the default size for headers from the 4k default. + iniset "$file" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$file" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$file" uwsgi lazy-apps true + iniset "$file" uwsgi chmod-socket 666 + iniset "$file" uwsgi http-raw-body true + iniset "$file" uwsgi http-chunked-input true + iniset "$file" uwsgi http-auto-chunked true + + enable_apache_mod proxy + enable_apache_mod proxy_http + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "KeepAlive Off" | sudo tee $apache_conf + echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + function remove_uwsgi_config { local file=$1 local wsgi=$2 diff --git a/lib/cinder b/lib/cinder index 20688129fd..243b639a11 100644 --- a/lib/cinder +++ b/lib/cinder @@ -335,7 +335,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" - iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" + iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE diff --git a/lib/glance b/lib/glance index baf8c6191c..41145f980d 100644 --- a/lib/glance +++ b/lib/glance @@ -71,6 +71,16 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} +GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uswgi.ini +# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet +# TODO(mtreinish): Remove the eventlet path here and in all the similar +# conditionals below after the Pike release +if [[ "$WSGI_MODE" == "uwsgi" ]]; then + GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" +else + GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" +fi # Functions # --------- @@ -104,16 +114,13 @@ function configure_glance { dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout 
"$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -141,8 +148,6 @@ function configure_glance { iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" @@ -181,9 +186,12 @@ function configure_glance { inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi + # We need to tell glance what it's public endpoint is so that the version + # discovery document will be correct + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL + if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI @@ -199,7 +207,6 @@ function configure_glance { setup_logging $GLANCE_REGISTRY_CONF cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI - cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF @@ -232,6 +239,13 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" fi + + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + else + iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS + iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + fi } # create_glance_accounts() - Set up common required glance accounts @@ -256,7 +270,7 @@ function create_glance_accounts { get_or_create_endpoint \ "image" \ "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" + "$GLANCE_URL" # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) @@ -323,15 +337,21 @@ function install_glance { function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT + if [[ "$WSGI_MODE" != "uwsgi" ]]; then + start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT + fi start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT fi run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + if [[ "$WSGI_MODE" == "uwsgi" ]]; then + run_process g-api "$GLANCE_BIN_DIR/uwsgi --ini $GLANCE_UWSGI_CONF" + else + 
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + fi - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." + if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then die $LINENO "g-api did not start" fi } diff --git a/lib/nova b/lib/nova index 18715fc418..f56509ac1a 100644 --- a/lib/nova +++ b/lib/nova @@ -575,7 +575,7 @@ function create_nova_conf { # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset_rpc_backend nova $NOVA_CONF - iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" + iniset $NOVA_CONF glance api_servers "$GLANCE_URL" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" From 580a506f250162078f1aaf970f58033bfeb859e9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sun, 25 Jun 2017 08:01:06 +0000 Subject: [PATCH 0573/1936] Updated from generate-devstack-plugins-list Change-Id: I6e040449252aae43f330d545d979b3bdba6f855a --- doc/source/plugin-registry.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c1c66b9bb6..92e5ecdb89 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -53,6 +53,7 @@ devstack-plugin-mariadb `git://git.openstack.org/openstack/devsta devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs `__ devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika `__ devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog `__ +devstack-plugin-vmax `git://git.openstack.org/openstack/devstack-plugin-vmax `__ devstack-plugin-zmq `git://git.openstack.org/openstack/devstack-plugin-zmq `__ dragonflow `git://git.openstack.org/openstack/dragonflow `__ drbd-devstack `git://git.openstack.org/openstack/drbd-devstack `__ @@ -123,9 +124,11 @@ networking-vsphere `git://git.openstack.org/openstack/networ neutron `git://git.openstack.org/openstack/neutron `__ neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas `__ +neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard `__ neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ +neutron-vpnaas-dashboard `git://git.openstack.org/openstack/neutron-vpnaas-dashboard `__ nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ @@ -154,6 +157,7 @@ tricircle `git://git.openstack.org/openstack/tricir trio2o `git://git.openstack.org/openstack/trio2o `__ trove `git://git.openstack.org/openstack/trove `__ trove-dashboard `git://git.openstack.org/openstack/trove-dashboard `__ +valet `git://git.openstack.org/openstack/valet `__ vitrage `git://git.openstack.org/openstack/vitrage `__ vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard `__ vmware-nsx `git://git.openstack.org/openstack/vmware-nsx `__ From 20eeff813d21e86c1d529d5cbc4f7b0ad732e69d Mon 
Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 26 Jun 2017 07:53:08 +0000 Subject: [PATCH 0574/1936] Revert "Fleetify nova conductor for N cells" This reverts commit f63aa021cf996b500b6a570c0114c490bb57461a. Change-Id: I541abe7311e5721d79e300fcb2a0b8ffdcd3c015 Related-Bug: #1700364 --- lib/nova | 111 ++++++------------------------------------------------- stack.sh | 4 +- 2 files changed, 13 insertions(+), 102 deletions(-) diff --git a/lib/nova b/lib/nova index 8eac254262..5832f11d67 100644 --- a/lib/nova +++ b/lib/nova @@ -53,18 +53,10 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf -NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} -# The total number of cells we expect. Must be greater than one and doesn't -# count cell0. -NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} -# Our cell index, so we know what rabbit vhost to connect to. -# This should be in the range of 1-$NOVA_NUM_CELLS -NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} - NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} if is_suse; then @@ -487,7 +479,7 @@ function create_nova_conf { # require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. if is_service_enabled n-api n-cond n-sched; then - iniset $NOVA_CONF database connection `database_connection_url nova_cell0` + iniset $NOVA_CONF database connection `database_connection_url nova` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` fi @@ -622,20 +614,6 @@ function create_nova_conf { if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then init_nova_service_user_conf fi - - if is_service_enabled n-cond; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - local vhost - conf=$(conductor_conf $i) - vhost="nova_cell${i}" - iniset $conf database connection `database_connection_url nova_cell${i}` - iniset $conf conductor workers "$API_WORKERS" - iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - rpc_backend_add_vhost $vhost - iniset_rpc_backend nova $conf DEFAULT $vhost - done - fi } function init_nova_service_user_conf { @@ -650,11 +628,6 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user auth_strategy keystone } -function conductor_conf { - local cell="$1" - echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" -} - function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF @@ -719,6 +692,8 @@ function init_nova { recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + # (Re)create nova databases + recreate_database nova recreate_database nova_cell0 # map_cell0 will create the cell mapping record in the nova_api DB so @@ -730,12 +705,6 @@ function init_nova { # Migrate nova and nova_cell0 databases. $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - # (Re)create nova databases - for i in $(seq 1 $NOVA_NUM_CELLS); do - recreate_database nova_cell${i} - $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync - done - if is_service_enabled n-cell; then recreate_database $NOVA_CELLS_DB fi @@ -744,13 +713,9 @@ function init_nova { # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations - # FIXME(danms): Should this be configurable? 
- iniset $NOVA_CONF workarounds disable_group_policy_check_upcall True - # create the cell1 cell for the main nova db where the hosts live - for i in $(seq 1 $NOVA_NUM_CELLS); do - nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" - done + nova-manage cell_v2 create_cell --transport-url $(get_transport_url) \ + --name 'cell1' fi create_nova_cache_dir @@ -858,38 +823,25 @@ function start_nova_api { # start_nova_compute() - Start the compute process function start_nova_compute { - local nomulticellflag="$1" # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF - # NOTE(danms): Don't setup conductor fleet for cellsv1 - nomulticellflag='nomulticell' else local compute_cell_conf=$NOVA_CONF fi - if [ "$nomulticellflag" = 'nomulticell' ]; then - # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so - # skip these bits and use the normal config. - NOVA_CPU_CONF=$compute_cell_conf - echo "Skipping multi-cell conductor fleet setup" - else - cp $compute_cell_conf $NOVA_CPU_CONF - iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" - fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do @@ -898,13 +850,13 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" - run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" fi export PATH=$old_path @@ -924,6 +876,7 @@ function start_nova_rest { fi # ``run_process`` checks ``is_service_enabled``, it is not needed here + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" @@ -946,38 +899,8 @@ function start_nova_rest { export PATH=$old_path } -function enable_nova_fleet { - if is_service_enabled n-cond; then - enable_service n-super-cond - for i in $(seq 1 $NOVA_NUM_CELLS); do - enable_service n-cond-cell${i} - done - fi -} - -function start_nova_conductor { - if is_service_enabled n-cell; then - echo "Starting nova-conductor in a cellsv1-compatible way" - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - return - fi - - enable_nova_fleet - if is_service_enabled n-super-cond; then - run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF" - fi - for i in $(seq 1 $NOVA_NUM_CELLS); do - if is_service_enabled n-cond-cell${i}; then - local conf - conf=$(conductor_conf $i) - run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" - fi - done -} - function start_nova { start_nova_rest - start_nova_conductor start_nova_compute } @@ -1006,24 +929,14 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. - for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-api-meta n-sproxy; do + for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do stop_process $serv done } -function stop_nova_conductor { - enable_nova_fleet - for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do - if is_service_enabled $srv; then - stop_process $srv - fi - done -} - # stop_nova() - Stop running processes (non-screen) function stop_nova { stop_nova_rest - stop_nova_conductor stop_nova_compute } diff --git a/stack.sh b/stack.sh index 5148ae1d77..6793d45cc1 100755 --- a/stack.sh +++ b/stack.sh @@ -1268,9 +1268,7 @@ fi # Unable to use LUKS passphrase that is exactly 16 bytes long # https://bugzilla.redhat.com/show_bug.cgi?id=1447297 if is_service_enabled nova; then - key=$(generate_hex_string 36) - iniset $NOVA_CONF key_manager fixed_key "$key" - iniset $NOVA_CPU_CONF key_manager fixed_key "$key" + iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 36) fi # Launch the nova-api and wait for it to answer before continuing From b90bb1a46193b052cd98513213eed688e8e29d23 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 18 Apr 2017 16:30:14 +0000 Subject: [PATCH 0575/1936] Use uwsgi for nova when NOVA_USE_MOD_WSGI is not False Unless NOVA_USE_MOD_WSGI is False, run nova-api and nova-metadata using uwsgi. 
Because the metadata server is always expected to run on a port and without a prefix, we have it configured to use uwsgi but not to proxy from apache: uwsgi listens on the configured port itself. uwsgi process that listen themselve do not need a socket or to chmod-socket, so those config lines have been moved to the block that is also writing proxy configuration for apache. Because this change only uses uwsgi for nova-api and nova-api-meta, nova-api-meta is set to default to enabled in stackrc because the nova-api wsgi application used by wsgi only presents the one service (osapi_compute). If NOVA_USE_MOD_WSGI is False and tls_proxy service is enabled, nova-api is run on an internal port reached via the tls_proxy. Depends-On: I8ff08d61520ccf04e32dcd02f4cecc39dae823cb Change-Id: If2d7e363a6541854f2e30c03171bef7a41aff745 --- lib/apache | 4 +- lib/nova | 144 ++++++++++++++--------------------------------------- stackrc | 2 +- 3 files changed, 40 insertions(+), 110 deletions(-) diff --git a/lib/apache b/lib/apache index 25c65fe672..8f517d2e61 100644 --- a/lib/apache +++ b/lib/apache @@ -250,7 +250,6 @@ function write_uwsgi_config { # always cleanup given that we are using iniset here rm -rf $file iniset "$file" uwsgi wsgi-file "$wsgi" - iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi processes $API_WORKERS # This is running standalone iniset "$file" uwsgi master true @@ -267,7 +266,6 @@ function write_uwsgi_config { iniset "$file" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then @@ -276,6 +274,8 @@ function write_uwsgi_config { local apache_conf="" apache_conf=$(apache_site_config_for $name) echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf + iniset "$file" uwsgi socket "$socket" + iniset "$file" uwsgi chmod-socket 666 echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server diff --git a/lib/nova b/lib/nova index f56509ac1a..3fa5de67eb 100644 --- a/lib/nova +++ b/lib/nova @@ -17,7 +17,6 @@ # # - install_nova # - configure_nova -# - _config_nova_apache_wsgi # - create_nova_conf # - init_nova # - start_nova @@ -28,7 +27,6 @@ _XTRACE_LIB_NOVA=$(set +o | grep xtrace) set +o xtrace - # Defaults # -------- @@ -56,17 +54,20 @@ NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} +NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi +NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi +NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini +NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -if is_suse; then - NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova} -else - NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova} -fi - -# Toggle for deploying Nova-API under HTTPD + mod_wsgi -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False} +# Toggle for deploying Nova-API under a wsgi server. We default to +# true to use UWSGI, but allow False so that fall back to the +# eventlet server can happen for grenade runs. 
+# NOTE(cdent): We can adjust to remove the eventlet-base api service +# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to +# mean "use uwsgi" because we'll be always using uwsgi. +NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" @@ -236,66 +237,10 @@ function cleanup_nova { # cleanup_nova_hypervisor #fi - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - _cleanup_nova_apache_wsgi - fi -} - -# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_nova_apache_wsgi { - sudo rm -f $NOVA_WSGI_DIR/* - sudo rm -f $(apache_site_config_for nova-api) - sudo rm -f $(apache_site_config_for nova-metadata) -} - -# _config_nova_apache_wsgi() - Set WSGI config files of Nova API -function _config_nova_apache_wsgi { - sudo mkdir -p $NOVA_WSGI_DIR - - local nova_apache_conf - nova_apache_conf=$(apache_site_config_for nova-api) - local nova_metadata_apache_conf - nova_metadata_apache_conf=$(apache_site_config_for nova-metadata) - local nova_ssl="" - local nova_certfile="" - local nova_keyfile="" - local nova_api_port=$NOVA_SERVICE_PORT - local nova_metadata_port=$METADATA_SERVICE_PORT - local venv_path="" - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" - fi - - # copy proxy vhost and wsgi helper files - sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api - sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata - - sudo cp $FILES/apache-nova-api.template $nova_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$nova_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g; - s|%SSLENGINE%|$nova_ssl|g; - s|%SSLCERTFILE%|$nova_certfile|g; - s|%SSLKEYFILE%|$nova_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $nova_apache_conf - - sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$nova_metadata_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g; - s|%SSLENGINE%|$nova_ssl|g; - s|%SSLCERTFILE%|$nova_certfile|g; - s|%SSLKEYFILE%|$nova_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $nova_metadata_apache_conf + stop_process "n-api" + stop_process "n-api-meta" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" } # configure_nova() - Set config files, create data dirs, etc @@ -489,7 +434,7 @@ function create_nova_conf { NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" - if is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT @@ -524,11 +469,10 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" fi # Format logging - setup_logging $NOVA_CONF $NOVA_USE_MOD_WSGI + setup_logging $NOVA_CONF - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - _config_nova_apache_wsgi - fi + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + write_uwsgi_config 
"$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}" if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" @@ -777,10 +721,6 @@ function install_nova { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion - - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - fi } # start_nova_api() - Start the API process ahead of other things @@ -788,6 +728,7 @@ function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT local service_protocol=$NOVA_SERVICE_PROTOCOL + local nova_url if is_service_enabled tls-proxy; then service_port=$NOVA_SERVICE_PORT_INT service_protocol="http" @@ -797,29 +738,23 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - # If the site is not enabled then we are in a grenade scenario - local enabled_site_file - enabled_site_file=$(apache_site_config_for nova-api) - if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - enable_apache_site nova-api - enable_apache_site nova-metadata - restart_apache_server - tail_log nova-api /var/log/$APACHE_NAME/nova-api.log - tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log - else + if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then run_process n-api "$NOVA_BIN_DIR/nova-api" + nova_url=$service_protocol://$SERVICE_HOST:$service_port + # Start proxy if tsl enabled + if is_service_enabled tls-proxy; then + start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT + fi + else + run_process "n-api" "$NOVA_BIN_DIR/uwsgi --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ fi echo "Waiting for nova-api to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then + if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then die $LINENO "nova-api did not start" fi - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT - fi - export PATH=$old_path } @@ -890,7 +825,11 @@ function start_nova_rest { run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" + if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then + run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" + else + run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --ini $NOVA_METADATA_UWSGI_CONF" + fi run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" @@ -921,17 +860,8 @@ function stop_nova_compute { } function stop_nova_rest { - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - disable_apache_site nova-api - disable_apache_site nova-metadata - restart_apache_server - else - stop_process n-api - fi - # Kill the nova screen windows - # Some services are listed here twice since more than one instance - # of a service may be running in certain configs. 
- for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do + # Kill the non-compute nova processes + for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-sproxy; do stop_process $serv done } diff --git a/stackrc b/stackrc index 50f7c89ba3..eccc52aac5 100644 --- a/stackrc +++ b/stackrc @@ -53,7 +53,7 @@ if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth,n-api-meta # Placement service needed for Nova ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova From 2c0faca0380050b697b7d7fe6589102fa8a0cb5d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 28 Jun 2017 09:13:04 -0400 Subject: [PATCH 0576/1936] Add devstack-version script This will help in collecting information about the devstack version being used. Change-Id: I23a8ca023bda6d097051cd57ae45401806451ad5 --- functions | 15 +++++++++++++++ stack.sh | 14 +++++++++++++- stackrc | 1 + 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e056c3fba2..738040d7bc 100644 --- a/functions +++ b/functions @@ -751,6 +751,21 @@ function get_random_port { } +function write_devstack_version { + pushd $TOP_DIR + local git_version="" + git_version=$(git log --format="%H %s %ci" -1) + cat - > /tmp/devstack-version <&3 exec 2>&3 diff --git a/stackrc b/stackrc index 50f7c89ba3..5da9a2abec 100644 --- a/stackrc +++ b/stackrc @@ -272,6 +272,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. +DEVSTACK_SERIES="pike" ############## # From 82d0610f14e09931e00443364c7da25f42aa89de Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 28 Jun 2017 17:42:31 -0400 Subject: [PATCH 0577/1936] Ensure keepalive isn't used on uwsgi http mode According to the uwsgi docs [1] for http keepalive there is a separate option for http keep alive, and just setting connection close isn't enough. This commit makes sure we disable http keepalive. This will hopefully fix the random connection issues we get on image uploads to glance, which uses uwsgi http mode. [1] http://uwsgi-docs.readthedocs.io/en/latest/HTTP.html#http-keep-alive Change-Id: Ic5f83c5c93f28b2bd62ca9ac96ca8c87797ea5c9 Closes-Bug: #1701088 --- lib/apache | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/apache b/lib/apache index 25c65fe672..35a9e7b733 100644 --- a/lib/apache +++ b/lib/apache @@ -322,6 +322,7 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi http-raw-body true iniset "$file" uwsgi http-chunked-input true iniset "$file" uwsgi http-auto-chunked true + iniset "$file" uwsgi http-keepalive false enable_apache_mod proxy enable_apache_mod proxy_http From daf1274409ce8b9bd6b21955f4172bd70a7e5c68 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 3 Mar 2017 18:09:35 +0000 Subject: [PATCH 0578/1936] Run cinder-api with uWSGI Per the Pike goal, switching the Cinder API control plane to use WSGI in Apache. 
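As an example of the practical effect on the service catalog (the host address is illustrative and the default CINDER_SERVICE_PORT of 8776 is assumed), the volume endpoints move from a dedicated port to a path behind the Apache proxy:

    before:  http://203.0.113.10:8776/v3/$(project_id)s
    after:   http://203.0.113.10/volume/v3/$(project_id)s
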
Co-Authored-By: Ivan Kolodyazhny Depends-On: Ie8a0eeab1bf31887d6f37cf155b2d161ddfb172d Depends-On: I14b68f36e7fcc5341bbdbcf165274d9d50f7dd04 Change-Id: I8cef6c98f9242cc38d66de0ac499490e2a237887 --- lib/cinder | 155 +++++++++++++++++++++++++---------------------------- 1 file changed, 72 insertions(+), 83 deletions(-) diff --git a/lib/cinder b/lib/cinder index b585416f7a..4274be740a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -55,6 +55,8 @@ CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf +CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi +CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Public facing bits @@ -106,8 +108,9 @@ else CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} fi -# Toggle for deploying Cinder under HTTPD + mod_wsgi -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False} +# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi +# reference should be cleaned up to more accurately refer to uwsgi. +CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -196,38 +199,8 @@ function cleanup_cinder { done fi - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - _cinder_cleanup_apache_wsgi - fi -} - -# _cinder_config_apache_wsgi() - Set WSGI config files -function _cinder_config_apache_wsgi { - local cinder_apache_conf - cinder_apache_conf=$(apache_site_config_for osapi-volume) - local cinder_ssl="" - local cinder_certfile="" - local cinder_keyfile="" - local cinder_api_port=$CINDER_SERVICE_PORT - local venv_path="" - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages" - fi - - # copy proxy vhost file - sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$cinder_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%APIWORKERS%|$API_WORKERS|g - s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g; - s|%SSLENGINE%|$cinder_ssl|g; - s|%SSLCERTFILE%|$cinder_certfile|g; - s|%SSLKEYFILE%|$cinder_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $cinder_apache_conf + stop_process "c-api" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" } # configure_cinder() - Set config files, create data dirs, etc @@ -319,9 +292,18 @@ function configure_cinder { fi if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + # Set the service port for a proxy to take the original + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST + iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST + else + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT + iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT + fi + fi fi if [ "$SYSLOG" != "False" ]; then @@ -333,9 
+315,7 @@ function configure_cinder { # Format logging setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - _cinder_config_apache_wsgi - fi + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver @@ -374,29 +354,47 @@ function configure_cinder { # Migrated from keystone_data.sh function create_cinder_accounts { - # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then create_service_user "cinder" get_or_create_service "cinder" "volume" "Cinder Volume Service" - get_or_create_endpoint \ - "volume" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" - - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then + get_or_create_endpoint \ + "volume" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" + + get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" + get_or_create_endpoint \ + "volumev2" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" + + get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" + get_or_create_endpoint \ + "volumev3" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + else + get_or_create_endpoint \ + "volume" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s" + + get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" + get_or_create_endpoint \ + "volumev2" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s" + + get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" + get_or_create_endpoint \ + "volumev3" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" + fi configure_cinder_internal_tenant fi @@ -449,10 +447,6 @@ function install_cinder { elif [[ "$CINDER_ISCI_HELPER" == "lioadm" ]]; then install_package targetcli fi - - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - fi } # install_cinderclient() - Collect source and prepare @@ -483,7 +477,8 @@ function _configure_tgt_for_config_d { function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then + local cinder_url + if is_service_enabled tls-proxy && ["$CINDER_USE_MOD_WSGI" == "False"]; then service_port=$CINDER_SERVICE_PORT_INT service_protocol="http" fi @@ -507,24 +502,23 @@ function start_cinder { fi fi - if is_service_enabled c-api ; then - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - enable_apache_site osapi-volume - restart_apache_server - tail_log c-api /var/log/$APACHE_NAME/c-api.log - else + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" + 
cinder_url=$service_protocol://$SERVICE_HOST:$service_port + # Start proxy if tsl enabled + if is_service_enabled tls_proxy; then + start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_POR_INT + fi + else + run_process "c-api" "$CINDER_BIN_DIR/uwsgi --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi + fi - echo "Waiting for Cinder API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then - die $LINENO "c-api did not start" - fi - - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi + echo "Waiting for Cinder API to start..." + if ! wait_for_service $SERVICE_TIMEOUT $cinder_url; then + die $LINENO "c-api did not start" fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" @@ -538,12 +532,7 @@ function start_cinder { # stop_cinder() - Stop running processes function stop_cinder { - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - disable_apache_site osapi-volume - restart_apache_server - else - stop_process c-api - fi + stop_process c-api # Kill the cinder screen windows local serv From 3415521d5659b2268b8984bf89ede8e34f1b76f7 Mon Sep 17 00:00:00 2001 From: Manjeet Singh Bhatia Date: Fri, 30 Jun 2017 01:46:12 +0000 Subject: [PATCH 0579/1936] Don't dump commit message header to devstack-version Dumping commit message to /usr/local/bin/devstack-version has broken devstack build, because of bash error in formatting. error log [1]. [1]. http://logs.openstack.org/20/418720/6/check/gate-tempest-dsvm-networking-odl-boron-snapshot-v2driver/ed9fd45/logs/devstacklog.txt.gz Change-Id: I4c269a7f3d63ee9a976e7c3636fc3e5e8dab9ae3 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 738040d7bc..f04bc1fcbb 100644 --- a/functions +++ b/functions @@ -754,7 +754,7 @@ function get_random_port { function write_devstack_version { pushd $TOP_DIR local git_version="" - git_version=$(git log --format="%H %s %ci" -1) + git_version=$(git log --format="%H %ci" -1) cat - > /tmp/devstack-version < Date: Fri, 30 Jun 2017 12:15:26 -0400 Subject: [PATCH 0580/1936] Switch local http uwsgi conf to http-socket mode Previously the local uwsgi server mode was using uwsgi in http mode. This was unessecary and actually not recommend by the uwsgi docs [1][2] This is because http mode starts a frontend http process that forwards requests to the workers running the python code. This is done for the largely the same reasons we're using apache as a proxy and is unnecessary. http-socket mode doesn't do this and just exposes the workers as an http interface to the proxy. 
(in our case apache) [1] http://uwsgi-docs.readthedocs.io/en/latest/HTTP.html#http-sockets [2] http://uwsgi-docs.readthedocs.io/en/latest/ThingsToKnow.html Change-Id: I5671687c8083fa4bdee066c07b083a0f00be532b --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index cf438a0794..445241e707 100644 --- a/lib/apache +++ b/lib/apache @@ -301,7 +301,7 @@ function write_local_uwsgi_http_config { rm -rf $file iniset "$file" uwsgi wsgi-file "$wsgi" port=$(get_random_port) - iniset "$file" uwsgi http "127.0.0.1:$port" + iniset "$file" uwsgi http-socket "127.0.0.1:$port" iniset "$file" uwsgi processes $API_WORKERS # This is running standalone iniset "$file" uwsgi master true From b79531a9f96736225a8991052a0be5767c217377 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 30 Jun 2017 12:10:06 -0400 Subject: [PATCH 0581/1936] Increase the socket timeout on uwsgi local mode This commit increases the socket timeout value from 4 secs to a much higher 30 secs. This is just for sanity, the load is high when we're seeing the wsgi.input timeouts, so uwsgi might be just closing the socket waiting for data over the wire. 30 seconds is overly conservative just so we can rule this out. This will likely be shrunk to a more reasonable value in the future. Change-Id: Iae85d3a084fb33b2a63550d6e353413e98c0b39c Partial-Bug: #1701088 --- lib/apache | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/apache b/lib/apache index 445241e707..e29acf29b3 100644 --- a/lib/apache +++ b/lib/apache @@ -323,6 +323,8 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi http-chunked-input true iniset "$file" uwsgi http-auto-chunked true iniset "$file" uwsgi http-keepalive false + # Increase socket timeout for slow chunked uploads + iniset "$file" uwsgi socket-timeout 30 enable_apache_mod proxy enable_apache_mod proxy_http From 07cbc4494255efa2c394b4b60e5beaa3d10f244e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 30 Jun 2017 12:29:19 +1000 Subject: [PATCH 0582/1936] Put devstack-version info into separate file As a follow on to I4c269a7f3d63ee9a976e7c3636fc3e5e8dab9ae3; the quoting gets tricky when putting arbitrary command-substitution strings into saved echo-able strings. As they say, "the only winning move is not to play" :) An alternative proposal is to not write this into a script but just dump info into a file. To my mind, this has several advantages -- avoid getting involved in quoting, not dropping a script into the global environment -- it's just as easy to "cat" -- and the plain-text file can be collected as an artifact during log collection (also moved git commit line to separate line for easier parsing during log search, etc). 
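Assuming the file lands at /etc/devstack-version (the destination path, field names and example values below are assumptions for illustration, not output copied from a real host), it can then simply be read back or collected as-is:

    $ cat /etc/devstack-version
    DevStack Version: pike
    Change: 07cbc4494255efa2c394b4b60e5beaa3d10f244e 2017-06-30 12:29:19 +1000
    OS Version: Ubuntu 16.04 xenial
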
Change-Id: Ic7391dd087657c0daf74046e4a052c53f4eb6e1a --- functions | 17 +++++++---------- stack.sh | 6 +++--- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/functions b/functions index f04bc1fcbb..6f2164a777 100644 --- a/functions +++ b/functions @@ -750,19 +750,16 @@ function get_random_port { echo $port } - +# Save some state information +# +# Write out various useful state information to /etc/devstack-version function write_devstack_version { - pushd $TOP_DIR - local git_version="" - git_version=$(git log --format="%H %ci" -1) cat - > /tmp/devstack-version < Date: Wed, 5 Jul 2017 20:25:02 -0400 Subject: [PATCH 0583/1936] Enable tempest testing for extending an attached volume In Pike, Cinder and Nova will support extending the size of an attached volume, but it's not supported by all volume and compute backends. Tempest will also test it but uses a config option that we need to set based on how devstack is configured. Depends-On: Ibace6c2f91be9753a44e5f79fd013df11654851b Related to cinder blueprint extend-attached-volume Related to nova blueprint nova-support-attached-volume-extend Change-Id: I52cc2952a2938ce44c442aa3e3b69a905b2b55d5 --- lib/tempest | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/tempest b/lib/tempest index cc65ec7aa9..537da614d1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -430,6 +430,12 @@ function configure_tempest { TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME) + # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends + # in Cinder and the libvirt driver in Nova. + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then + TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1) From f3d533157267a1568eeb36a349aee637195e9c8a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 8 Jun 2017 08:22:38 -0400 Subject: [PATCH 0584/1936] Fleetify nova conductor for N cells This makes us start two levels of nova-conductor processes, and one per cell. Note that this also sets the notification transport_url to the top-level mq so that we continue to get a unified stream of notifications. 
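With the default of NOVA_NUM_CELLS=1 the resulting layout is roughly the following (config paths assume NOVA_CONF_DIR of /etc/nova; see conductor_conf and start_nova_conductor below):

    n-super-cond -> nova-conductor --config-file /etc/nova/nova.conf        (top-level conductor)
    n-cond-cell1 -> nova-conductor --config-file /etc/nova/nova_cell1.conf  (cell1 conductor, rabbit vhost nova_cell1)
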
Related-Bug: #1700496 Change-Id: I08d7da843d18b426dda8a8a231039d950a4c0ce5 Depends-On: I64b600b30f6e54db0ec9083c6c176e895c6d0cc2 Depends-On: If59453f1899e99040c554bcb9ad54c8a506adc56 --- lib/nova | 111 +++++++++++++++++++++++++++++++++++++++++++++++++------ stack.sh | 4 +- 2 files changed, 102 insertions(+), 13 deletions(-) diff --git a/lib/nova b/lib/nova index 3fa5de67eb..11c746911b 100644 --- a/lib/nova +++ b/lib/nova @@ -51,6 +51,7 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf +NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} @@ -59,6 +60,13 @@ NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini +# The total number of cells we expect. Must be greater than one and doesn't +# count cell0. +NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} +# Our cell index, so we know what rabbit vhost to connect to. +# This should be in the range of 1-$NOVA_NUM_CELLS +NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} + NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # Toggle for deploying Nova-API under a wsgi server. We default to @@ -424,7 +432,7 @@ function create_nova_conf { # require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. if is_service_enabled n-api n-cond n-sched; then - iniset $NOVA_CONF database connection `database_connection_url nova` + iniset $NOVA_CONF database connection `database_connection_url nova_cell0` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` fi @@ -518,6 +526,7 @@ function create_nova_conf { # Set the oslo messaging driver to the typical default. This does not # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" + iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_transport_url) iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "$GLANCE_URL" @@ -558,6 +567,20 @@ function create_nova_conf { if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then init_nova_service_user_conf fi + + if is_service_enabled n-cond; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + local vhost + conf=$(conductor_conf $i) + vhost="nova_cell${i}" + iniset $conf database connection `database_connection_url nova_cell${i}` + iniset $conf conductor workers "$API_WORKERS" + iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + rpc_backend_add_vhost $vhost + iniset_rpc_backend nova $conf DEFAULT $vhost + done + fi } function init_nova_service_user_conf { @@ -572,6 +595,11 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user auth_strategy keystone } +function conductor_conf { + local cell="$1" + echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" +} + function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF @@ -638,8 +666,6 @@ function init_nova { recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - # (Re)create nova databases - recreate_database nova recreate_database nova_cell0 # map_cell0 will create the cell mapping record in the nova_api DB so @@ -651,6 +677,12 @@ function init_nova { # Migrate nova and nova_cell0 databases. 
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + # (Re)create nova databases + for i in $(seq 1 $NOVA_NUM_CELLS); do + recreate_database nova_cell${i} + $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync + done + if is_service_enabled n-cell; then recreate_database $NOVA_CELLS_DB fi @@ -660,8 +692,9 @@ function init_nova { $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations # create the cell1 cell for the main nova db where the hosts live - nova-manage cell_v2 create_cell --transport-url $(get_transport_url) \ - --name 'cell1' + for i in $(seq 1 $NOVA_NUM_CELLS); do + nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" + done fi create_nova_cache_dir @@ -760,25 +793,40 @@ function start_nova_api { # start_nova_compute() - Start the compute process function start_nova_compute { + local nomulticellflag="$1" # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF + # NOTE(danms): Don't setup conductor fleet for cellsv1 + nomulticellflag='nomulticell' else local compute_cell_conf=$NOVA_CONF fi + if [ "$nomulticellflag" = 'nomulticell' ]; then + # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so + # skip these bits and use the normal config. + NOVA_CPU_CONF=$compute_cell_conf + echo "Skipping multi-cell conductor fleet setup" + else + cp $compute_cell_conf $NOVA_CPU_CONF + # FIXME(danms): Should this be configurable? + iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True + iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do @@ -787,13 +835,13 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" - run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" fi export PATH=$old_path @@ -813,7 +861,6 @@ function start_nova_rest { fi # ``run_process`` checks ``is_service_enabled``, it is not needed here - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" @@ -840,8 +887,38 @@ function start_nova_rest { export PATH=$old_path } +function enable_nova_fleet { + if is_service_enabled n-cond; then + enable_service n-super-cond + for i in $(seq 1 $NOVA_NUM_CELLS); do + enable_service n-cond-cell${i} + done + fi +} + +function start_nova_conductor { + if is_service_enabled n-cell; then + echo "Starting nova-conductor in a cellsv1-compatible way" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + return + fi + + enable_nova_fleet + if is_service_enabled n-super-cond; then + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF" + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + if is_service_enabled n-cond-cell${i}; then + local conf + conf=$(conductor_conf $i) + run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" + fi + done +} + function start_nova { start_nova_rest + start_nova_conductor start_nova_compute } @@ -861,14 +938,24 @@ function stop_nova_compute { function stop_nova_rest { # Kill the non-compute nova processes - for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-sproxy; do + for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-sproxy; do stop_process $serv done } +function stop_nova_conductor { + enable_nova_fleet + for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do + if is_service_enabled $srv; then + stop_process $srv + fi + done +} + # stop_nova() - Stop running processes (non-screen) function stop_nova { stop_nova_rest + stop_nova_conductor stop_nova_compute } diff --git a/stack.sh b/stack.sh index fd1865129f..8a19438eba 100755 --- a/stack.sh +++ b/stack.sh @@ -1304,7 +1304,9 @@ fi # Unable to use LUKS passphrase that is exactly 16 bytes long # https://bugzilla.redhat.com/show_bug.cgi?id=1447297 if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 36) + key=$(generate_hex_string 36) + iniset $NOVA_CONF key_manager fixed_key "$key" + iniset $NOVA_CPU_CONF key_manager fixed_key "$key" fi # Launch the nova-api and wait for it to answer before continuing From cc36397134716116d802ba82b9ea9a0bc4830a42 Mon Sep 17 00:00:00 2001 From: Leticia Wanderley Date: Mon, 26 Jun 2017 23:52:52 -0300 Subject: [PATCH 0585/1936] Fixes devstack ldap plugin When the ldap service is enable on local.conf devstack ldap plugin starts slapd service using its default config on Ubuntu and installs ldap-utils package. 
Enables domain specific drivers on Keystone and creates LDAP domain 'Users' with a demo user. Change-Id: I8d7aa260b01f675e4ed201ef93bfd66474f4b228 --- files/ldap/user.ldif.in | 23 +++++++++++++ lib/horizon | 4 +++ lib/keystone | 72 +++++++++++++++++++++++++++++++++++------ lib/ldap | 24 ++++++++++++-- 4 files changed, 112 insertions(+), 11 deletions(-) create mode 100644 files/ldap/user.ldif.in diff --git a/files/ldap/user.ldif.in b/files/ldap/user.ldif.in new file mode 100644 index 0000000000..16a980757d --- /dev/null +++ b/files/ldap/user.ldif.in @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Demo LDAP user +dn: cn=demo,ou=Users,${BASE_DN} +cn: demo +displayName: demo +givenName: demo +mail: demo@openstack.org +objectClass: inetOrgPerson +objectClass: top +sn: demo +uid: demo +userPassword: demo diff --git a/lib/horizon b/lib/horizon index 9c7ec005a2..becc5a0e67 100644 --- a/lib/horizon +++ b/lib/horizon @@ -106,6 +106,10 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" fi + if is_service_enabled ldap; then + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole diff --git a/lib/keystone b/lib/keystone index eb46526264..1061081a5e 100644 --- a/lib/keystone +++ b/lib/keystone @@ -219,17 +219,10 @@ function configure_keystone { fi # Rewrite stock ``keystone.conf`` - if is_service_enabled ldap; then - #Set all needed ldap values - iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD - iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN - iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN - iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" - iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab" - iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" + iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains" + iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True" fi - iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND" iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND" @@ -410,6 +403,10 @@ function create_keystone_accounts { get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project get_or_add_group_project_role $admin_role $admin_group $admin_project + + if is_service_enabled ldap; then + create_ldap_domain + fi } # Create a user that is capable of verifying keystone tokens for use with auth_token middleware. 
@@ -615,6 +612,63 @@ function bootstrap_keystone { --bootstrap-public-url "$KEYSTONE_SERVICE_URI" } +# create_ldap_domain() - Create domain file and initialize domain with a user +function create_ldap_domain { + # Creates domain Users + openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users + + # Create domain file inside etc/keystone/domains + KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf + mkdir -p "$KEYSTONE_CONF_DIR/domains" + touch "$KEYSTONE_LDAP_DOMAIN_FILE" + + # Set identity driver 'ldap' + iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap" + + # LDAP settings for Users domain + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_delete "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_update "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_create "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_delete "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_update "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_create "False" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap password $LDAP_PASSWORD + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_tree_dn "ou=Groups,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_objectclass "groupOfNames" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_id_attribute "cn" + + # Restart apache and identity services to associate domain and conf file + sudo service apache2 reload + sudo systemctl restart devstack@keystone + + # Create LDAP user.ldif and add user to LDAP backend + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + _ldap_varsubst $FILES/ldap/user.ldif.in $slappass >$tmp_ldap_dir/user.ldif + sudo ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/user.ldif + rm -rf $tmp_ldap_dir + + local admin_project + admin_project=$(get_or_create_project "admin" default) + local ldap_user + ldap_user=$(openstack user show --domain=Users demo -f value -c id) + local admin_role="admin" + get_or_create_role $admin_role + + # Grant demo LDAP user access to project and role + get_or_add_user_project_role $admin_role $ldap_user $admin_project +} + # Restore xtrace $_XTRACE_KEYSTONE diff --git a/lib/ldap b/lib/ldap index 4cea812d3c..5a53d0eaee 100644 --- a/lib/ldap +++ b/lib/ldap @@ -119,8 +119,7 @@ function install_ldap { printf "installing OpenLDAP" if is_ubuntu; then - # Ubuntu automatically starts LDAP so no need to call start_ldap() - : + configure_ldap elif is_fedora; then start_ldap elif is_suse; then @@ -148,6 +147,27 @@ function install_ldap { rm -rf $tmp_ldap_dir } +# configure_ldap() - Configure LDAP - reconfigure slapd +function configure_ldap { + sudo debconf-set-selections < Date: Tue, 11 Jul 2017 11:25:19 -0400 Subject: [PATCH 0586/1936] Remove mysql.qcow2 from stackrc We no longer host this on tarballs.o.o, additionally it is no longer used my trove. 
Change-Id: I2034e8ebc530704d6e63a231056f92e14a8654e4 Signed-off-by: Paul Belanger --- stackrc | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/stackrc b/stackrc index c57e485343..4f16c3edf7 100644 --- a/stackrc +++ b/stackrc @@ -727,23 +727,6 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DOWNLOAD_DEFAULT_IMAGES=False fi -# Staging area for new images. These images are cached by a run of -# ./tools/image_list.sh during CI image build (see -# project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos). -# -# To avoid CI failures grabbing the images, new images should be here -# for at least 24hrs (nodepool builds images at 14:00UTC) so the they -# are in the cache. -PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES) -if [[ "$PRECACHE_IMAGES" == "True" ]]; then - # required for trove devstack tests; see - # git.openstack.org/cgit/openstack/trove/tree/devstack/plugin.sh - IMAGE_URL="http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2" - if ! [[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then - IMAGE_URLS+=",$IMAGE_URL" - fi -fi - # Detect duplicate values in IMAGE_URLS for image_url in ${IMAGE_URLS//,/ }; do if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then From 2f7df51cfa422ae79942953ca63d6b3a4aa37dd3 Mon Sep 17 00:00:00 2001 From: Jeremy Liu Date: Wed, 12 Jul 2017 10:09:48 +0800 Subject: [PATCH 0587/1936] Correct 'uswgi' to 'uwsgi' Change-Id: I22026e5640a626f45b7508b60f492dbdb30631af --- lib/apache | 2 +- lib/glance | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/apache b/lib/apache index e29acf29b3..ffd7966de1 100644 --- a/lib/apache +++ b/lib/apache @@ -287,7 +287,7 @@ function write_uwsgi_config { # mod_proxy_uwsgi because the chunked encoding gets dropped. See: # https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 # but that involves having apache buffer the request before sending it to -# uswgi. +# uwsgi. 
function write_local_uwsgi_http_config { local file=$1 local wsgi=$2 diff --git a/lib/glance b/lib/glance index 41145f980d..0a5b9f59b6 100644 --- a/lib/glance +++ b/lib/glance @@ -72,7 +72,7 @@ GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api -GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uswgi.ini +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet # TODO(mtreinish): Remove the eventlet path here and in all the similar # conditionals below after the Pike release From 8175df1912a5722c2e308dc27623063ac542f496 Mon Sep 17 00:00:00 2001 From: Bogdan Katynski Date: Wed, 28 Jun 2017 12:34:52 +0100 Subject: [PATCH 0588/1936] Remove VGs when cleaning LVM up in devstack This patch adds removing of the volume group before removing the loopback device and the backing file when performing LVM cleanup in unstack.sh Without this commit: unstack.sh removes logical volumes, removes the loopback devices and deletes the backing file but leaves a dangling volume group $ ./stack.sh && ./unstack.sh $ sudo vgs VG #PV #LV #SN Attr VSize VFree stack-volumes-default 1 0 0 wz--n- 10.01g 10.01g $ sudo losetup -a $ sudo vgremove stack-volumes-default /dev/loop0: lseek 4096 failed: Invalid argument vg_remove_mdas stack-volumes-default failed With this commit: unstack.sh removes volume groups after removing all logical volumes but before removing the loopback device and deleting the backing file Partial-Bug: 1441236 Change-Id: Id9c06fa50f6cad28764f5a3396f559cac9999649 --- lib/lvm | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/lvm b/lib/lvm index 0cebd92f77..f0471816bf 100644 --- a/lib/lvm +++ b/lib/lvm @@ -35,7 +35,7 @@ BACKING_FILE_SUFFIX=-backing-file # _clean_lvm_volume_group removes all default LVM volumes # -# Usage: clean_lvm_volume_group $vg +# Usage: _clean_lvm_volume_group $vg function _clean_lvm_volume_group { local vg=$1 @@ -43,6 +43,16 @@ function _clean_lvm_volume_group { sudo lvremove -f $vg } +# _remove_lvm_volume_group removes the volume group +# +# Usage: _remove_lvm_volume_group $vg +function _remove_lvm_volume_group { + local vg=$1 + + # Remove the volume group + sudo vgremove -f $vg +} + # _clean_lvm_backing_file() removes the backing file of the # volume group # @@ -69,6 +79,7 @@ function clean_lvm_volume_group { local vg=$1 _clean_lvm_volume_group $vg + _remove_lvm_volume_group $vg # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then From 331b3de88e02d6cc480b386766f47b7a2d943125 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Fri, 14 Jul 2017 13:11:19 -0400 Subject: [PATCH 0589/1936] Reduce tools/image_list.sh down to 4 images Now that mysql.qcow2 has been removed, we only have 4 images to worry about. This fixes cache-devstack element for openstack-infra. 
Change-Id: Ia06f0e0679e253a1a6614f7c38abf1f5cd13991b Signed-off-by: Paul Belanger --- tools/image_list.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/image_list.sh b/tools/image_list.sh index 27b3d4612d..29b93ed1d8 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -36,7 +36,7 @@ echo $ALL_IMAGES | tr ',' '\n' | sort | uniq # Sanity check - ensure we have a minimum number of images num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l) -if [[ "$num" -lt 5 ]]; then +if [[ "$num" -lt 4 ]]; then echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right." exit 1 fi From a6467d36dbcb50c7501d5b237e32ed69ed1b4a44 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 27 Jun 2017 08:31:26 +0200 Subject: [PATCH 0590/1936] Prepare guestfs-support for openSUSE With libguestfs usage for file injection now being enabled by default as part of I568c56dbcb62ec541661364c142eff2397e3eed7 the opensuse job started to fail due to lack of guestfs images being available. The error in question was NovaException: libguestfs installed but not usable (cannot find any suitable libguestfs supermin, fixed or old-style appliance on LIBGUESTFS_PATH (search path: /usr/lib64/guestfs) This part is being fixed by explicitly adding the missing package dependencies to the compute node rpm package list while the maintenance update for Leap 42.2 is in preparation. Change-Id: Ie76ac0a51c1ee2ad6559917825dee1c7a91a3a76 --- lib/nova_plugins/hypervisor-libvirt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index f3c8add696..0c08a0fe42 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -115,7 +115,10 @@ function install_nova_hypervisor { sudo dpkg-statoverride --add --update $STAT_OVERRIDE fi done - elif is_fedora || is_suse; then + elif is_suse; then + # Workaround for missing dependencies in python-libguestfs + install_package python-libguestfs guestfs-data augeas augeas-lenses + elif is_fedora; then install_package python-libguestfs fi fi From 04016a5dd117d1696221de1e32b9d2c389b795c8 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 15 Jul 2017 12:16:58 +0200 Subject: [PATCH 0591/1936] List openSUSE 42.3 as a supported distro openSUSE 42.3 is a relatively minor update over 42.2 and I'd like to maintain it and keep it passing. Experimental gates are being proposed, once those are passing (which should be the case with this patch included), we could enable it as a non-voting gate. Change-Id: Ia421ada0ed3751c65a2a93a208e3f4a43edf8b16 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c453b917e2..b786f7bd83 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|opensuse-42.2|rhel7|kvmibm1) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|opensuse-42.2|opensuse-42.3|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From ecbebd5d7b2f332d3e2432bb7f612d6191bc23b2 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 19 Jul 2017 14:01:22 -0400 Subject: [PATCH 0592/1936] Only setup nova-dhcpbridge if using nova-network Closes-Bug: #1705324 Change-Id: I3abc84483bbd256e0bbbfe64d570903080f2a2b7 --- lib/nova | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/nova b/lib/nova index 3fa5de67eb..d91ccbbea1 100644 --- a/lib/nova +++ b/lib/nova @@ -541,19 +541,21 @@ function create_nova_conf { # Setup logging for nova-dhcpbridge command line sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf" - local service="n-dhcp" - local logfile="${service}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - if [[ -n ${LOGDIR} ]]; then - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" - iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log + if is_service_enabled n-net; then + local service="n-dhcp" + local logfile="${service}.log.${CURRENT_LOG_TIME}" + local real_logfile="${LOGDIR}/${logfile}" + if [[ -n ${LOGDIR} ]]; then + bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" + iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile" + if [[ -n ${SCREEN_LOGDIR} ]]; then + # Drop the backward-compat symlink + ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log + fi fi - fi - iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf" + iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf" + fi if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then init_nova_service_user_conf From ab7035040319d8b145e1ad4ffcefa184ad6bd8a9 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 19 Jul 2017 09:55:26 -0700 Subject: [PATCH 0593/1936] When configuring temp urls, give Swift time to start up Otherwise, we've seen intermittent "Unable to establish connection" failures, with the main devstack log reporting things like 2017-07-19 13:54:29.973 -> start proxy service 2017-07-19 13:54:30.082 -> start OSC to store temp url key 2017-07-19 13:54:31.908 -> OSC reports failure Meanwhile, the s-proxy screen session tells us things like Jul 19 13:54:31.919988 -> start child worker Jul 19 13:54:32.206598 -> still loading the WSGI app ... and ports aren't actually bound until *after* the app is loaded. Add a wait_for_service call to wait for the proxy to come up. Change-Id: I1a722de31b144797230991700e110353a2d937dd --- lib/swift | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/swift b/lib/swift index fc09093925..455740ea82 100644 --- a/lib/swift +++ b/lib/swift @@ -846,6 +846,14 @@ function start_swift { fi run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + # We also started the storage services, but proxy started last and + # will take the longest to start, so by the time it comes up, we're + # probably fine. + echo "Waiting for swift proxy to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then + die $LINENO "swift proxy did not start" + fi + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then swift_configure_tempurls fi From fa0077707e15abfbecddbd22fe8594ffe3b4ecc4 Mon Sep 17 00:00:00 2001 From: Takashi NATSUME Date: Sat, 22 Jul 2017 08:59:43 +0900 Subject: [PATCH 0594/1936] Update URLs Some URLs are broken, so fix them. The others are redirect to new URLs, so replace them with new ones. The config options of nova serial console proxy have been gathered in nova/conf/serial_console.py. So the description in doc/source/guides/nova.rst is fixed. Change-Id: Ifd81cc09969341fbf8f135a913fc6003b94e0acc --- README.rst | 4 ++-- doc/source/guides/nova.rst | 10 ++++------ inc/python | 8 ++++---- samples/local.conf | 2 +- setup.cfg | 2 +- stack.sh | 2 +- tools/fixup_stuff.sh | 2 +- 7 files changed, 14 insertions(+), 16 deletions(-) diff --git a/README.rst b/README.rst index b4240bdc91..adbf59a637 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ Goals * To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://docs.openstack.org/developer/devstack +Read more at https://docs.openstack.org/devstack/latest IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and will alter your @@ -92,5 +92,5 @@ DevStack can be extensively configured via the configuration file `local.conf`. It is likely that you will need to provide and modify this file if you want anything other than the most basic setup. Start by reading the `configuration guide -`_ +`_ for details of the configuration file and the many available options. diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index a91e0d194c..6bbab53aea 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -13,7 +13,7 @@ In Juno, nova implemented a `spec `_ to allow read/write access to the serial console of an instance via `nova-serialproxy -`_. +`_. The service can be enabled by adding ``n-sproxy`` to ``ENABLED_SERVICES``. Further options can be enabled via @@ -62,11 +62,9 @@ The service can be enabled by adding ``n-sproxy`` to Enabling the service is enough to be functional for a single machine DevStack. -These config options are defined in `nova.console.serial -`_ -and `nova.cmd.serialproxy -`_. +These config options are defined in `nova.conf.serial_console +`_. For more information on OpenStack configuration see the `OpenStack Configuration Reference -`_ +`_ diff --git a/inc/python b/inc/python index 718cbb23b1..f388f48e91 100644 --- a/inc/python +++ b/inc/python @@ -441,7 +441,7 @@ function setup_dev_lib { # project_dir: directory of project repo (e.g., /opt/stack/keystone) # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install []" function setup_install { local project_dir=$1 @@ -455,7 +455,7 @@ function setup_install { # project_dir: directory of project repo (e.g., /opt/stack/keystone) # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). 
-# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install -e []" function setup_develop { local project_dir=$1 @@ -487,7 +487,7 @@ function is_in_projects_txt { # flags: pip CLI options/flags # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install []" function _setup_package_with_constraints_edit { local project_dir=$1 @@ -523,7 +523,7 @@ function _setup_package_with_constraints_edit { # flags: pip CLI options/flags # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install []" function setup_package { local project_dir=$1 diff --git a/samples/local.conf b/samples/local.conf index 6d5351f904..8b76137c38 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -10,7 +10,7 @@ # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://docs.openstack.org/developer/devstack/configuration.html#local-conf +# in https://docs.openstack.org/devstack/latest/configuration.html#local-conf # These should be considered as samples and are unsupported DevStack code. # The ``localrc`` section replaces the old ``localrc`` configuration file. diff --git a/setup.cfg b/setup.cfg index 73d22b5268..fcd2b13f41 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/devstack +home-page = https://docs.openstack.org/devstack/latest classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License diff --git a/stack.sh b/stack.sh index b786f7bd83..018ed3e3cc 100755 --- a/stack.sh +++ b/stack.sh @@ -1534,7 +1534,7 @@ if [[ "$USE_SYSTEMD" == "True" ]]; then echo echo "Services are running under systemd unit files." 
echo "For more information see: " - echo "https://docs.openstack.org/developer/devstack/systemd.html" + echo "https://docs.openstack.org/devstack/latest/systemd.html" echo fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 0b78bdeb00..55cd7252ea 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -157,7 +157,7 @@ if is_fedora; then # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031 # [2] https://bugs.launchpad.net/neutron/+bug/1455303 # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp - # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html + # [4] https://docs.openstack.org/devstack/latest/guides/neutron.html if is_package_installed firewalld; then sudo systemctl disable firewalld # The iptables service files are no longer included by default, From d2fbcd275dfe9c7d11dc9dd5e8ab93244f653b79 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Jul 2017 10:34:22 +1000 Subject: [PATCH 0595/1936] doc: Switch from oslosphinx to openstackdocstheme Per the manuals migration effort, switch to openstackdocstheme [1] [1] https://specs.openstack.org/openstack/docs-specs/specs/pike/os-manuals-migration.html Change-Id: I0463f7d39bd72f1d27cfe3f6d5395608b9ed6b29 --- doc/source/conf.py | 10 ++++++++-- tox.ini | 8 ++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 6e3ec029e9..780237fe58 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -26,7 +26,13 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'oslosphinx', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] +extensions = [ 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] + +# openstackdocstheme options +repository_name = 'openstack-dev/devstack' +bug_project = 'devstack' +bug_tag = '' +html_last_updated_fmt = '%Y-%m-%d %H:%M' todo_include_todos = True @@ -87,7 +93,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'nature' +html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/tox.ini b/tox.ini index cc7c5444a8..46b15f4cb1 100644 --- a/tox.ini +++ b/tox.ini @@ -37,9 +37,9 @@ commands = bash -c "find {toxinidir} \ deps = Pygments docutils - sphinx>=1.5.1,<1.6.1 + sphinx>=1.6.2 pbr>=2.0.0,!=2.1.0 - oslosphinx + openstackdocstheme>=1.11.0 nwdiag blockdiag sphinxcontrib-blockdiag @@ -53,8 +53,8 @@ commands = [testenv:venv] deps = pbr>=2.0.0,!=2.1.0 - sphinx>=1.5.1,<1.6.1 - oslosphinx + sphinx>=1.6.2 + openstackdocstheme>=1.11.0 blockdiag sphinxcontrib-blockdiag sphinxcontrib-nwdiag From 7bbd4e95d02d2c54b673a93e53fc91dee61f2c90 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 13 Jul 2017 15:12:55 +0200 Subject: [PATCH 0596/1936] Add f26 to the supported distros The only mentionable diff is the kvm alias does not exists so we will install qemu-kvm as with rhel7 which also exists in the older supported fedoras. kvm also just an alias in suse so switching to qemu-kvm in suse as well. 
Change-Id: I5c79ad1ef0b11dba30c931a59786f9eb7e7f8587 --- files/rpms/cinder | 4 ++-- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/nova_plugins/functions-libvirt | 10 +--------- stack.sh | 2 +- 6 files changed, 8 insertions(+), 16 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 2c7b45baaf..3bc4e7ae72 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f24,f25 NOPRIME -targetcli # dist:rhel7,f24,f25 NOPRIME \ No newline at end of file +scsi-target-utils # not:rhel7,f24,f25,f26 NOPRIME +targetcli # dist:rhel7,f24,f25,f26 NOPRIME diff --git a/files/rpms/general b/files/rpms/general index 1393d18328..2443cc8cd7 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f23,f24,f25 +iptables-services # NOPRIME f23,f24,f25,f26 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25 +java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25,f26 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index a368c552aa..632e796d07 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f23,f24,f25 +kernel-modules # dist:f23,f24,f25,f26 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index 2f12df0e3b..2e09cec28f 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f23,f24,f25 +rsync-daemon # dist:f23,f24,f25,f26 sqlite xfsprogs xinetd diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 3e38b898ec..8d74c77517 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -73,15 +73,7 @@ function install_libvirt { #pip_install_gr elif is_fedora || is_suse; then # On "KVM for IBM z Systems", kvm does not have its own package - if [[ ! ${DISTRO} =~ "kvmibm1" && ! ${DISTRO} =~ "rhel7" ]]; then - install_package kvm - fi - - if [[ ${DISTRO} =~ "rhel7" ]]; then - # This should install the latest qemu-kvm build, - # which is called qemu-kvm-ev in centos7 - # (as the default OS qemu-kvm package is usually rather old, - # and should be updated by above) + if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then install_package qemu-kvm fi diff --git a/stack.sh b/stack.sh index b786f7bd83..2ceb93323c 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|opensuse-42.2|opensuse-42.3|rhel7|kvmibm1) ]]; then +if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7|kvmibm1) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 46a54c90895cf5d90ebe4921fc5ce958e960d31c Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sun, 23 Jul 2017 14:14:23 +0200 Subject: [PATCH 0597/1936] Switch to cirrors 0.3.5 also for the xen case This image is available on the download site since Feb 2016, so let's use it. 
Change-Id: I3b89211f6e57f2c35056d7e9c57d08651a3a314f --- stackrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackrc b/stackrc index 4f16c3edf7..e526b548b5 100644 --- a/stackrc +++ b/stackrc @@ -719,9 +719,9 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} - DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk.vhd.tgz} - IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz} + IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz" IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; esac DOWNLOAD_DEFAULT_IMAGES=False From d325875508e7d35d6dd62302d852e83815be2278 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 21 Jul 2017 08:19:16 +0200 Subject: [PATCH 0598/1936] Resolve openSUSE devstack failures This update resolves multiple issues with python-cryptography causing keystone server and nova deployment to fail. This is a temporary workaround until I196f025dbf1a9ac297946b8165620676645f7210 has landed and the extraneous dependency on python-cryptography (the package) has been removed. Change-Id: Ifb29b9089197c0429a5fc1cd08a25d2095d481f1 --- tools/install_prereqs.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index da59093581..933491081c 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -88,6 +88,22 @@ else export PYTHON=$(which python 2>/dev/null) fi +if is_suse; then + # novnc has an extraneous dependency on pyOpenSSL, which causes symbol conflicts + # in the bundled libssl of python-cryptography. when both are loaded into the same + # process, they start hanging or segfaulting. + install_package novnc + # deinstall the extra but irrelevant dependencies + sudo rpm -e --nodeps python-cffi python-cryptography python-pyOpenSSL + # reinstall cffi which got overwriten by the package. + sudo pip install -I cffi + # now reinstall cryptography from source, in order to rebuilt it against the + # system libssl rather than the bundled openSSL 1.1, which segfaults when combined + # with the system provided (which libpython links against) openSSL 1.0 + sudo pip install cryptography --no-binary :all: +fi + + # Mark end of run # --------------- From c63ecadb08245eda3f4ef2327a2b9ca85cf4b4f9 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 25 Jul 2017 17:08:50 -0400 Subject: [PATCH 0599/1936] Add instructions for discovering hosts in the multinode guide When doing a multi-node devstack deployment starting in Ocata the child compute nodes must be discovered and mapped to the single nova cell (cell1). In the upstream CI we do this discovery in devstack-gate after the subnodes are stacked, but for anyone doing this manually we need to provide some notes on what needs to happen after child compute nodes are stacked for a multinode environment. 
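As a short, hedged example of that manual flow (both commands are the ones referenced in the guide text below), run on the control node after each subnode finishes stacking:

    # poll until the new nova-compute service has registered
    nova service-list --binary nova-compute
    # then map the newly discovered compute hosts into cell1
    ./tools/discover_hosts.sh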
Change-Id: I68418bcf28d86c60fe42537186d89458fa778bda Closes-Bug: #1688397 --- doc/source/guides/multinode-lab.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 1a8ddbc194..b4e2891c10 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -197,6 +197,22 @@ A stream of activity ensues. When complete you will see a summary of to poke at your shiny new OpenStack. The most recent log file is available in ``stack.sh.log``. +Starting in the Ocata release, Nova requires a `Cells v2`_ deployment. Compute +node services must be mapped to a cell before they can be used. + +After each compute node is stacked, verify it shows up in the +``nova service-list --binary nova-compute`` output. The compute service is +registered in the cell database asynchronously so this may require polling. + +Once the compute node services shows up, run the ``./tools/discover_hosts.sh`` +script from the control node to map compute hosts to the single cell. + +The compute service running on the primary control node will be +discovered automatically when the control node is stacked so this really +only needs to be performed for subnodes. + +.. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html + Cleaning Up After DevStack -------------------------- From 2f09dcfc98959db87d6d6d7804364c9db3fa5111 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 26 Jul 2017 08:12:17 +0000 Subject: [PATCH 0600/1936] Updated from generate-devstack-plugins-list Change-Id: Ic5ccbd05b9be0739b486d0b816b94eaa5d8f355f --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 92e5ecdb89..ea406aa967 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -106,6 +106,7 @@ networking-cumulus `git://git.openstack.org/openstack/networ networking-dpm `git://git.openstack.org/openstack/networking-dpm `__ networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ +networking-hpe `git://git.openstack.org/openstack/networking-hpe `__ networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ From dcdf8c8e60a9db0adb4ee5cae98ed7cc511ecf83 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 25 Jul 2017 19:51:08 -0400 Subject: [PATCH 0601/1936] Wait for compute service to check in With cell v2, on initial bring up, discover hosts can't run unless all the compute nodes have checked in. The documentation says that you should run ``nova service-list --binary nova-compute`` and see all your hosts before running discover hosts. This isn't really viable in a multinode devstack because of how things are brought up in parts. We can however know that stack.sh will not complete before the compute node is up by waiting for the compute node to check in before moving forward. This puts a few more seconds into the run, but ensures everything is solid in multinode environments. 
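A rough manual equivalent of the added wait (illustrative sketch only; the real logic lives in the new wait_for_compute helper below) would be:

    # poll the compute service list until this host has checked in, up to 60s
    timeout 60 bash -c 'until nova service-list --binary nova-compute | grep -q "$(hostname)"; do sleep 1; done'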
Change-Id: I667e6a9be3fee8bb5bfd73426eef567489e3d88d --- functions | 14 ++++++++++++++ lib/nova | 7 +++++++ 2 files changed, 21 insertions(+) diff --git a/functions b/functions index 6f2164a777..faa6b761ab 100644 --- a/functions +++ b/functions @@ -407,6 +407,20 @@ EOF return $rval } +function wait_for_compute { + local timeout=$1 + time_start "wait_for_service" + timeout $timeout bash -x < 30 seconds + # happen between here and the script ending. However, in multinode + # tests this can very often not be the case. So ensure that the + # compute is up before we move on. + wait_for_compute 60 export PATH=$old_path } From 5adfef0a53a19436cd759b1d345bcad0a46fc1bf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 26 Jul 2017 11:14:37 -0400 Subject: [PATCH 0602/1936] Introduce CELLSV2_SETUP variable Some environments, like grenade and ironic, need a way to revert to the non fleet version of the conductor setup. This really comes down to a global topology for CELLSV2_SETUP. The prefered is with a superconductor, but allow a downgrade to singleconductor. Depends-On: I5390ec14c41da0237c898852935aba3569e7acae Change-Id: I10fb048ef2175909019461e585d117b4284448c6 --- lib/nova | 23 ++++++++++++++++------- stackrc | 8 ++++++++ 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/lib/nova b/lib/nova index 0500fc27a8..5362e3fef4 100644 --- a/lib/nova +++ b/lib/nova @@ -432,7 +432,16 @@ function create_nova_conf { # require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. if is_service_enabled n-api n-cond n-sched; then - iniset $NOVA_CONF database connection `database_connection_url nova_cell0` + # If we're in multi-tier cells mode, we want our control services pointing + # at cell0 instead of cell1 to ensure isolation. If not, we point everything + # at the main database like normal. + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + local db="nova_cell1" + else + local db="nova_cell0" + fi + + iniset $NOVA_CONF database connection `database_connection_url $db` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` fi @@ -676,15 +685,15 @@ function init_nova { # and nova_cell0 databases. nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` - # Migrate nova and nova_cell0 databases. - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - # (Re)create nova databases for i in $(seq 1 $NOVA_NUM_CELLS); do recreate_database nova_cell${i} $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync done + # Migrate nova and nova_cell0 databases. + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + if is_service_enabled n-cell; then recreate_database $NOVA_CELLS_DB fi @@ -795,7 +804,6 @@ function start_nova_api { # start_nova_compute() - Start the compute process function start_nova_compute { - local nomulticellflag="$1" # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH @@ -803,17 +811,18 @@ function start_nova_compute { if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF # NOTE(danms): Don't setup conductor fleet for cellsv1 - nomulticellflag='nomulticell' + CELLSV2_SETUP="singleconductor" else local compute_cell_conf=$NOVA_CONF fi - if [ "$nomulticellflag" = 'nomulticell' ]; then + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so # skip these bits and use the normal config. 
NOVA_CPU_CONF=$compute_cell_conf echo "Skipping multi-cell conductor fleet setup" else + # "${CELLSV2_SETUP}" is "superconductor" cp $compute_cell_conf $NOVA_CPU_CONF # FIXME(danms): Should this be configurable? iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True diff --git a/stackrc b/stackrc index e526b548b5..877da82261 100644 --- a/stackrc +++ b/stackrc @@ -77,6 +77,14 @@ ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable NOVA_ENABLED_APIS=osapi_compute,metadata +# CELLSV2_SETUP - how we should configure services with cells v2 +# +# - superconductor - this is one conductor for the api services, and +# one per cell managing the compute services. This is prefered +# - singleconductor - this is one conductor for the whole deployment, +# this is not recommended, and will be removed in the future. +CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} + # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" From 97430cd9e062b907c5937eb50ca6b95ae6fcdeeb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 27 Jul 2017 08:21:35 +0000 Subject: [PATCH 0603/1936] Updated from generate-devstack-plugins-list Change-Id: I4e47d524df32d3d41cc00c608edc6c9c588726b5 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ea406aa967..f9ca05583f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -92,6 +92,7 @@ mogan-ui `git://git.openstack.org/openstack/mogan- monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ monasca-api `git://git.openstack.org/openstack/monasca-api `__ monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ +monasca-events-api `git://git.openstack.org/openstack/monasca-events-api `__ monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ murano `git://git.openstack.org/openstack/murano `__ From afc14c8e8585e6f6f00fafc78daa4e478b1635ae Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 27 Jul 2017 07:09:48 -0400 Subject: [PATCH 0604/1936] Fix last place where we need singleconductor The actual logic of launching a singleconductor didn't get all the way to the launch of the conductor itself, so we were still launching 2 conductors in the Ironic case. This attempts to fix that. Change-Id: I7ddb123dbdf3e1ec9a991e474a9990d2ccbc30d3 --- lib/nova | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/lib/nova b/lib/nova index 5362e3fef4..8311a54930 100644 --- a/lib/nova +++ b/lib/nova @@ -51,6 +51,7 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf +NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} @@ -588,8 +589,13 @@ function create_nova_conf { iniset $conf database connection `database_connection_url nova_cell${i}` iniset $conf conductor workers "$API_WORKERS" iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - rpc_backend_add_vhost $vhost - iniset_rpc_backend nova $conf DEFAULT $vhost + # if we have a singleconductor, we don't have per host message queues. 
+ if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + iniset_rpc_backend nova $conf DEFAULT + else + rpc_backend_add_vhost $vhost + iniset_rpc_backend nova $conf DEFAULT $vhost + fi done fi } @@ -632,6 +638,9 @@ function init_nova_cells { iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata fi + # Cells v1 conductor should be the nova-cells.conf + NOVA_COND_CONF=$NOVA_CELLS_CONF + time_start "dbsync" $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync time_stop "dbsync" @@ -802,6 +811,16 @@ function start_nova_api { export PATH=$old_path } +# Detect and setup conditions under which singleconductor setup is +# needed. Notably cellsv1. +function _set_singleconductor { + # NOTE(danms): Don't setup conductor fleet for cellsv1 + if is_service_enabled n-cell; then + CELLSV2_SETUP="singleconductor" + fi +} + + # start_nova_compute() - Start the compute process function start_nova_compute { # Hack to set the path for rootwrap @@ -810,8 +829,6 @@ function start_nova_compute { if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF - # NOTE(danms): Don't setup conductor fleet for cellsv1 - CELLSV2_SETUP="singleconductor" else local compute_cell_conf=$NOVA_CONF fi @@ -908,15 +925,15 @@ function enable_nova_fleet { } function start_nova_conductor { - if is_service_enabled n-cell; then + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then echo "Starting nova-conductor in a cellsv1-compatible way" - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" return fi enable_nova_fleet if is_service_enabled n-super-cond; then - run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF" + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" fi for i in $(seq 1 $NOVA_NUM_CELLS); do if is_service_enabled n-cond-cell${i}; then @@ -928,9 +945,16 @@ function start_nova_conductor { } function start_nova { + # this catches the cells v1 case early + _set_singleconductor start_nova_rest start_nova_conductor start_nova_compute + if is_service_enabled n-api; then + # dump the cell mapping to ensure life is good + echo "Dumping cells_v2 mapping" + nova-manage cell_v2 list_cells --verbose + fi } function stop_nova_compute { From 01c0cc6d37ccca13e0e7e48a58dcc7ba98967f1c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 27 Jul 2017 20:53:57 +0000 Subject: [PATCH 0605/1936] Revert "Wait for compute service to check in" This reverts commit dcdf8c8e60a9db0adb4ee5cae98ed7cc511ecf83. Change-Id: Ib14016a3bc6f2714758ad0291396233218c593c6 --- functions | 14 -------------- lib/nova | 7 ------- 2 files changed, 21 deletions(-) diff --git a/functions b/functions index faa6b761ab..6f2164a777 100644 --- a/functions +++ b/functions @@ -407,20 +407,6 @@ EOF return $rval } -function wait_for_compute { - local timeout=$1 - time_start "wait_for_service" - timeout $timeout bash -x < 30 seconds - # happen between here and the script ending. However, in multinode - # tests this can very often not be the case. So ensure that the - # compute is up before we move on. 
- wait_for_compute 60 export PATH=$old_path } From 6effdf370a1a638e72319337db57c5ef1fa1312d Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 27 Jul 2017 20:28:43 +0200 Subject: [PATCH 0606/1936] Remove extra websockify dependency cleanup step This started to fail due to a missing || : / --force option now that the dependencies got fixed: 2017-07-30 19:38:37.260 | ++ tools/install_prereqs.sh:source:97 : sudo rpm -e --nodeps python-cffi python-cryptography python-pyOpenSSL 2017-07-30 19:38:37.293 | error: package python-cffi is not installed 2017-07-30 19:38:37.293 | error: package python-cryptography is not installed 2017-07-30 19:38:37.293 | error: package python-pyOpenSSL is not installed Change-Id: Ia59afb7ee564cf2044ebdb3c5ad3e54ee91d1222 --- tools/install_prereqs.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 933491081c..6189085e9e 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -89,17 +89,10 @@ else fi if is_suse; then - # novnc has an extraneous dependency on pyOpenSSL, which causes symbol conflicts - # in the bundled libssl of python-cryptography. when both are loaded into the same - # process, they start hanging or segfaulting. - install_package novnc - # deinstall the extra but irrelevant dependencies - sudo rpm -e --nodeps python-cffi python-cryptography python-pyOpenSSL - # reinstall cffi which got overwriten by the package. - sudo pip install -I cffi # now reinstall cryptography from source, in order to rebuilt it against the # system libssl rather than the bundled openSSL 1.1, which segfaults when combined - # with the system provided (which libpython links against) openSSL 1.0 + # with a system provided openSSL 1.0 + # see https://github.com/pyca/cryptography/issues/3804 and followup issues sudo pip install cryptography --no-binary :all: fi From dea3083d984569eac9647f1a28f10ae98afc42f7 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 1 Aug 2017 00:16:51 +0300 Subject: [PATCH 0607/1936] Fix path to mlock_report This patch fixes path to mlock_report.py. Also add python-psutil to dstat depends as it is required by mlock_report. Change-Id: Ia2b507a7b923f1e3393a9cb7746c66d39d6abfde --- files/debs/dstat | 1 + files/rpms/dstat | 1 + tools/memory_tracker.sh | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/files/debs/dstat b/files/debs/dstat index 2b643b8b1b..0d9da4434f 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1 +1,2 @@ dstat +python-psutil diff --git a/files/rpms/dstat b/files/rpms/dstat index 2b643b8b1b..0d9da4434f 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1 +1,2 @@ dstat +python-psutil diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index cbdeb8f420..63f25ca2de 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -88,7 +88,7 @@ function tracker { # list processes that lock memory from swap if [[ $unevictable -ne $unevictable_point ]]; then unevictable_point=$unevictable - ${PYTHON} ./tools/mlock_report.py + ${PYTHON} $(dirname $0)/mlock_report.py fi echo "]]]" From fa55cb5f978f6bd9476f1b09a94c8d6aaa18beb4 Mon Sep 17 00:00:00 2001 From: Mathieu Mitchell Date: Tue, 24 Jan 2017 11:32:24 -0500 Subject: [PATCH 0608/1936] Ensure valid service names are passed to stack_install_service Currently, stack_install_service will accept any service name. This is problematic because a project plugin can pass an invalid name without noticing. This has been the case in ironic-inspector[0]. 
This commit ensures that stack_install_service will not silently fail when passing an invalid service name. [0] https://review.openstack.org/#/c/424680/ Change-Id: I1a8105bdbaf4aecb630df08da416808bf7180824 Closes-Bug: #1659042 --- lib/stack | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/stack b/lib/stack index f09ddcee85..bada26f1c2 100644 --- a/lib/stack +++ b/lib/stack @@ -33,5 +33,8 @@ function stack_install_service { if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then unset PIP_VIRTUAL_ENV fi + else + echo "No function declared with name 'install_${service}'." + exit 1 fi } From ab980ce5d6792a273db3e93eb3d163bfdc38b1de Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 1 Aug 2017 16:38:42 -0400 Subject: [PATCH 0609/1936] Disable track_instance_changes when in superconductor mode When configured for superconductor mode, which is the default, nova-compute can't reach the MQ for nova-scheduler so there is no point in even enabling the track_instance_changes code since it's a waste of time as the scheduler will never get the message. Change-Id: I2662ebd47323428b403d3c2236bec78f1fb1050f --- lib/nova | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova b/lib/nova index 8311a54930..7266ac70e9 100644 --- a/lib/nova +++ b/lib/nova @@ -440,6 +440,9 @@ function create_nova_conf { local db="nova_cell1" else local db="nova_cell0" + # When in superconductor mode, nova-compute can't send instance + # info updates to the scheduler, so just disable it. + iniset $NOVA_CONF filter_scheduler track_instance_changes False fi iniset $NOVA_CONF database connection `database_connection_url $db` @@ -843,6 +846,9 @@ function start_nova_compute { cp $compute_cell_conf $NOVA_CPU_CONF # FIXME(danms): Should this be configurable? iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True + # Since the nova-compute service cannot reach nova-scheduler over + # RPC, we also disable track_instance_changes. + iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" fi From c2fe916fc7c6c00cdfa0085e198eaf2ad4d915d1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Jul 2017 11:29:18 +0000 Subject: [PATCH 0610/1936] Wait for compute service to check in With cell v2, on initial bring up, discover hosts can't run unless all the compute nodes have checked in. The documentation says that you should run ``nova service-list --binary nova-compute`` and see all your hosts before running discover hosts. This isn't really viable in a multinode devstack because of how things are brought up in parts. We can however know that stack.sh will not complete before the compute node is up by waiting for the compute node to check in before completing. This happens quite late in the stack.sh run, so shouldn't add any extra time in most runs. Cells v1 and Xenserver don't use real hostnames in the service table (they encode complex data that is hostname like to provide more topology information than just hostnames). They are exempted from this check. 
Related-Bug: #1708039 Change-Id: I32eb59b9d6c225a3e93992be3a3b9f4b251d7189 --- functions | 20 ++++++++++++++++++++ lib/nova | 22 ++++++++++++++++++++++ stack.sh | 7 +++++++ 3 files changed, 49 insertions(+) diff --git a/functions b/functions index 6f2164a777..d55cd74609 100644 --- a/functions +++ b/functions @@ -407,6 +407,26 @@ EOF return $rval } +function wait_for_compute { + local timeout=$1 + local rval=0 + time_start "wait_for_service" + timeout $timeout bash -x < 30 seconds + # happen between here and the script ending. However, in multinode + # tests this can very often not be the case. So ensure that the + # compute is up before we move on. + if is_service_enabled n-cell; then + # cells v1 can't complete the check below because it munges + # hostnames with cell information (grumble grumble). + return + fi + # TODO(sdague): honestly, this probably should be a plug point for + # an external system. + if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then + # xenserver encodes information in the hostname of the compute + # because of the dom0/domU split. Just ignore for now. + return + fi + wait_for_compute 60 +} + function start_nova { # this catches the cells v1 case early _set_singleconductor diff --git a/stack.sh b/stack.sh index 015ee6ec43..51e12168ba 100755 --- a/stack.sh +++ b/stack.sh @@ -1433,6 +1433,13 @@ fi # Sanity checks # ============= +# Check that computes are all ready +# +# TODO(sdague): there should be some generic phase here. +if is_service_enabled n-cpu; then + is_nova_ready +fi + # Check the status of running services service_check From eca7ce749204f01f0041bf1bb5f00fd04c9109c7 Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Fri, 4 Aug 2017 11:50:36 +0800 Subject: [PATCH 0611/1936] ETCD need to add UNSUPPORT environment in AArch64 Closes-bug: #1708575 Change-Id: I77e78389ac7b8df9ba9f84b072f446d2e03d84d1 Signed-off-by: Kevin Zhao --- lib/etcd3 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/etcd3 b/lib/etcd3 index 0e1fbd5bc2..bc24790782 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -57,6 +57,9 @@ function start_etcd3 { iniset -sudo $unitfile "Service" "Type" "notify" iniset -sudo $unitfile "Service" "Restart" "on-failure" iniset -sudo $unitfile "Service" "LimitNOFILE" "65536" + if is_arch "aarch64"; then + iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64" + fi $SYSTEMCTL daemon-reload $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE From 801494550a58220e1bcbd531e810e1ca59efa7e8 Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Thu, 3 Aug 2017 12:41:36 +0100 Subject: [PATCH 0612/1936] Disable baremetal sched filters when using resource classes When using resource classes to schedule baremetal nodes the baremetal filters like ExactRam etc should not be used. This patch disables them in the nova config if devstack is configured to enable ironic resource classes. 
Change-Id: Ic262ccaf8b541308042d61113a953653d2261964 --- lib/nova_plugins/hypervisor-ironic | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 7d47ef070c..d59473ca1c 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -42,7 +42,11 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager - iniset $NOVA_CONF filter_scheduler use_baremetal_filters True + + if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then + iniset $NOVA_CONF filter_scheduler use_baremetal_filters True + fi + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 # ironic section From 49144e627120f9ec1cf72dfc84e02b9b81e669b6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 2 Aug 2017 09:49:27 -0400 Subject: [PATCH 0613/1936] Remove glance api_servers from nova config This should now be able to be discovered from the service catalog, there is no reason to set it. Change-Id: I7383b589fbcef9423beeab735db42c594f7b56fd get auth from context for glance endpoints: Depends-On: I4e755b9c66ec8bc3af0393e81cffd91c56064717 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8311a54930..26f65637c5 100644 --- a/lib/nova +++ b/lib/nova @@ -538,7 +538,6 @@ function create_nova_conf { iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_transport_url) iniset_rpc_backend nova $NOVA_CONF - iniset $NOVA_CONF glance api_servers "$GLANCE_URL" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" From b645904d4aae46a8be5cee3a23710565b211f458 Mon Sep 17 00:00:00 2001 From: Kenneth Giusti Date: Fri, 4 Aug 2017 18:08:37 -0400 Subject: [PATCH 0614/1936] Use get_notification_url when configuring notifications If a project manually configures the oslo.messaging transport url for notifications it should use 'get_notification_url', not 'get_transport_url'. get_transport_url should only be used to obtain the RPC transport address. Change-Id: I77772dfa9f30a3db2db6d0387260dfe3452a26ef Closes-Bug: #1708754 --- lib/nova | 2 +- lib/rpc_backend | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 8311a54930..c988c2f06b 100644 --- a/lib/nova +++ b/lib/nova @@ -536,7 +536,7 @@ function create_nova_conf { # Set the oslo messaging driver to the typical default. This does not # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" - iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_transport_url) + iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url) iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "$GLANCE_URL" diff --git a/lib/rpc_backend b/lib/rpc_backend index 3177e88ef2..fb1cf73b99 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -114,7 +114,7 @@ function rpc_backend_add_vhost { fi } -# builds transport url string +# Returns the address of the RPC backend in URL format. 
function get_transport_url { local virtual_host=$1 if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then @@ -122,8 +122,9 @@ fi } -# Repeat the definition, in case get_transport_url is overriden for RPC purpose. -# get_notification_url can then be used to talk to rabbit for notifications. +# Returns the address of the Notification backend in URL format. This +# should be used to set the transport_url option in the +# oslo_messaging_notifications group. function get_notification_url { local virtual_host=$1 if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then From 41e6e12318718c3c5eb6f47ed75a215ed67eccf2 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 8 Aug 2017 15:06:26 +1000 Subject: [PATCH 0615/1936] Switch CentOS/Fedora to Apache worker MPM In trying to debug periodic gate instability of CentOS, I noticed that it is using the prefork mpm, while Ubuntu is defaulting to the multi-threaded worker mpm. One of the problems seems related to 502 proxy errors from the TLS proxy. We see out-of-sync timestamps in the CentOS TLS proxy access logs, which might be innocent behaviour based on the prefork model or indicate something else. Before going too deep down this rabbit-hole, I think it is better for consistency to use the same mpm model on all our platforms, and start debugging from there. Change-Id: I9881f2e7d51fdd9fc0f7fb3e37179aa53171b531 --- lib/apache | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/apache b/lib/apache index ffd7966de1..dfca25a764 100644 --- a/lib/apache +++ b/lib/apache @@ -132,6 +132,10 @@ function install_apache_wsgi { elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi + # For consistency with Ubuntu, switch to the worker mpm, as + # the default is prefork + sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf + sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf elif is_suse; then install_package apache2 apache2-mod_wsgi else From 139837d69d8566088125d29739089aec7b2a9e7c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 8 Aug 2017 17:51:29 +1000 Subject: [PATCH 0616/1936] Make TLS logs more readable After looking at these for I9881f2e7d51fdd9fc0f7fb3e37179aa53171b531 I found them not as useful as they could be. Fix the CustomLog command, which wants the logfile then the format string (or a nickname, which the LogFormat line wasn't setting). Use standard micro-second timestamps, and trim the access log to have more relevant info.
Change-Id: I9f4c8ef38ab9e08aeced7b309d4a5276de07af4b --- lib/tls | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/tls b/lib/tls index 6a3d260ebd..7bde5e6496 100644 --- a/lib/tls +++ b/lib/tls @@ -533,10 +533,9 @@ $listen_string ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log - ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" + ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info - CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common - LogFormat "%v %h %l %u %t \"%r\" %>s %b" + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" EOF if is_suse ; then From 2d57f93f68ae14e17313486e0d5ad2513af58fd1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 3 Aug 2017 14:35:37 +1000 Subject: [PATCH 0617/1936] Don't reinstall python-virtualenv on infra nodes In the original change I said "for infra nodes, it shouldn't do anything anyway ...". Well that was pre-Fedora 26 :) It seems that dnf > 2.0 now intentionally throws an error when trying to explicitly install an ignored package. Thus, as described in the comment, take a simpler approach of skipping this on infra nodes. pip-and-virtualenv in dib should have installed the latest pip, virtualenv and setuptools, so we don't want to fiddle with that anyway. [1] https://review.openstack.org/#/c/338998/ Change-Id: Ib300b58377a0d0fe1bd7444c71acdb9a87dc033b --- tools/fixup_stuff.sh | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 55cd7252ea..f1552ab2f7 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -202,5 +202,22 @@ fi # on python-virtualenv), first install the distro python-virtualenv # to satisfy any dependencies then use pip to overwrite it. -install_package python-virtualenv -pip_install -U --force-reinstall virtualenv +# ... but, for infra builds, the pip-and-virtualenv [1] element has +# already done this to ensure the latest pip, virtualenv and +# setuptools on the base image for all platforms. It has also added +# the packages to the yum/dnf ignore list to prevent them being +# overwritten with old versions. F26 and dnf 2.0 has changed +# behaviour that means re-installing python-virtualenv fails [2]. +# Thus we do a quick check if we're in the infra environment by +# looking for the mirror config script before doing this, and just +# skip it if so. + +# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \ +# diskimage_builder/elements/pip-and-virtualenv/ \ +# install.d/pip-and-virtualenv-source-install/04-install-pip +# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823 + +if [[ ! -f /etc/ci/mirror_info.sh ]]; then + install_package python-virtualenv + pip_install -U --force-reinstall virtualenv +fi From 32608da2c4ecc523fb331212c441fc86aabb6355 Mon Sep 17 00:00:00 2001 From: zhangbailin Date: Wed, 9 Aug 2017 01:43:00 -0700 Subject: [PATCH 0618/1936] Modify some spelling mistakes There are some comment errors, it's modify 'Captial' to 'Capital' in keystone file, and modify 'possition' to 'position' in openrc file, and modify 'comming' to 'coming' in stack file, and modify 'prefered' to 'preferred' in stackrc file. 
Change-Id: I0fdd539cbfff842a4ba7fca9100b881443300f9a --- lib/keystone | 2 +- openrc | 2 +- stack.sh | 2 +- stackrc | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1061081a5e..749e219e67 100644 --- a/lib/keystone +++ b/lib/keystone @@ -350,7 +350,7 @@ function create_keystone_accounts { # The Member role is used by Horizon and Swift so we need to keep it: local member_role="member" - # Captial Member role is legacy hard coded in Horizon / Swift + # Capital Member role is legacy hard coded in Horizon / Swift # configs. Keep it around. get_or_create_role "Member" diff --git a/openrc b/openrc index 23c173c8a1..37724c552e 100644 --- a/openrc +++ b/openrc @@ -84,7 +84,7 @@ export OS_AUTH_TYPE=password # We currently recommend using the version 3 *identity api*. # -# If you don't have a working .stackenv, this is the backup possition +# If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP} diff --git a/stack.sh b/stack.sh index 015ee6ec43..e92418233b 100755 --- a/stack.sh +++ b/stack.sh @@ -30,7 +30,7 @@ unset GREP_OPTIONS # NOTE(sdague): why do we explicitly set locale when running stack.sh? # # Devstack is written in bash, and many functions used throughout -# devstack process text comming off a command (like the ip command) +# devstack process text coming off a command (like the ip command) # and do transforms using grep, sed, cut, awk on the strings that are # returned. Many of these programs are interationalized, which is # great for end users, but means that the strings that devstack diff --git a/stackrc b/stackrc index 877da82261..b03426ffa8 100644 --- a/stackrc +++ b/stackrc @@ -80,7 +80,7 @@ NOVA_ENABLED_APIS=osapi_compute,metadata # CELLSV2_SETUP - how we should configure services with cells v2 # # - superconductor - this is one conductor for the api services, and -# one per cell managing the compute services. This is prefered +# one per cell managing the compute services. This is preferred # - singleconductor - this is one conductor for the whole deployment, # this is not recommended, and will be removed in the future. CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} @@ -117,7 +117,7 @@ fi # Whether or not to enable Kernel Samepage Merging (KSM) if available. # This allows programs that mark their memory as mergeable to share # memory pages if they are identical. This is particularly useful with -# libvirt backends. This reduces memory useage at the cost of CPU overhead +# libvirt backends. This reduces memory usage at the cost of CPU overhead # to scan memory. We default to enabling it because we tend to be more # memory constrained than CPU bound. ENABLE_KSM=$(trueorfalse True ENABLE_KSM) From a6c782722378827ecc4606afae26f3fb7a48b92d Mon Sep 17 00:00:00 2001 From: linxuhua Date: Wed, 9 Aug 2017 17:25:03 +0800 Subject: [PATCH 0619/1936] Modify the default url of noVNC I had synced all the devstack repos for installing in an intranet environment, and found that the url of noVNC had changed to https://github.com/novnc/noVNC. The project moved from an individual account to an organization; the old url redirects to the new one.
Change-Id: I19fc1e2ad30dcd97cad232c9ad58f53a523616b4 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 877da82261..3bced58b25 100644 --- a/stackrc +++ b/stackrc @@ -624,7 +624,7 @@ IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironi IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6} # a websockets/html5 or flash powered SPICE console for vm instances From 8ea8660e363b0bd1fd8bc40a9b74f76e42649f90 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 2 Aug 2017 11:40:41 -0400 Subject: [PATCH 0620/1936] Fix Cinder tls-proxy WSGI test This currently will throw errors like: lib/cinder: line 480: [True: command not found Change-Id: I6bc08532cf99411f39d23523f9fc7851e7804131 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 4274be740a..03328f31b6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -478,7 +478,7 @@ function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL local cinder_url - if is_service_enabled tls-proxy && ["$CINDER_USE_MOD_WSGI" == "False"]; then + if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then service_port=$CINDER_SERVICE_PORT_INT service_protocol="http" fi From 0629c4fe22ce02aefde7ee9a18ef75a2687bee01 Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Thu, 10 Aug 2017 15:34:29 +0200 Subject: [PATCH 0621/1936] Remove DiskFilter and RamFilter from Nova scheduling defaults In Ocata, we replaced the verification logic for CPU, RAM and disk by calling the Placement API instead of using those legacy scheduler filters, it's time to remove them from the default list of filters that are run, especially since Nova now removes them from the conf opt defaults thanks to Ibe1cee1cb2642f61a8d6bf9c3f6bbee4f2c2f414 Change-Id: I2e81f1bbce7476d63e84e70dcdd59a1163f89f09 Related-Bug: #1709328 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8311a54930..f1be7ce41d 100644 --- a/lib/nova +++ b/lib/nova @@ -101,7 +101,7 @@ SCHEDULER=${SCHEDULER:-filter_scheduler} # The following FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. 
-FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf From 0ed3b6208952e546b7ce5c0d3dc13cb628e3f00d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 11 Aug 2017 08:26:44 +0000 Subject: [PATCH 0622/1936] Updated from generate-devstack-plugins-list Change-Id: Ibb3f6cd6aa01daf79413b2abfb5adb3d5f121321 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f9ca05583f..0ec31b36de 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -138,6 +138,7 @@ nova-powervm `git://git.openstack.org/openstack/nova-p oaktree `git://git.openstack.org/openstack/oaktree `__ octavia `git://git.openstack.org/openstack/octavia `__ octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard `__ +omni `git://git.openstack.org/openstack/omni `__ os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ panko `git://git.openstack.org/openstack/panko `__ From 63962fbbe41fe966df96fe2e1bb30943e7ae4047 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Fri, 11 Aug 2017 19:37:07 +0000 Subject: [PATCH 0623/1936] Remove keystone configs for ldap read/write Configuration options that toggle support for LDAP read/write were deprecated and removed as of the Ocata release: I13eada3d5c3a166223c3e3ce70b7054eaed1003a This means we no longer need to clutter the domain-specific configuration with these values since they are no longer used. Change-Id: I23b5b994862f066c3d48ce524c396faecabf60f8 --- lib/keystone | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1061081a5e..4a4a30f0cc 100644 --- a/lib/keystone +++ b/lib/keystone @@ -626,12 +626,6 @@ function create_ldap_domain { iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap" # LDAP settings for Users domain - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_delete "False" - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_update "False" - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_create "False" - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_delete "False" - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_update "False" - iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_create "False" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" From 87d2396d225533aefd8d52bb5fa574ea57dbb987 Mon Sep 17 00:00:00 2001 From: Sampath Priyankara Date: Thu, 3 Aug 2017 16:12:40 +0900 Subject: [PATCH 0624/1936] Don't uninstall pip packages if OFFLINE=True lib/nova does a pip re-install of libvirt-python to rebuild the python library incase the underlying libvirt version changed during package installs. In offline mode, the underlying version of libvirt can't have changed; so we have the situation that we've removed the libvirt python bindings but can't reinstall them (because we're offline). 
This fixes that particular situation, but skipping uninstalls in offline mode seems generically OK. Change-Id: I2b75d45d94d82f87d996c7570c125d46f5f99f6a Closes-Bug: #1708369 --- inc/python | 3 +++ 1 file changed, 3 insertions(+) diff --git a/inc/python b/inc/python index f388f48e91..5e7f742d48 100644 --- a/inc/python +++ b/inc/python @@ -346,6 +346,9 @@ function pip_install { } function pip_uninstall { + # Skip uninstall if offline + [[ "${OFFLINE}" = "True" ]] && return + local name=$1 if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip From a3488d5f0067b570974c14572c58bbf8cedf7ed2 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 10 Aug 2017 14:55:15 -0400 Subject: [PATCH 0625/1936] Remove proxy-sendcl from mod_proxy_uwsgi apache path Calling setenv appears to be globally scoped which is breaking the glance path which relies on chunked uploads. The glance path is separated by using mod_proxy instead of mod_proxy_uwsgi because mod_proxy_uwsgi doesn't support chunked encoding.[1] The proxy-sendcl [2] was set on the mod_proxy_uwsgi path just in case someone tried to send a chunked request to the api server we would be able to handle it. It tells apache to locally cache the chunked request and send the content-length as a normal upload to the upstream server. However, if we can only set it globally across then small potential benefit is not worth having all glance uploads cached by apache. This commit just removes setting the flag. In the future if we can have devstack isolate this flag it might be worth adding back to the mod_proxy_uwsgi path, but for right now it's not worth the tradeoff. [1] https://github.com/unbit/uwsgi/issues/1540 [2] https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#request-bodies Depends-On: Idf6b4b891ba31cccbeb53d373b40fce5380cea64 Change-Id: Iab2e2848877fa1497008d18c05b0154892941589 Closes-Bug: #1709970 --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index dfca25a764..5838a4df9b 100644 --- a/lib/apache +++ b/lib/apache @@ -277,7 +277,6 @@ function write_uwsgi_config { else local apache_conf="" apache_conf=$(apache_site_config_for $name) - echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi chmod-socket 666 echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf @@ -335,6 +334,7 @@ function write_local_uwsgi_http_config { local apache_conf="" apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server From 6b354a909535a9dfef164c3e844544989d17514a Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Mon, 14 Aug 2017 13:58:30 -0700 Subject: [PATCH 0626/1936] Don't special case Grenade + systemd Now that the pike branch exists we don't want to special case grenade + system on the target side. We should use systemd for both sides of the pike -> master upgrade. Note this change should not be backported so that we do not attempt to use systemd on the ocata -> pike upgrade path. 
Depends-On: Iedf824a1772115e0dff287a898636f8e58471269 Change-Id: I6198bf1842a44773fce80672c81eee3afc3c6f38 --- stackrc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/stackrc b/stackrc index 877da82261..18c1ce76ce 100644 --- a/stackrc +++ b/stackrc @@ -174,10 +174,7 @@ fi # if we are forcing off USE_SCREEN (as we do in the gate), force on # systemd. This allows us to drop one of 3 paths through the code. if [[ "$USE_SCREEN" == "False" ]]; then - # Remove in Pike: this gets us through grenade upgrade - if [[ "$GRENADE_PHASE" != "target" ]]; then - USE_SYSTEMD="True" - fi + USE_SYSTEMD="True" fi # Default for log coloring is based on interactive-or-not. From 0525e77d9f297bff5b3a37276b1c28440a384229 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 15 Aug 2017 22:02:30 +0300 Subject: [PATCH 0627/1936] Increase host_subset_size for ironic This patch increases host_subset_size to 999 when ironic is used, to minimize race conditions. Change-Id: I0874fe3b3628cb3e662ee01f24c4599247fdc82d --- lib/nova_plugins/hypervisor-ironic | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index d59473ca1c..062afb7f7c 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -47,6 +47,8 @@ function configure_nova_hypervisor { iniset $NOVA_CONF filter_scheduler use_baremetal_filters True fi + iniset $NOVA_CONF filter_scheduler host_subset_size 999 + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 # ironic section From 59fb961180caf0981aa5e3fc5a022fbe3bf3e463 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 15 Aug 2017 13:48:04 -0700 Subject: [PATCH 0628/1936] Allow both Keystone and Tempauth reseller prefixes ... to be used with domain_remap. Swift will start functionally testing domain_remap in I63428132283986bda9e5c082ffe85741449b71ba.
Change-Id: I4c1ab06d040d91fd8c314d0aa2cecbbb00adf8ad --- lib/swift | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/swift b/lib/swift index 455740ea82..72a810352f 100644 --- a/lib/swift +++ b/lib/swift @@ -464,6 +464,9 @@ function configure_swift { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + # Allow both reseller prefixes to be used with domain_remap + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH" + if is_service_enabled swift3; then cat <>${SWIFT_CONFIG_PROXY_SERVER} [filter:s3token] From cdfcff511fa0c9f21a700d87530d56066bdad7cf Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 16 Aug 2017 08:06:13 +0000 Subject: [PATCH 0629/1936] Updated from generate-devstack-plugins-list Change-Id: I38170528cf78a89b71a616a4d9c6179c4e3f49c5 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0ec31b36de..84a57428d2 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -99,6 +99,7 @@ murano `git://git.openstack.org/openstack/murano networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ networking-arista `git://git.openstack.org/openstack/networking-arista `__ networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe `__ +networking-baremetal `git://git.openstack.org/openstack/networking-baremetal `__ networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn `__ networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ networking-calico `git://git.openstack.org/openstack/networking-calico `__ From 98c95f4fa1029adff778902be3afb92c080e15b3 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 16 Aug 2017 09:10:04 -0400 Subject: [PATCH 0630/1936] Stop nova conductor properly (singleconductor mode) stop_nova_conductor dropped the ball when the CELLSV2_SETUP mode is set to "singleconductor". We should cleanup the older style "n-cond" in this case. Change-Id: I9ffd6d09df6f390a842b8a374097f144564d2db4 --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 31b51c7c07..fa09fd8fc1 100644 --- a/lib/nova +++ b/lib/nova @@ -988,6 +988,11 @@ function stop_nova_rest { } function stop_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + stop_process n-cond + return + fi + enable_nova_fleet for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do if is_service_enabled $srv; then From ebbbc0500c7bb7a975967e4b1fadad7d1681c9a7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 16 Aug 2017 16:00:16 +0100 Subject: [PATCH 0631/1936] Make reference to service-types-authority from plugins.rst We want people creating plugins (that add services) to be aware of the service-types-authority (STA), so this change adds a Prerequisites section and notes the existince of the STA there, and the need to apply there to create a service-type. Change-Id: I1aa48fe231aaa4499f8b4fe336abea668841b9af --- doc/source/plugins.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 5b3c6cf714..fae1a1d8f5 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -12,6 +12,15 @@ tree. They are called through a strong contract, so these plugins can be sure that they will continue to work in the future as DevStack evolves. 
+Prerequisites +============= + +If you are planning to create a plugin that is going to host a service in the +service catalog (that is, your plugin will use the command +``get_or_create_service``) please make sure that you apply to the `service +types authority`_ to reserve a valid service-type. This will help to make sure +that all deployments of your service use the same service-type. + Plugin Interface ================ @@ -250,3 +259,5 @@ See Also For additional inspiration on devstack plugins you can check out the `Plugin Registry `_. + +.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/ From 1d141daaf673d7e28e4980812b4bff953a97518b Mon Sep 17 00:00:00 2001 From: Leticia Wanderley Date: Fri, 4 Aug 2017 00:42:59 -0300 Subject: [PATCH 0632/1936] Feature flag on tempest conf to notify enabled LDAP This adds a new feature flag on tempest conf whenever LDAP is enabled. When this flag is set to True, Tempest users and groups identity tests adapt to fetch users and groups from different domains. Change-Id: I368ddf34908b906355c422bd1afd6ab9b1a80053 Depends-On: Iedb470c51fa2174ab7651e6b7e22eff1f25f7aac --- lib/tempest | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/tempest b/lib/tempest index cc65ec7aa9..61fda7f1b9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -297,6 +297,12 @@ function configure_tempest { # Newton and Ocata. This option can be removed after Mitaka is end of life. iniset $TEMPEST_CONFIG identity-feature-enabled forbid_global_implied_dsr True + # When LDAP is enabled domain specific drivers are also enabled and the users + # and groups identity tests must adapt to this scenario + if is_service_enabled ldap; then + iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True + fi + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From aceb27e858b594ae80bf07bb0278a715e5a4cd3b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 17 Aug 2017 08:59:59 -0400 Subject: [PATCH 0633/1936] Add procname for uwsgi based services Code in grenade and elsewhere relies on the process/service name: one runs "ps auxw" and greps, for example "grep -e glance-api", to check if the service is running. With uwsgi, let us make sure we use a process name prefix so it is easier to spot the services and be compatible with code elsewhere that relies on this.
Change-Id: I4d1cd223ed9904fcb19b26fc9362b676e0b4f9b3 --- lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/nova | 4 ++-- lib/placement | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 03328f31b6..22c5168089 100644 --- a/lib/cinder +++ b/lib/cinder @@ -511,7 +511,7 @@ function start_cinder { start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_POR_INT fi else - run_process "c-api" "$CINDER_BIN_DIR/uwsgi --ini $CINDER_UWSGI_CONF" + run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi fi diff --git a/lib/glance b/lib/glance index 0a5b9f59b6..7b42488b59 100644 --- a/lib/glance +++ b/lib/glance @@ -345,7 +345,7 @@ function start_glance { run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process g-api "$GLANCE_BIN_DIR/uwsgi --ini $GLANCE_UWSGI_CONF" + run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" else run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" fi diff --git a/lib/keystone b/lib/keystone index 749e219e67..08aa675d43 100644 --- a/lib/keystone +++ b/lib/keystone @@ -550,7 +550,7 @@ function start_keystone { tail_log key /var/log/$APACHE_NAME/keystone.log tail_log key-access /var/log/$APACHE_NAME/keystone_access.log else # uwsgi - run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" + run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" fi echo "Waiting for keystone to start..." diff --git a/lib/nova b/lib/nova index fa09fd8fc1..976bf35ad5 100644 --- a/lib/nova +++ b/lib/nova @@ -805,7 +805,7 @@ function start_nova_api { start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT fi else - run_process "n-api" "$NOVA_BIN_DIR/uwsgi --ini $NOVA_UWSGI_CONF" + run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ fi @@ -912,7 +912,7 @@ function start_nova_rest { if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" else - run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --ini $NOVA_METADATA_UWSGI_CONF" + run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" fi run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" diff --git a/lib/placement b/lib/placement index 8adbbdec68..aef9b7454e 100644 --- a/lib/placement +++ b/lib/placement @@ -164,7 +164,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --ini $PLACEMENT_UWSGI_CONF" + run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" else enable_apache_site placement-api restart_apache_server From aa33c878d8da59369aed737cb636b628a1f444bb Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 16 Aug 2017 22:51:07 -0400 Subject: [PATCH 0634/1936] Fix Cleanups for keystone/placement/nova * Check KEYSTONE_DEPLOY flag and cleanup appropriately * When we stop 
process, we should not wipe uwsgi config we should remove files only on cleanup * We should not call cleanup *BEFORE* configure, we are just wiping out the uwsgi ini files * cleanup_placement should be called from clean.sh Change-Id: I066f5f87ff22d7da2e3814f8c2de75f2af625d2b --- clean.sh | 1 + lib/keystone | 25 ++++++++++++------------- stack.sh | 2 -- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/clean.sh b/clean.sh index 9ffe3bee6b..2333596c1f 100755 --- a/clean.sh +++ b/clean.sh @@ -88,6 +88,7 @@ cleanup_cinder || /bin/true cleanup_glance cleanup_keystone cleanup_nova +cleanup_placement cleanup_neutron cleanup_swift cleanup_horizon diff --git a/lib/keystone b/lib/keystone index 749e219e67..685891e8bc 100644 --- a/lib/keystone +++ b/lib/keystone @@ -148,16 +148,18 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - # TODO: remove admin at pike-2 - # These files will be created if we are running WSGI_MODE="uwsgi" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - sudo rm -f $(apache_site_config_for keystone-wsgi-admin) - - # These files will be created if we are running WSGI_MODE="mod_wsgi" - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) + if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then + # These files will be created if we are running WSGI_MODE="mod_wsgi" + disable_apache_site keystone + sudo rm -f $(apache_site_config_for keystone) + else + stop_process "keystone" + # TODO: remove admin at pike-2 + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) + sudo rm -f $(apache_site_config_for keystone-wsgi-admin) + fi } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone @@ -582,9 +584,6 @@ function stop_keystone { restart_apache_server else stop_process keystone - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - # TODO(remove in at pike-2) - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" fi # Kill the Keystone screen window stop_process key diff --git a/stack.sh b/stack.sh index e92418233b..301e1e7c6b 100755 --- a/stack.sh +++ b/stack.sh @@ -896,14 +896,12 @@ fi if is_service_enabled nova; then # Compute service stack_install_service nova - cleanup_nova configure_nova fi if is_service_enabled placement; then # placement api stack_install_service placement - cleanup_placement configure_placement fi From 64edfd47caba71a0c351269663dd457fd911fcac Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Thu, 17 Aug 2017 13:21:25 -0700 Subject: [PATCH 0635/1936] Correct spelling for 'lose' from 'loose' We can 'lose' networking configuration, not 'loose' it. Change-Id: I538270c05da6fce63340cc75ec53c3834efcecfe --- doc/source/networking.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index bdbeaaa7a8..74010cd01a 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -69,7 +69,7 @@ Shared Guest Interface This is not a recommended configuration. 
Because of interactions between ovs and bridging, if you reboot your box with active - networking you may loose network connectivity to your system. + networking you may lose network connectivity to your system. If you need your guests accessible on the network, but only have 1 interface (using something like a NUC), you can share your one From af9f71d693cd548b18986e99f58503fa57f815f1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 18 Aug 2017 08:21:34 +0000 Subject: [PATCH 0636/1936] Updated from generate-devstack-plugins-list Change-Id: I4747e03aa29828cba364a5887fada0721c8da56e --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 84a57428d2..35b78da44c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -110,6 +110,7 @@ networking-fortinet `git://git.openstack.org/openstack/networ networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ networking-hpe `git://git.openstack.org/openstack/networking-hpe `__ networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ +networking-hyperv `git://git.openstack.org/openstack/networking-hyperv `__ networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ networking-midonet `git://git.openstack.org/openstack/networking-midonet `__ @@ -117,6 +118,7 @@ networking-mlnx `git://git.openstack.org/openstack/networ networking-nec `git://git.openstack.org/openstack/networking-nec `__ networking-odl `git://git.openstack.org/openstack/networking-odl `__ networking-onos `git://git.openstack.org/openstack/networking-onos `__ +networking-opencontrail `git://git.openstack.org/openstack/networking-opencontrail `__ networking-ovn `git://git.openstack.org/openstack/networking-ovn `__ networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk `__ networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid `__ From 5158486124814cbf032c0e79413c6f856225fd98 Mon Sep 17 00:00:00 2001 From: Omer Anson Date: Thu, 24 Aug 2017 17:47:37 +0300 Subject: [PATCH 0637/1936] Add function is_plugin_enabled Add a function which tests if a plugin has been enabled with enable_plugin. This is helpful if two co-ordinating projects want to run specific setup in devstack in one only if the other is enabled. Change-Id: Ibf113755595b19d028374cdc1c86e19b5170be4f --- functions-common | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 660df795db..a5f770f579 100644 --- a/functions-common +++ b/functions-common @@ -1882,7 +1882,7 @@ function enable_plugin { local name=$1 local url=$2 local branch=${3:-master} - if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then + if is_plugin_enabled $name; then die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}" fi DEVSTACK_PLUGINS+=",$name" @@ -1891,6 +1891,19 @@ function enable_plugin { GITBRANCH[$name]=$branch } +# is_plugin_enabled +# +# Check if the plugin was enabled, e.g. 
using enable_plugin +# +# ``name`` The name with which the plugin was enabled +function is_plugin_enabled { + local name=$1 + if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then + return 0 + fi + return 1 +} + # fetch_plugins # # clones all plugins From 6d213dfda7d0c3446595ce9edc56408e3924b355 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 22 Aug 2017 16:05:16 +1000 Subject: [PATCH 0638/1936] Get default python versions from interpreter Query the python2/python3 interpreter for it's version to fill in PYTHON3_VERSION and PYTHON2_VERSION defaults. This means on a python3.6 platform such as Fedora 26, we don't need to override the default. Change-Id: Id826f275b99b9f397b95e817941019fc503daa1d --- functions-common | 17 ++++++++++++++++- stackrc | 6 ++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 660df795db..85054883d5 100644 --- a/functions-common +++ b/functions-common @@ -2380,13 +2380,28 @@ function is_provider_network { } +# Return just the . for the given python interpreter +function _get_python_version { + local interp=$1 + local version + version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + echo ${version} +} + # Return the current python as "python." function python_version { local python_version - python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + python_version=$(_get_python_version python2) + echo "python${python_version}" +} + +function python3_version { + local python3_version + python3_version=$(_get_python_version python3) echo "python${python_version}" } + # Service wrapper to restart services # restart_service service-name function restart_service { diff --git a/stackrc b/stackrc index b123d8ac7c..625ce3f27b 100644 --- a/stackrc +++ b/stackrc @@ -153,10 +153,12 @@ export DISABLED_PYTHON3_PACKAGES="" # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. -export PYTHON3_VERSION=${PYTHON3_VERSION:-3.5} +_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION}} # Just to be more explicit on the Python 2 version to use. -export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7} +_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)" +export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION}} # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then From 9d7e74e57c73d25e214e0269dbd9d342e8cd960c Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 25 Aug 2017 10:17:18 -0400 Subject: [PATCH 0639/1936] Setup logging for nova_cell1.conf When run in the default superconductor mode, the screen-n-cond-cell1 logs are not formatting in oslo format or colorized like the other logs. This is because screen-n-super-cond is running using nova.conf which is configured for oslo format logging with color. The oslo format logging is also needed to correctly index the logs from screen-n-cond-cell1 in logstash. This change simply configures nova_cell*.conf files for logging like nova.conf. 
Change-Id: I44fc11f09bb7283be0b068f5e02a424f3e5dafe2 Closes-Bug: #1713070 --- lib/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/nova b/lib/nova index fa09fd8fc1..581cd548ff 100644 --- a/lib/nova +++ b/lib/nova @@ -602,6 +602,8 @@ function create_nova_conf { rpc_backend_add_vhost $vhost iniset_rpc_backend nova $conf DEFAULT $vhost fi + # Format logging + setup_logging $conf done fi } From 08367bac0bb46a4ada14ed6b9416d570e976dd9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C5=82awek=20Kap=C5=82o=C5=84ski?= Date: Sun, 27 Aug 2017 08:44:27 +0000 Subject: [PATCH 0640/1936] Switch from $DEST/data/etcd to $DATA_DIR/etcd Etcd should use $DATA_DIR/etcd instead of "hardcoded" $DEST/data/etcd directory for its data. Change-Id: Icdc65f52a9d75981b63789036248e00d8ab72f11 --- lib/etcd3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index bc24790782..6e32cb31ca 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -26,7 +26,7 @@ set +o xtrace # Set up default values for etcd ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} ETCD_VERSION=${ETCD_VERSION:-v3.1.7} -ETCD_DATA_DIR="$DEST/data/etcd" +ETCD_DATA_DIR="$DATA_DIR/etcd" ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52" From c09eaf8e401b12c7b37f18b79d761876953d54a8 Mon Sep 17 00:00:00 2001 From: Le Hou Date: Mon, 28 Aug 2017 17:25:38 +0800 Subject: [PATCH 0641/1936] Update OS_AUTH_URL in Configuration.rst I am a new participant, and when I read the document I found that the command in the OS_AUTH_URL example is v2.0, so I want to update it. Change-Id: I973adc303a3cb37ce377ca4e31d1d666cd41b358 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 064bf515e6..c4834b7a84 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -136,7 +136,7 @@ OS\_AUTH\_URL :: - OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 + OS_AUTH_URL=http://$SERVICE_HOST:5000/v3.0 KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG Set command-line client log level to ``DEBUG``. These are commented From 787412ce6cf9447733dd8986c07883d791b520b2 Mon Sep 17 00:00:00 2001 From: Dima Kuznetsov Date: Mon, 28 Aug 2017 09:09:38 +0300 Subject: [PATCH 0642/1936] tempest: Disable l3_agent_scheduler when running without L3 agent Dragonflow can operate without L3 agent, and does not advertise L3 agent scheduler extension when running this way Change-Id: I23d0e558c8454636fcde0a1903c78965b70bc324 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index cc65ec7aa9..33bd74f7a3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -574,6 +574,11 @@ function configure_tempest { DISABLE_NETWORK_API_EXTENSIONS+=", metering" fi + # disable l3_agent_scheduler if we didn't enable L3 agent + if ! is_service_enabled q-l3; then + DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" + fi + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint From 4639984b96a3ff7be28357ccbd7c8ffa60371c42 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 28 Aug 2017 11:43:37 +0000 Subject: [PATCH 0643/1936] Update function description for start_tls_proxy In [1] the definition of the function was changed, adding the service name as first parameter. Since this seems to have caused failures in some plugins, at least update the function template accordingly. [1] Ifcba410f5969521e8b3d30f02795541c1661f83a Change-Id: I4d03957f8d3a18625f06379fb21aa7ba55e32797 --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index 7bde5e6496..b7ad644f5c 100644 --- a/lib/tls +++ b/lib/tls @@ -487,7 +487,7 @@ EOF } # Starts the TLS proxy for the given IP/ports -# start_tls_proxy front-host front-port back-host back-port +# start_tls_proxy service-name front-host front-port back-host back-port function start_tls_proxy { local b_service="$1-tls-proxy" local f_host=$2 From 0e58d22897457831b9dbf02d66a2f29d43803597 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 28 Aug 2017 14:03:18 -0700 Subject: [PATCH 0644/1936] Create correct directory layout for swift on purpose. The pre-existing configuration for swift on devstack set's the *-server's devices option (the root of the servers list of devices) to: devices = /opt/stack/data/swift/1 where "1" is the node_number, and will be 2, 3, ... N if the devstack machine is built with more than one swift node/device (pretty sure no one does that on devstack ever). The device(s) in the rings are named (perhaps confusingly similar to the swift loopback image) just "sdb1", so all storage servers expect to have a $STACK_USER writeable file system at: os.path.join(, "sdb1") That directory does not exist when you start up a devstack [1]. Currently Swift's object-server's require that directory exist before they write data into it (even with mount_check = false!). Unfortunately however, with mount_check=false the account/container servers are able to create the device directory when it does not exist [2]. Which can lead to some unfortunate results with permissions on some deployments using mount_check = false (e.g. testing or containerized environments). Fixing this issue [3] uncovered the previously benign [4] mis-configuration in devstack. Attempting 1. It was lost a long while ago I7c65303791689523f02e5ae44483a6c50b2eed1e 2. Essentially they want to: mkdir -p /opt/stack/data/swift/1/sdb1/containers/ ... but end up creating the "sdb1" dir too! 3. I3362a6ebff423016bb367b4b6b322bb41ae08764 4. Benign because the object-server share their device with the account-container devices and they would create the dirs before trying to write an object. It was incorrect, but worked by happenstance, which is nearly as good as worked on purpose. 
Change-Id: I52c4ecb70b1ae47e613ba243da5a4d94e5adedf2 --- lib/swift | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/lib/swift b/lib/swift index 455740ea82..3b87610007 100644 --- a/lib/swift +++ b/lib/swift @@ -608,15 +608,13 @@ function create_swift_disk { # create all of the directories needed to emulate a few different servers local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; - local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} - local node=${SWIFT_DATA_DIR}/${node_number}/node - local node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${STACK_USER} -g $user_group -d $drive - sudo install -o ${STACK_USER} -g $user_group -d $node_device - sudo chown -R ${STACK_USER}: ${node} + # node_devices must match *.conf devices option + local node_devices=${SWIFT_DATA_DIR}/${node_number} + local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number + sudo ln -sf $real_devices $node_devices; + local device=${real_devices}/sdb1 + [[ -d $device ]] && continue + sudo install -o ${STACK_USER} -g $user_group -d $device done } From 411c34da69f423059a04431a542be2b1b7a65f38 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 29 Aug 2017 14:40:26 +0000 Subject: [PATCH 0645/1936] Fix URLs when running with tls-proxy enabled Various services are returning broken links when running behind tls-proxy. These issues can be fixed by setting the X-Forwarded-Proto header in the apache config and letting oslo_middleware parse it. Change-Id: Ibe5dbdc4644ec812f0435f59319666fc336c195a Partial-Bug: 1713731 --- lib/cinder | 3 +-- lib/neutron | 1 + lib/neutron-legacy | 1 + lib/nova | 1 + lib/tls | 3 ++- 5 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 03328f31b6..67939f18e9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -296,8 +296,7 @@ function configure_cinder { # Set the service port for a proxy to take the original if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True else iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT diff --git a/lib/neutron b/lib/neutron index 2a660ec8e1..92c585a70f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -242,6 +242,7 @@ function configure_neutron_new { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" + iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True fi # Metering diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 784f3a8167..f9e0bd6ded 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -718,6 +718,7 @@ function _configure_neutron_common { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" + iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True fi _neutron_setup_rootwrap diff --git a/lib/nova b/lib/nova index fa09fd8fc1..887a70d475 
100644 --- a/lib/nova +++ b/lib/nova @@ -555,6 +555,7 @@ function create_nova_conf { if is_service_enabled tls-proxy; then iniset $NOVA_CONF DEFAULT glance_protocol https + iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True fi if is_service_enabled n-sproxy; then diff --git a/lib/tls b/lib/tls index 7bde5e6496..5bf5d96ede 100644 --- a/lib/tls +++ b/lib/tls @@ -527,6 +527,7 @@ $listen_string # for swift functional testing to work with tls enabled. It is 2 bytes # larger than the apache default of 8190. LimitRequestFieldSize $f_header_size + RequestHeader set X-Forwarded-Proto "https" ProxyPass http://$b_host:$b_port/ retry=0 nocanon @@ -541,7 +542,7 @@ EOF if is_suse ; then sudo a2enflag SSL fi - for mod in ssl proxy proxy_http; do + for mod in headers ssl proxy proxy_http; do enable_apache_mod $mod done enable_apache_site $b_service From 81f67fd7eb33fe3b197dd9b337d900a2271963b9 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 29 Aug 2017 09:52:58 +0000 Subject: [PATCH 0646/1936] Delete the default guest user from rabbitmq Leaving the default user enabled is a security issue, as it can be used without credentials. It may also mask issues like the one seen in [1]. [1] https://bugs.launchpad.net/bugs/1651576 Change-Id: I75b4e5696c0f8017b869127a10f3c14e2f8bd121 --- lib/rpc_backend | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/rpc_backend b/lib/rpc_backend index 3177e88ef2..5479db3333 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -97,6 +97,8 @@ function restart_rpc_backend { break done + # NOTE(frickler): Remove the default guest user + sudo rabbitmqctl delete_user guest || true fi } From f96f675ddb6448d3e4ab0114935e79690f88b4c6 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 30 Aug 2017 09:27:06 +0000 Subject: [PATCH 0647/1936] Updated from generate-devstack-plugins-list Change-Id: I0dae4374fcfc05ae883d782635954dbd8268accd --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 35b78da44c..a1d5ad822b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -147,6 +147,7 @@ osprofiler `git://git.openstack.org/openstack/osprof panko `git://git.openstack.org/openstack/panko `__ patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ +qinling `git://git.openstack.org/openstack/qinling `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ From def67a47e80c1ed1ed8f1bdcf105563935f6d921 Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Wed, 30 Aug 2017 11:39:16 +0100 Subject: [PATCH 0648/1936] Stop using ironic host manager with resource classes There should be no need to use the ironic host manager when using resource classes.
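As a brief illustration of how the toggle referenced below is typically consumed (the IRONIC_USE_RESOURCE_CLASSES variable and its default are assumed to be defined by the ironic devstack plugin, not by this patch), a deployer who wants to keep the legacy scheduling path could set it in local.conf along these lines:

    [[local|localrc]]
    # Sketch only: keep the legacy ironic_host_manager / baremetal-filter path
    # instead of resource-class based scheduling
    IRONIC_USE_RESOURCE_CLASSES=False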
Change-Id: I9a51ea6582dfef28e4da5f8510742230d88cbaf3 --- lib/nova_plugins/hypervisor-ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 062afb7f7c..ebb6a31541 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -41,9 +41,9 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then + iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager iniset $NOVA_CONF filter_scheduler use_baremetal_filters True fi From b79be36cdb9e0368d7976e0876ee1273110d5b5c Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Wed, 30 Aug 2017 19:19:56 +0300 Subject: [PATCH 0649/1936] Remove setting some of the scheduler settings It makes sense to set them only if resource classes are not used. Change-Id: I76d8501a1d1a20357acadad4cd8f2d6cef3896c1 --- lib/nova_plugins/hypervisor-ironic | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 062afb7f7c..034e403768 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -45,12 +45,11 @@ function configure_nova_hypervisor { if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then iniset $NOVA_CONF filter_scheduler use_baremetal_filters True + iniset $NOVA_CONF filter_scheduler host_subset_size 999 + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 fi - iniset $NOVA_CONF filter_scheduler host_subset_size 999 - - iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 # ironic section iniset $NOVA_CONF ironic auth_type password iniset $NOVA_CONF ironic username admin From 52609c684af195b84d99473cfb7d286a972e334e Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 28 Aug 2017 11:49:28 +0000 Subject: [PATCH 0650/1936] Fix errors in tls-proxy startup for cinder Two typos made starting tls-proxy fail when CINDER_USE_MOD_WSGI is False. Change-Id: I0435282182087a36d987843699152c1c08c4a494 --- lib/cinder | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 03328f31b6..71b0683d75 100644 --- a/lib/cinder +++ b/lib/cinder @@ -506,9 +506,9 @@ function start_cinder { if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" cinder_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tsl enabled - if is_service_enabled tls_proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_POR_INT + # Start proxy if tls enabled + if is_service_enabled tls-proxy; then + start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT fi else run_process "c-api" "$CINDER_BIN_DIR/uwsgi --ini $CINDER_UWSGI_CONF" From cdba1b371628aa0e8bc5b687351c5ee9b39e9bc6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 30 Aug 2017 11:11:06 -0400 Subject: [PATCH 0651/1936] Remove screen support from devstack completely This tears out the alternative path of using screen, so that we only use systemd enabled paths. 
This simplifies the number of ways that devstack can be run, and provides a much more reliable process launcher than the screen based approach. Change-Id: I8c27182f60b0f5310b3a8bf5feb02beb7ffbb26a --- files/debs/general | 1 - files/rpms-suse/general | 1 - files/rpms/general | 1 - functions-common | 341 ++-------------------------------------- lib/nova | 4 - stack.sh | 60 ------- stackrc | 60 +------ tests/run-process.sh | 109 ------------- unstack.sh | 9 -- 9 files changed, 15 insertions(+), 571 deletions(-) delete mode 100755 tests/run-process.sh diff --git a/files/debs/general b/files/debs/general index 1dde03b7fe..8e0018d284 100644 --- a/files/debs/general +++ b/files/debs/general @@ -29,7 +29,6 @@ psmisc python2.7 python-dev python-gdbm # needed for testr -screen tar tcpdump unzip diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 370f2409f7..0c1a2819b1 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -24,7 +24,6 @@ psmisc python-cmd2 # dist:opensuse-12.3 python-devel # pyOpenSSL python-xml -screen systemd-devel # for systemd-python tar tcpdump diff --git a/files/rpms/general b/files/rpms/general index 2443cc8cd7..f3f870823c 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -28,7 +28,6 @@ psmisc pyOpenSSL # version in pip uses too much memory python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 -screen systemd-devel # for systemd-python tar tcpdump diff --git a/functions-common b/functions-common index 660df795db..fdbb9c067e 100644 --- a/functions-common +++ b/functions-common @@ -1380,62 +1380,6 @@ function zypper_install { zypper --non-interactive install --auto-agree-with-licenses "$@" } - -# Process Functions -# ================= - -# _run_process() is designed to be backgrounded by run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name. -# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR`` -# If an optional group is provided sg will be used to set the group of -# the command. -# _run_process service "command-line" [group] -function _run_process { - # disable tracing through the exec redirects, it's just confusing in the logs. - xtrace=$(set +o | grep xtrace) - set +o xtrace - - local service=$1 - local command="$2" - local group=$3 - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - local logfile="${service}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - if [[ -n ${LOGDIR} ]]; then - exec 1>&"$real_logfile" 2>&1 - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log - fi - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - # reenable xtrace before we do *real* work - $xtrace - - # Run under ``setsid`` to force the process to become a session and group leader. - # The pid saved can be used with pkill -g to get the entire process group. - if [[ -n "$group" ]]; then - setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid - else - setsid $command & echo $! 
>$SERVICE_DIR/$SCREEN_NAME/$service.pid - fi - - # Just silently exit this process - exit 0 -} - function write_user_unit_file { local service=$1 local command="$2" @@ -1535,21 +1479,6 @@ function _run_under_systemd { $SYSTEMCTL start $systemd_service } -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# init_service_check -function init_service_check { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" - fi - - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure -} - # Find out if a process exists by partial name. # is_running name function is_running { @@ -1576,135 +1505,11 @@ function run_process { time_start "run_process" if is_service_enabled $service; then - if [[ "$USE_SYSTEMD" = "True" ]]; then - _run_under_systemd "$name" "$command" "$group" "$user" - elif [[ "$USE_SCREEN" = "True" ]]; then - if [[ "$user" == "root" ]]; then - command="sudo $command" - fi - screen_process "$name" "$command" "$group" - else - # Spawn directly without screen - if [[ "$user" == "root" ]]; then - command="sudo $command" - fi - _run_process "$name" "$command" "$group" & - fi + _run_under_systemd "$name" "$command" "$group" "$user" fi time_stop "run_process" } -# Helper to launch a process in a named screen -# Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``, -# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING`` -# screen_process name "command-line" [group] -# Run a command in a shell in a screen window, if an optional group -# is provided, use sg to set the group of the command. -function screen_process { - local name=$1 - local command="$2" - local group=$3 - - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - screen -S $SCREEN_NAME -X screen -t $name - - local logfile="${name}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - echo "LOGDIR: $LOGDIR" - echo "SCREEN_LOGDIR: $SCREEN_LOGDIR" - echo "log: $real_logfile" - if [[ -n ${LOGDIR} ]]; then - if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then - screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile" - screen -S $SCREEN_NAME -p $name -X log on - fi - # If logging isn't active then avoid a broken symlink - touch "$real_logfile" - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log - fi - fi - - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing - # happens. This sleep was added originally to handle gate runs - # where we needed this to be at least 3 seconds to pass - # consistently on slow clouds. Now this is configurable so that we - # can determine a reasonable value for the local case which should - # be much smaller. 
- sleep ${SCREEN_SLEEP:-3} - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the process failure file - # - # The pid saved can be used in stop_process() as a process group - # id to kill off all child processes - if [[ -n "$group" ]]; then - command="sg $group '$command'" - fi - - # Append the process to the screen rc file - screen_rc "$name" "$command" - - screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start. Exit code: \$?\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL" -} - -# Screen rc file builder -# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING`` -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then - echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# screen_stop_service service -function screen_stop_service { - local service=$1 - - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if is_service_enabled $service; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $service -X kill || true - fi -} - # Stop a service process # If a PID is available use it, kill the whole process group via TERM # If screen is being used kill the screen window; this will catch processes @@ -1724,150 +1529,28 @@ function stop_process { $SYSTEMCTL stop devstack@$service.service $SYSTEMCTL disable devstack@$service.service fi - - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then - pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) - # oslo.service tends to stop actually shutting down - # reliably in between releases because someone believes it - # is dying too early due to some inflight work they - # have. This is a tension. It happens often enough we're - # going to just account for it in devstack and assume it - # doesn't work. 
- # - # Set OSLO_SERVICE_WORKS=True to skip this block - if [[ -z "$OSLO_SERVICE_WORKS" ]]; then - # TODO(danms): Remove this double-kill when we have - # this fixed in all services: - # https://bugs.launchpad.net/oslo-incubator/+bug/1446583 - sleep 1 - # /bin/true because pkill on a non existent process returns an error - pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true - fi - rm $SERVICE_DIR/$SCREEN_NAME/$service.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen_stop_service $service - fi fi } -# Helper to get the status of each running service -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# service_check +# use systemctl to check service status function service_check { local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME - # make this -o errexit safe - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" + for service in ${ENABLED_SERVICES//,/ }; do + # because some things got renamed like key => keystone + if $SYSTEMCTL is-enabled devstack@$service.service; then + # no-pager is needed because otherwise status dumps to a + # pager when in interactive mode, which will stop a manual + # devstack run. + $SYSTEMCTL status devstack@$service.service --no-pager + fi done - - if [ -n "$failures" ]; then - die $LINENO "More details about the above errors can be found with screen" - fi -} - -# Tail a log file in a screen if USE_SCREEN is true. -# Uses globals ``USE_SCREEN`` -function tail_log { - local name=$1 - local logfile=$2 - - if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$name" "sudo tail -f $logfile | sed -u 's/\\\\\\\\x1b/\o033/g'" - fi -} - - -# Deprecated Functions -# -------------------- - -# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR`` -# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR`` -# _old_run_process service "command-line" -function _old_run_process { - local service=$1 - local command="$2" - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - exec /bin/bash -c "$command" - die "$service exec failure: $command" -} - -# old_run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process. 
-# old_run_process service "command-line" -function old_run_process { - local service=$1 - local command="$2" - - # Spawn the child process - _old_run_process "$service" "$command" & - echo $! } -# Compatibility for existing start_XXXX() functions -# Uses global ``USE_SCREEN`` -# screen_it service "command-line" -function screen_it { - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$1" "$2" - else - # Spawn directly without screen - old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - fi -} -# Compatibility for existing stop_XXXX() functions -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# screen_stop service -function screen_stop { - # Clean up the screen window - stop_process $1 +function tail_log { + deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens" } - # Plugin Functions # ================= diff --git a/lib/nova b/lib/nova index 581cd548ff..bd6695afdf 100644 --- a/lib/nova +++ b/lib/nova @@ -573,10 +573,6 @@ function create_nova_conf { if [[ -n ${LOGDIR} ]]; then bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log - fi fi iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf" diff --git a/stack.sh b/stack.sh index 301e1e7c6b..6e930ad234 100755 --- a/stack.sh +++ b/stack.sh @@ -228,16 +228,6 @@ if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-4 fi fi -# Check to see if we are already running DevStack -# Note that this may fail if USE_SCREEN=False -if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then - echo "You are already running a stack.sh session." - echo "To rejoin this session type 'screen -x stack'." - echo "To destroy this session, type './unstack.sh'." - exit 1 -fi - - # Local Settings # -------------- @@ -491,24 +481,6 @@ else exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 ) fi -# Set up logging of screen windows -# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the -# directory specified in ``SCREEN_LOGDIR``, we will log to the file -# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link -# ``screen-$SERVICE_NAME.log`` to the latest log file. -# Logs are kept for as long specified in ``LOGDAYS``. -# This is deprecated....logs go in ``LOGDIR``, only symlinks will be here now. -if [[ -n "$SCREEN_LOGDIR" ]]; then - - # We make sure the directory is created. 
- if [[ -d "$SCREEN_LOGDIR" ]]; then - # We cleanup the old logs - find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \; - else - mkdir -p $SCREEN_LOGDIR - fi -fi - # Basic test for ``$DEST`` path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} @@ -1015,38 +987,6 @@ if is_service_enabled $DATABASE_BACKENDS; then configure_database fi - -# Configure screen -# ---------------- - -USE_SCREEN=$(trueorfalse True USE_SCREEN) -if [[ "$USE_SCREEN" == "True" ]]; then - # Create a new named screen to run processes in - screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash - sleep 1 - - # Set a reasonable status bar - SCREEN_HARDSTATUS=${SCREEN_HARDSTATUS:-} - if [ -z "$SCREEN_HARDSTATUS" ]; then - SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' - fi - screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" - screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true - - if is_service_enabled tls-proxy; then - follow_tls_proxy - fi -fi - -# Clear ``screenrc`` file -SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc -if [[ -e $SCREENRC ]]; then - rm -f $SCREENRC -fi - -# Initialize the directory for service status check -init_service_check - # Save configuration values save_stackenv $LINENO diff --git a/stackrc b/stackrc index b123d8ac7c..787ae28a29 100644 --- a/stackrc +++ b/stackrc @@ -88,22 +88,9 @@ CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" -# TODO(sdague): Queens -# -# All the non systemd paths should be removed in queens, they only -# exist in Pike to support testing from grenade. Ensure that all this -# is cleaned up and purged, which should dramatically simplify the -# devstack codebase. - -# Whether to use 'dev mode' for screen windows. Dev mode works by -# stuffing text into the screen windows so that a developer can use -# ctrl-c, up-arrow, enter to restart the service. Starting services -# this way is slightly unreliable, and a bit slower, so this can -# be disabled for automated testing by setting this value to False. -USE_SCREEN=$(trueorfalse False USE_SCREEN) - -# Whether to use SYSTEMD to manage services -USE_SYSTEMD=$(trueorfalse False USE_SYSTEMD) +# Whether to use SYSTEMD to manage services, we only do this from +# Queens forward. +USE_SYSTEMD="True" USER_UNITS=$(trueorfalse False USER_UNITS) if [[ "$USER_UNITS" == "True" ]]; then SYSTEMD_DIR="$HOME/.local/share/systemd/user" @@ -122,16 +109,6 @@ fi # memory constrained than CPU bound. ENABLE_KSM=$(trueorfalse True ENABLE_KSM) -# When using screen, should we keep a log file on disk? You might -# want this False if you have a long-running setup where verbose logs -# can fill-up the host. -# XXX: Ideally screen itself would be configured to log but just not -# activate. This isn't possible with the screerc syntax. Temporary -# logging can still be used by a developer with: -# C-a : logfile foo -# C-a : log on -SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING) - # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password @@ -167,16 +144,6 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi -# TODO(sdague): Delete all this in Queens. -if [[ "$USE_SYSTEMD" == "True" ]]; then - USE_SCREEN=False -fi -# if we are forcing off USE_SCREEN (as we do in the gate), force on -# systemd. This allows us to drop one of 3 paths through the code. 
-if [[ "$USE_SCREEN" == "False" ]]; then - USE_SYSTEMD="True" -fi - # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, # where logs are to be presented as browsable text files; hence color @@ -755,9 +722,6 @@ PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""} -# Set default screen name -SCREEN_NAME=${SCREEN_NAME:-stack} - # Allow the use of an alternate protocol (such as https) for service endpoints SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} @@ -877,15 +841,6 @@ RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) # Following entries need to be last items in file -# Compatibility bits required by other callers like Grenade - -# Old way was using SCREEN_LOGDIR to locate those logs and LOGFILE for the stack.sh trace log. -# LOGFILE SCREEN_LOGDIR output -# not set not set no log files -# set not set stack.sh log to LOGFILE -# not set set screen logs to SCREEN_LOGDIR -# set set stack.sh log to LOGFILE, screen logs to SCREEN_LOGDIR - # New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR # LOGFILE LOGDIR output # not set not set (new) set LOGDIR from default @@ -893,9 +848,6 @@ RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) # not set set screen logs to LOGDIR # set set stack.sh log to LOGFILE, screen logs to LOGDIR -# For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR -# symlinks to SCREEN_LOGDIR (compat) - # Set up new logging defaults if [[ -z "${LOGDIR:-}" ]]; then default_logdir=$DEST/logs @@ -910,12 +862,6 @@ if [[ -z "${LOGDIR:-}" ]]; then # LOGFILE had no path, set a default LOGDIR="$default_logdir" fi - - # Check for duplication - if [[ "${SCREEN_LOGDIR:-}" == "${LOGDIR}" ]]; then - # We don't need the symlinks since it's the same directory - unset SCREEN_LOGDIR - fi fi unset default_logdir logfile fi diff --git a/tests/run-process.sh b/tests/run-process.sh deleted file mode 100755 index 301b9a032b..0000000000 --- a/tests/run-process.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash -# tests/exec.sh - Test DevStack run_process() and stop_process() -# -# exec.sh start|stop|status -# -# Set USE_SCREEN True|False to change use of screen. -# -# This script emulates the basic exec environment in ``stack.sh`` to test -# the process spawn and kill operations. - -if [[ -z $1 ]]; then - echo "$0 start|stop" - exit 1 -fi - -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -source $TOP_DIR/functions - -USE_SCREEN=${USE_SCREEN:-False} - -ENABLED_SERVICES=fake-service - -SERVICE_DIR=/tmp -SCREEN_NAME=test -SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME} - - -# Kill background processes on exit -trap clean EXIT -clean() { - local r=$? - jobs -p - kill >/dev/null 2>&1 $(jobs -p) - exit $r -} - - -# Exit on any errors so that errors don't compound -trap failed ERR -failed() { - local r=$? - jobs -p - kill >/dev/null 2>&1 $(jobs -p) - set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" - exit $r -} - -function status { - if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then - pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid) - fi - ps -ef | grep fake -} - -function setup_screen { -if [[ ! 
-d $SERVICE_DIR/$SCREEN_NAME ]]; then - rm -rf $SERVICE_DIR/$SCREEN_NAME - mkdir -p $SERVICE_DIR/$SCREEN_NAME -fi - -if [[ "$USE_SCREEN" == "True" ]]; then - # Create a new named screen to run processes in - screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash - sleep 1 - - # Set a reasonable status bar - if [ -z "$SCREEN_HARDSTATUS" ]; then - SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' - fi - screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" -fi - -# Clear screen rc file -SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc -if [[ -e $SCREENRC ]]; then - echo -n > $SCREENRC -fi -} - -# Mimic logging - # Set up output redirection without log files - # Copy stdout to fd 3 - exec 3>&1 - if [[ "$VERBOSE" != "True" ]]; then - # Throw away stdout and stderr - #exec 1>/dev/null 2>&1 - : - fi - # Always send summary fd to original stdout - exec 6>&3 - - -if [[ "$1" == "start" ]]; then - echo "Start service" - setup_screen - run_process fake-service "$TOP_DIR/tests/fake-service.sh" - sleep 1 - status -elif [[ "$1" == "stop" ]]; then - echo "Stop service" - stop_process fake-service - status -elif [[ "$1" == "status" ]]; then - status -else - echo "Unknown command" - exit 1 -fi diff --git a/unstack.sh b/unstack.sh index 77a151f933..5d3672e25d 100755 --- a/unstack.sh +++ b/unstack.sh @@ -171,15 +171,6 @@ if is_service_enabled dstat; then stop_dstat fi -# Clean up the remainder of the screen processes -SCREEN=$(which screen) -if [[ -n "$SCREEN" ]]; then - SESSION=$(screen -ls | awk "/[0-9]+.${SCREEN_NAME}/"'{ print $1 }') - if [[ -n "$SESSION" ]]; then - screen -X -S $SESSION quit - fi -fi - # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. 
From 0eebeb415ada3437b598d0ceb6952dc7ad9134d9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 30 Aug 2017 14:16:58 -0400 Subject: [PATCH 0652/1936] clean up screen and tail_log references Change-Id: I6bcfa09931ed1f70e071ccb16688c15c7ef2898f --- lib/cinder | 11 ++++------- lib/dstat | 2 +- lib/glance | 3 +-- lib/horizon | 5 ++--- lib/keystone | 6 +----- lib/neutron | 4 ++-- lib/neutron-legacy | 4 ++-- lib/nova | 4 ++-- lib/swift | 13 +++---------- lib/template | 4 ++-- 10 files changed, 20 insertions(+), 36 deletions(-) diff --git a/lib/cinder b/lib/cinder index 03328f31b6..ba5bd04a0d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -473,7 +473,7 @@ function _configure_tgt_for_config_d { fi } -# start_cinder() - Start running processes, including screen +# start_cinder() - Start running processes function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL @@ -533,12 +533,9 @@ function start_cinder { # stop_cinder() - Stop running processes function stop_cinder { stop_process c-api - - # Kill the cinder screen windows - local serv - for serv in c-bak c-sch c-vol; do - stop_process $serv - done + stop_process c-bak + stop_process c-sch + stop_process c-vol } # create_volume_types() - Create Cinder's configured volume types diff --git a/lib/dstat b/lib/dstat index 982b70387e..fe38d75585 100644 --- a/lib/dstat +++ b/lib/dstat @@ -16,7 +16,7 @@ _XTRACE_DSTAT=$(set +o | grep xtrace) set +o xtrace -# start_dstat() - Start running processes, including screen +# start_dstat() - Start running processes function start_dstat { # A better kind of sysstat, with the top process per time slice run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" diff --git a/lib/glance b/lib/glance index 0a5b9f59b6..6e4a925488 100644 --- a/lib/glance +++ b/lib/glance @@ -333,7 +333,7 @@ function install_glance { setup_develop $GLANCE_DIR } -# start_glance() - Start running processes, including screen +# start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then @@ -358,7 +358,6 @@ function start_glance { # stop_glance() - Stop running processes function stop_glance { - # Kill the Glance screen windows stop_process g-api stop_process g-reg } diff --git a/lib/horizon b/lib/horizon index becc5a0e67..3d2f68d09d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -181,13 +181,12 @@ function install_horizon { git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } -# start_horizon() - Start running processes, including screen +# start_horizon() - Start running processes function start_horizon { restart_apache_server - tail_log horizon /var/log/$APACHE_NAME/horizon_error.log } -# stop_horizon() - Stop running processes (non-screen) +# stop_horizon() - Stop running processes function stop_horizon { stop_apache_server } diff --git a/lib/keystone b/lib/keystone index 685891e8bc..69aadb66e0 100644 --- a/lib/keystone +++ b/lib/keystone @@ -536,7 +536,7 @@ function install_keystone { fi } -# start_keystone() - Start running processes, including screen +# start_keystone() - Start running processes function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT @@ -549,8 +549,6 @@ function start_keystone { if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then enable_apache_site keystone restart_apache_server - tail_log key /var/log/$APACHE_NAME/keystone.log - tail_log key-access /var/log/$APACHE_NAME/keystone_access.log else # uwsgi run_process keystone 
"$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" fi @@ -585,8 +583,6 @@ function stop_keystone { else stop_process keystone fi - # Kill the Keystone screen window - stop_process key } # bootstrap_keystone() - Initialize user, role and project diff --git a/lib/neutron b/lib/neutron index 2a660ec8e1..a5312888c3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -409,7 +409,7 @@ function start_neutron_api { fi } -# start_neutron() - Start running processes, including screen +# start_neutron() - Start running processes function start_neutron_new { # Start up the neutron agents if enabled # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins @@ -446,7 +446,7 @@ function start_neutron_new { fi } -# stop_neutron() - Stop running processes (non-screen) +# stop_neutron() - Stop running processes function stop_neutron_new { for serv in neutron-api neutron-agent neutron-l3; do stop_process $serv diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 784f3a8167..c8d2540ca5 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -455,7 +455,7 @@ function configure_neutron_after_post_config { fi } -# Start running processes, including screen +# Start running processes function start_neutron_service_and_check { local service_port=$Q_PORT local service_protocol=$Q_PROTOCOL @@ -524,7 +524,7 @@ function stop_mutnauq_l2_agent { stop_process q-agt } -# stop_mutnauq_other() - Stop running processes (non-screen) +# stop_mutnauq_other() - Stop running processes function stop_mutnauq_other { if is_service_enabled q-dhcp; then stop_process q-dhcp diff --git a/lib/nova b/lib/nova index bd6695afdf..5b78972571 100644 --- a/lib/nova +++ b/lib/nova @@ -882,7 +882,7 @@ function start_nova_compute { export PATH=$old_path } -# start_nova() - Start running processes, including screen +# start_nova() - Start running processes function start_nova_rest { # Hack to set the path for rootwrap local old_path=$PATH @@ -999,7 +999,7 @@ function stop_nova_conductor { done } -# stop_nova() - Stop running processes (non-screen) +# stop_nova() - Stop running processes function stop_nova { stop_nova_rest stop_nova_conductor diff --git a/lib/swift b/lib/swift index 455740ea82..45f679302c 100644 --- a/lib/swift +++ b/lib/swift @@ -7,7 +7,7 @@ # # - ``functions`` file # - ``apache`` file -# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# - ``DEST``, `SWIFT_HASH` must be defined # - ``STACK_USER`` must be defined # - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined # - ``lib/keystone`` file @@ -780,7 +780,7 @@ function install_ceilometermiddleware { fi } -# start_swift() - Start running processes, including screen +# start_swift() - Start running processes function start_swift { # (re)start memcached to make sure we have a clean memcache. 
restart_service memcached @@ -799,13 +799,6 @@ function start_swift { restart_apache_server # The rest of the services should be started in backgroud swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start - # Be we still want the logs of Swift Proxy in our screen session - tail_log s-proxy /var/log/$APACHE_NAME/proxy-server - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1 - done - fi return 0 fi @@ -859,7 +852,7 @@ function start_swift { fi } -# stop_swift() - Stop running processes (non-screen) +# stop_swift() - Stop running processes function stop_swift { local type diff --git a/lib/template b/lib/template index 25d653cb46..e6d003284f 100644 --- a/lib/template +++ b/lib/template @@ -81,7 +81,7 @@ function install_XXXX { : } -# start_XXXX() - Start running processes, including screen +# start_XXXX() - Start running processes function start_XXXX { # The quoted command must be a single command and not include an # shell metacharacters, redirections or shell builtins. @@ -89,7 +89,7 @@ function start_XXXX { : } -# stop_XXXX() - Stop running processes (non-screen) +# stop_XXXX() - Stop running processes function stop_XXXX { # for serv in serv-a serv-b; do # stop_process $serv From 730faa2626cb2522dd07465b8c8cf87e39a99ace Mon Sep 17 00:00:00 2001 From: Anna Khmelnitsky Date: Fri, 1 Sep 2017 16:25:09 -0700 Subject: [PATCH 0653/1936] Cap tox != 2.8.0 temporarily to avoid ValueError This is proposed temporary change to avoid "ValueError: need more than 1 value to unpack" error in stack. Change-Id: I743febbef3a1f201cea37471356518be31585277 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index cc65ec7aa9..0ce663c7ab 100644 --- a/lib/tempest +++ b/lib/tempest @@ -608,7 +608,7 @@ function configure_tempest { # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - pip_install tox + pip_install 'tox!=2.8.0' pushd $TEMPEST_DIR tox -r --notest -efull # NOTE(mtreinish) Respect constraints in the tempest full venv, things that From c63d9331a78159833a784806db4b2cd6b58a3eed Mon Sep 17 00:00:00 2001 From: zhangbailin Date: Fri, 1 Sep 2017 19:46:16 -0700 Subject: [PATCH 0654/1936] Replace http with https for doc links in devstack's document 1) Update doc links according to OpenStack document migration 2) Use https instead of http for docs links Change-Id: I81b560d1e4c5210dc00a6a6ac06c03bb1e69d595 --- HACKING.rst | 4 ++-- doc/source/faq.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index fc67f09a7b..d5d6fbcf02 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -20,7 +20,7 @@ in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`_ contains the usual links for blueprints, bugs, etc. __ contribute_ -.. _contribute: http://docs.openstack.org/infra/manual/developers.html +.. _contribute: https://docs.openstack.org/infra/manual/developers.html __ lp_ .. _lp: https://launchpad.net/~devstack @@ -255,7 +255,7 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. * The ``OS_*`` environment variables should be the only ones used for all authentication to OpenStack clients as documented in the CLIAuth_ wiki page. -.. _CLIAuth: http://wiki.openstack.org/CLIAuth +.. _CLIAuth: https://wiki.openstack.org/CLIAuth * The exercise MUST clean up after itself if successful. 
If it is not successful, it is assumed that state will be left behind; this allows a chance for developers diff --git a/doc/source/faq.rst b/doc/source/faq.rst index a186336f54..ed9b4da6bb 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -32,9 +32,9 @@ That isn't a question, but please do! The source for DevStack is at `git.openstack.org `__ and bug reports go to `LaunchPad -`__. Contributions follow the +`__. Contributions follow the usual process as described in the `developer guide -`__. This +`__. This Sphinx documentation is housed in the doc directory. Why not use packages? From 5f2ed3697c39ee1e579fe3d0927927b2ca6a4fc4 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 4 Sep 2017 08:48:08 +0000 Subject: [PATCH 0655/1936] Updated from generate-devstack-plugins-list Change-Id: Ifbb6d64834d6f3609c56c85163dc367d778ed21f --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index a1d5ad822b..2c8805ff16 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -144,6 +144,7 @@ octavia-dashboard `git://git.openstack.org/openstack/octavi omni `git://git.openstack.org/openstack/omni `__ os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ +oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin `__ panko `git://git.openstack.org/openstack/panko `__ patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ From d8bb220606737719bcdf7c5b4f54906f2974c71c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 3 Sep 2017 12:13:59 -0500 Subject: [PATCH 0656/1936] Make list_images.sh emit the etcd3 tarball We need this for every devstack run now, so downloading it from github every time isn't the most awesome thing in the world. Add an extra variable EXTRA_CACHE_URLS which will be appended to the output of tools/image_list.sh. This way, these files will be downloaded during the daily nodepool build, but they will not be in the IMAGE_LIST and hence be considered as images to upload. Add a function get_extra_file which echos the path to a file given the URL. It will first check the cache at $FILES, and if not present download it. Update the documentation in image_list.sh to reflect what's happening. Move the defaults for etcd variables into stackrc, since it is a base service now. Change-Id: I86104824a29d973a6288df1f24b7891feb86267c --- functions | 31 +++++++++++++++++++++++++++++++ lib/etcd3 | 32 ++++---------------------------- stackrc | 34 ++++++++++++++++++++++++++++++++++ tools/image_list.sh | 23 ++++++++++++++++++++--- 4 files changed, 89 insertions(+), 31 deletions(-) diff --git a/functions b/functions index 6f2164a777..ebcae334e0 100644 --- a/functions +++ b/functions @@ -45,6 +45,37 @@ function short_source { # export it so child shells have access to the 'short_source' function also. export -f short_source +# Download a file from a URL +# +# Will check cache (in $FILES) or download given URL. +# +# Argument is the URL to the remote file +# +# Will echo the local path to the file as the output. Will die on +# failure to download. +# +# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS +# and tools/image_list.sh +function get_extra_file { + local file_url=$1 + + file_name=$(basename "$file_url") + if [[ $file_url != file* ]]; then + # If the file isn't cache, download it + if [[ ! 
-f $FILES/$file_name ]]; then + wget --progress=dot:giga -c $file_url -O $FILES/$file_name + if [[ $? -ne 0 ]]; then + die "$file_url could not be downloaded" + fi + fi + echo "$FILES/$file_name" + return + else + # just strip the file:// bit and that's the path to the file + echo $file_url | sed 's/$file:\/\///g' + fi +} + # Retrieve an image from a URL and upload into Glance. # Uses the following variables: diff --git a/lib/etcd3 b/lib/etcd3 index 6e32cb31ca..60e827add2 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -24,15 +24,9 @@ set +o xtrace # -------- # Set up default values for etcd -ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} -ETCD_VERSION=${ETCD_VERSION:-v3.1.7} ETCD_DATA_DIR="$DATA_DIR/etcd" ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" -ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52" -# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. -ETCD_SHA256_ARM64="" -ETCD_SHA256_PPC64="" ETCD_PORT=2379 if is_ubuntu ; then @@ -95,37 +89,19 @@ function cleanup_etcd3 { function install_etcd3 { echo "Installing etcd" - # Make sure etcd3 downloads the correct architecture - if is_arch "x86_64"; then - ETCD_ARCH="amd64" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} - elif is_arch "aarch64"; then - ETCD_ARCH="arm64" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} - elif is_arch "ppc64le"; then - ETCD_ARCH="ppc64le" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} - else - exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" - fi - - ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH - # Create the necessary directories sudo mkdir -p $ETCD_BIN_DIR sudo mkdir -p $ETCD_DATA_DIR # Download and cache the etcd tgz for subsequent use + local etcd_file + etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)" if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then - ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz - if [ ! -f "$FILES/$ETCD_DOWNLOAD_FILE" ]; then - wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE - fi - echo "${ETCD_SHA256} $FILES/${ETCD_DOWNLOAD_FILE}" > $FILES/etcd.sha256sum + echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum # NOTE(sdague): this should go fatal if this fails sha256sum -c $FILES/etcd.sha256sum - tar xzvf $FILES/$ETCD_DOWNLOAD_FILE -C $FILES + tar xzvf $etcd_file -C $FILES sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd fi if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then diff --git a/stackrc b/stackrc index b123d8ac7c..0757d78775 100644 --- a/stackrc +++ b/stackrc @@ -732,6 +732,40 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DOWNLOAD_DEFAULT_IMAGES=False fi +# This is a comma separated list of extra URLS to be listed for +# download by the tools/image_list.sh script. CI environments can +# pre-download these URLS and place them in $FILES. Later scripts can +# then use "get_extra_file " which will print out the path to the +# file; it will either be downloaded on demand or acquired from the +# cache if there. +EXTRA_CACHE_URLS="" + +# etcd3 defaults +ETCD_VERSION=${ETCD_VERSION:-v3.1.7} +ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52" +# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. 
+ETCD_SHA256_ARM64="" +ETCD_SHA256_PPC64="" +# Make sure etcd3 downloads the correct architecture +if is_arch "x86_64"; then + ETCD_ARCH="amd64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} +elif is_arch "aarch64"; then + ETCD_ARCH="arm64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} +elif is_arch "ppc64le"; then + ETCD_ARCH="ppc64le" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} +else + exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" +fi +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} +ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH +ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz +ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE +# etcd is always required, so place it into list of pre-cached downloads +EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION" + # Detect duplicate values in IMAGE_URLS for image_url in ${IMAGE_URLS//,/ }; do if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then diff --git a/tools/image_list.sh b/tools/image_list.sh index 29b93ed1d8..3a27c4acfd 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -1,5 +1,14 @@ #!/bin/bash +# Print out a list of image and other files to download for caching. +# This is mostly used by the OpenStack infrasturucture during daily +# image builds to save the large images to /opt/cache/files (see [1]) +# +# The two lists of URL's downloaded are the IMAGE_URLS and +# EXTRA_CACHE_URLS, which are setup in stackrc +# +# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) @@ -31,12 +40,20 @@ for driver in $DRIVERS; do ALL_IMAGES+=$URLS done -# Make a nice list -echo $ALL_IMAGES | tr ',' '\n' | sort | uniq - # Sanity check - ensure we have a minimum number of images num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l) if [[ "$num" -lt 4 ]]; then echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right." exit 1 fi + +# This is extra non-image files that we want pre-cached. This is kept +# in a separate list because devstack loops over the IMAGE_LIST to +# upload files glance and these aren't images. (This was a bit of an +# after-thought which is why the naming around this is very +# image-centric) URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS) ALL_IMAGES+=$URLS + +# Make a nice combined list echo $ALL_IMAGES | tr ',' '\n' | sort | uniq From 477a962861afe2e859179245d6d39cb41f6c499d Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 4 Aug 2017 11:09:26 -0400 Subject: [PATCH 0657/1936] Enable graceful shutdown for services 1] Processes using uwsgi: uwsgi services do not support graceful shutdown [1]. They require some changes in the unit files [2], including adding the graceful shutdown hook below and changing KillSignal: --hook-master-start "unix_signal:15 gracefully_kill_them_all" All the steps and changes required are specified in the etherpad [1]. 2] Non-uwsgi services need the changes below: In the [Service] section: a. Add KillMode = process b. Add TimeoutStopSec = infinity NOTE: Creating the unit file for services other than uwsgi is handled by the 'write_user_unit_file' function [3]. This function is common to all services, so this patch adds the above-mentioned parameters for services using ServiceLauncher as well, even though they do not require them. Added a new stackrc variable WORKER_TIMEOUT, which is required to add graceful shutdown support to uwsgi services.
It will be set as a value to 'worker-reload-mercy' [4] in uwsgi file of service. The default value set to this variable is 90. [1] https://etherpad.openstack.org/p/uwsgi-issues [2] https://www.freedesktop.org/software/systemd/man/systemd.kill.html [3] https://github.com/openstack-dev/devstack/blob/2967ca3dfd0d64970dfa5dfa7ac2330ee7aa90ed/functions-common#L1439-L1461 [4] http://uwsgi-docs.readthedocs.io/en/latest/Options.html#worker-reload-mercy Co-Authored-By: Dinesh Bhor Change-Id: Ia95291325ce4858b47102dd49504250183f339ab --- functions-common | 4 +++- lib/apache | 10 ++++++++++ stackrc | 3 +++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index a5f770f579..52f53ef147 100644 --- a/functions-common +++ b/functions-common @@ -1451,6 +1451,8 @@ function write_user_unit_file { iniset -sudo $unitfile "Unit" "Description" "Devstack $service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "TimeoutStopSec" "infinity" if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1473,7 +1475,7 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "Type" "notify" - iniset -sudo $unitfile "Service" "KillSignal" "SIGQUIT" + iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "Restart" "always" iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" diff --git a/lib/apache b/lib/apache index dfca25a764..39d5b7b071 100644 --- a/lib/apache +++ b/lib/apache @@ -260,10 +260,15 @@ function write_uwsgi_config { # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$file" uwsgi die-on-term true iniset "$file" uwsgi exit-on-reload true + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$file" uwsgi enable-threads true iniset "$file" uwsgi plugins python # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. iniset "$file" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. @@ -316,6 +321,11 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi plugins python # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT # Override the default size for headers from the 4k default. iniset "$file" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. 
diff --git a/stackrc b/stackrc index 3591994f79..e936b337e7 100644 --- a/stackrc +++ b/stackrc @@ -778,6 +778,9 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Service graceful shutdown timeout SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} +# Service graceful shutdown timeout +WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} + # Support alternative yum -- in future Fedora 'dnf' will become the # only supported installer, but for now 'yum' and 'dnf' are both # available in parallel with compatible CLIs. Allow manual switching From acb48a1a192b9531c6b30ed42426cb96f012ebb0 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 6 Sep 2017 10:55:39 +0000 Subject: [PATCH 0658/1936] Update link to nova config reference For the devstack master branch we should point to the latest version and not to some possibly outdated older branch. Change-Id: I4af3aef90a2c295df3de4a5b49d127e85ab517ac --- doc/source/guides/nova.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 6bbab53aea..0f105d7c58 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -66,5 +66,5 @@ These config options are defined in `nova.conf.serial_console `_. For more information on OpenStack configuration see the `OpenStack -Configuration Reference -`_ +Compute Service Configuration Reference +`_ From a7e9a5d447b3eeacfb52d7ddc94445058a8d6fd1 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 6 Sep 2017 10:58:04 +0000 Subject: [PATCH 0659/1936] Update to using pike cloud-archive Now that Pike has been released, switch to using the pike version of UCA instead of ocata, too. One reason to do so is that it adds python3-ceph packages, allowing progress on the python3 compatibility of the Ceph integration. Change-Id: I7d95e53892b697c72af75ad0ce7ce2dec6d31fde --- tools/fixup_stuff.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f1552ab2f7..efe0125741 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -84,10 +84,10 @@ if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; the # we can find local mirrors then use that mirror. source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main" + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main" else # Otherwise use upstream UCA - sudo add-apt-repository -y cloud-archive:ocata + sudo add-apt-repository -y cloud-archive:pike fi # Disable use of libvirt wheel since a cached wheel build might be From 486376e91b1f9a7680371036e470b8692804e917 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 5 Sep 2017 19:56:06 -0500 Subject: [PATCH 0660/1936] Change CINDER_LVM_TYPE to 'auto' as the default This was previously set to thin as the default, but at the time there were failures seen with what appeared to be race conditions when creating snapshots. These failures are not seen locally, and we have a lot of installs using the default auto by this point with no reports from the field of seeing this failure. This is to be able to more extensively test this in the gate, and hopefully get this switched over to be able to thinly provision by default when possible.
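For deployers who would rather pin the provisioning style than rely on the new auto-detection, a minimal local.conf override is assumed to look roughly like this (the valid values quoted in the hunk below are auto, default for thick, and thin):

    [[local|localrc]]
    # Sketch only: force thick LVM volumes; use "thin" to force thin provisioning
    CINDER_LVM_TYPE=default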
Change-Id: I3e99adadd1c37ba8b24b6cb71a8969ffc93f75a1 Related-bug: #1642111 --- lib/cinder | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 03328f31b6..9a2be653d1 100644 --- a/lib/cinder +++ b/lib/cinder @@ -70,12 +70,11 @@ CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # What type of LVM device should Cinder use for LVM backend -# Defaults to default, which is thick, the other valid choice -# is thin, which as the name implies utilizes lvm thin provisioning. -# Thinly provisioned LVM volumes may be more efficient when using the Cinder -# image cache, but there are also known race failures with volume snapshots -# and thinly provisioned LVM volumes, see bug 1642111 for details. -CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} +# Defaults to auto, which will do thin provisioning if it's a fresh +# volume group, otherwise it will do thick. The other valid choices are +# default, which is thick, or thin, which as the name implies utilizes lvm +# thin provisioning. +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} # Default backends # The backend format is type:name where type is one of the supported backend From a066abedac916751660c73548e7eab2d216323c5 Mon Sep 17 00:00:00 2001 From: Roman Podoliaka Date: Tue, 18 Apr 2017 16:18:14 +0300 Subject: [PATCH 0661/1936] placement: install osc-placement This makes sure that the openstack client placement plugin gets installed when either LIBS_FROM_GIT includes osc-placement or placement is used, which is always now if you've enabled nova, which is enabled by default. Co-Authored-By: Matt Riedemann Depends-On: Ica83e29780722dd1555904e46b9ff1d1fdf72516 Change-Id: I7c5a5c10288b356302bb3027837d4ed4f0fbad8c --- lib/libraries | 2 ++ lib/placement | 3 +++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 3 ++- 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/libraries b/lib/libraries index 4ceb80423c..6d52f642a1 100644 --- a/lib/libraries +++ b/lib/libraries @@ -30,6 +30,7 @@ GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist GITDIR["os-client-config"]=$DEST/os-client-config GITDIR["osc-lib"]=$DEST/osc-lib +GITDIR["osc-placement"]=$DEST/osc-placement GITDIR["oslo.cache"]=$DEST/oslo.cache GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency GITDIR["oslo.config"]=$DEST/oslo.config @@ -91,6 +92,7 @@ function install_libs { _install_lib_from_source "debtcollector" _install_lib_from_source "futurist" _install_lib_from_source "osc-lib" + _install_lib_from_source "osc-placement" _install_lib_from_source "os-client-config" _install_lib_from_source "oslo.cache" _install_lib_from_source "oslo.concurrency" diff --git a/lib/placement b/lib/placement index aef9b7454e..d3fb8c848d 100644 --- a/lib/placement +++ b/lib/placement @@ -159,6 +159,9 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { install_apache_wsgi + # Install the openstackclient placement client plugin for CLI + # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r. 
+ pip_install osc-placement } # start_placement_api() - Start the API processes ahead of other things diff --git a/stackrc b/stackrc index 3591994f79..af0709a225 100644 --- a/stackrc +++ b/stackrc @@ -388,6 +388,10 @@ GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master} # this doesn't exist in a lib file, so set it here GITDIR["python-openstackclient"]=$DEST/python-openstackclient +# placement-api CLI +GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git} +GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-master} + ################### # diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 5b4ff32f2a..0bd8d49357 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -36,7 +36,8 @@ ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization django_openstack_auth" -ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap" +ALL_LIBS+=" python-openstackclient osc-lib osc-placement" +ALL_LIBS+=" os-client-config oslo.rootwrap" ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" From a7f32720228968a589aab9dd10a08a03834b81a5 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 7 Sep 2017 11:16:40 -0400 Subject: [PATCH 0662/1936] Fix up main readme A few tweaks to make this more accurate. Change-Id: I36ae24870be2ca6b5ac0e0507dd457b688739348 --- README.rst | 9 +++++---- doc/source/guides/devstack-with-lbaas-v2.rst | 1 - 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index adbf59a637..6885546c94 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,5 @@ -DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. +DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud +from git source trees. Goals ===== @@ -27,9 +28,9 @@ Versions The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the -following to create a Newton OpenStack cloud:: +following to create a Pike OpenStack cloud:: - git checkout stable/newton + git checkout stable/pike ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -54,7 +55,7 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost:5000/v2.0/ +* Keystone: http://myhost/identity/v2.0/ We also provide an environment file that you can use to interact with your cloud via CLI:: diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 4ed64bf12f..3592844efb 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -39,7 +39,6 @@ Edit your ``local.conf`` to look like LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=True - SCREEN_LOGDIR=$DEST/logs # Pre-requisite ENABLED_SERVICES=rabbit,mysql,key # Horizon From 12fcd619302556d98a8b8191a97c914bd014bb3f Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Thu, 7 Sep 2017 13:36:00 -0500 Subject: [PATCH 0663/1936] doc: How to pdb under systemd Add a section to the systemd doc describing how to use regular pdb in a systemd environment. Change-Id: Ib1f616f407eccc087de1c60624fa74e2555971c2 --- doc/source/systemd.rst | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 60a7719262..f9971abf5a 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -98,8 +98,7 @@ Follow logs for a specific service:: Following logs for multiple services simultaneously:: - journalctl -f --unit devstack@n-cpu.service --unit - devstack@n-cond.service + journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service or you can even do wild cards to follow all the nova services:: @@ -121,6 +120,29 @@ left/right arrow keys. See ``man 1 journalctl`` for more. +Debugging with pdb +================== + +In order to break into a regular pdb session on a systemd-controlled +service, you need to invoke the process manually - that is, take it out +of systemd's control. + +Discover the command systemd is using to run the service:: + + systemctl show devstack@n-sch.service -p ExecStart --no-pager + +Stop the systemd service:: + + sudo systemctl stop devstack@n-sch.service + +Inject your breakpoint in the source, e.g.:: + + import pdb; pdb.set_trace() + +Invoke the command manually:: + + /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf + Known Issues ============ From 16ab25c187aa7972f85e96197cf182b009bb8465 Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Thu, 7 Sep 2017 15:44:34 -0500 Subject: [PATCH 0664/1936] doc: How to remote-pdb under systemd Add instructions for installing and enabling remote-pdb [1] under systemd. [1] https://pypi.python.org/pypi/remote-pdb Thanks to clarkb for pointing me to this. TIL. Change-Id: I640ac36cfbcc5b199e911c0e3f6b18705c3fbbc4 --- doc/source/systemd.rst | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index f9971abf5a..c1d2944057 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -120,8 +120,11 @@ left/right arrow keys. See ``man 1 journalctl`` for more. 
-Debugging with pdb
-==================
+Debugging
+=========
+
+Using pdb
+---------
 
 In order to break into a regular pdb session on a systemd-controlled
 service, you need to invoke the process manually - that is, take it out
@@ -143,6 +146,37 @@ Invoke the command manually::
 
     /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
 
+Using remote-pdb
+----------------
+
+`remote-pdb`_ works while the process is under systemd control.
+
+Make sure you have remote-pdb installed::
+
+    sudo pip install remote-pdb
+
+Inject your breakpoint in the source, e.g.::
+
+    import remote_pdb; remote_pdb.set_trace()
+
+Restart the relevant service::
+
+    sudo systemctl restart devstack@n-api.service
+
+The remote-pdb code configures the telnet port when ``set_trace()`` is
+invoked. Do whatever it takes to hit the instrumented code path, and
+inspect the logs for a message displaying the listening port::
+
+    Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ...
+
+Telnet to that port to enter the pdb session::
+
+    telnet 127.0.0.1 46771
+
+See the `remote-pdb`_ home page for more options.
+
+.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb
+
 Known Issues
 ============
 

From ef60f2b7180f269a53f405efa7449618eddf86a1 Mon Sep 17 00:00:00 2001
From: Dinesh Bhor
Date: Tue, 5 Sep 2017 14:40:32 +0530
Subject: [PATCH 0665/1936] Enable reloadable config in services

uwsgi services:

* By default uwsgi is set to exit on reload, which breaks config
  reloading of the service [1][2]; it needs to be set to 'false'.
* 'systemctl reload' support requires adding ExecReload to the unit
  file.

Non-uwsgi services:

* These only require adding ExecReload to the unit file.

A similar patch was submitted by Matthew Treinish [3]; it already had
workflow +1 but was not merged because it depended on another patch,
it had some issues noted in the review comments, and it was missing
reload functionality for other services.
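
As a sketch of the intended workflow (the unit name below is only an example;
any devstack-managed unit is expected to behave the same way):

    # Reload a service's configuration without a full restart;
    # systemd runs the configured ExecReload, i.e. kill -HUP $MAINPID
    sudo systemctl reload devstack@keystone.service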
[1] https://etherpad.openstack.org/p/uwsgi-issues [2] http://uwsgi-docs.readthedocs.io/en/latest/Options.html#exit-on-reload [3] https://review.openstack.org/#/c/490904/2 Change-Id: I78f5e9d4574671c74a52af64724946feb41c2d7a --- functions-common | 3 +++ lib/apache | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 52f53ef147..1e6ae5cc76 100644 --- a/functions-common +++ b/functions-common @@ -45,6 +45,7 @@ declare -A -g GITBRANCH declare -A -g GITDIR TRACK_DEPENDS=${TRACK_DEPENDS:-False} +KILL_PATH="$(which kill)" # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ @@ -1453,6 +1454,7 @@ function write_user_unit_file { iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "TimeoutStopSec" "infinity" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1474,6 +1476,7 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" iniset -sudo $unitfile "Service" "Type" "notify" iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "Restart" "always" diff --git a/lib/apache b/lib/apache index 39d5b7b071..5dc0e98192 100644 --- a/lib/apache +++ b/lib/apache @@ -259,7 +259,7 @@ function write_uwsgi_config { iniset "$file" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload true + iniset "$file" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT @@ -316,7 +316,7 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload true + iniset "$file" uwsgi exit-on-reload false iniset "$file" uwsgi enable-threads true iniset "$file" uwsgi plugins python # uwsgi recommends this to prevent thundering herd on accept. From cbd5f4e0ad2ccc5903d12df73e3f0b033df1b3ae Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 10 Sep 2017 15:00:29 -0600 Subject: [PATCH 0666/1936] Check that generate-subunit exists before using it If stack.sh fails before os-testr is installed, the generate-subunit command won't exist. Change-Id: I7998ed81e419e25d183e5a780df0b4459cca237c --- stack.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 301e1e7c6b..10ffc5e23c 100755 --- a/stack.sh +++ b/stack.sh @@ -537,14 +537,20 @@ function exit_trap { if [[ $r -ne 0 ]]; then echo "Error on exit" - generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + # If we error before we've installed os-testr, which will fail. 
+ if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + fi if [[ -z $LOGDIR ]]; then $TOP_DIR/tools/worlddump.py else $TOP_DIR/tools/worlddump.py -d $LOGDIR fi else - generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + # If we error before we've installed os-testr, which will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + fi fi exit $r From 35a0c573b36236c8d73a0699cc7882170787f494 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sun, 10 Sep 2017 15:37:56 -0700 Subject: [PATCH 0667/1936] Update gate clone error message for Zuul v3 Change-Id: I351de28aa5d5f25187953f1a4502445bc2ce76cc --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index deebdec249..c0fbcafa0c 100644 --- a/functions-common +++ b/functions-common @@ -519,7 +519,7 @@ function git_clone { if [[ ! -d $git_dest ]]; then if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" - echo "the project to the \$PROJECTS variable in the job definition." + echo "the project to 'required-projects' in the job definition." die $LINENO "Cloning not allowed in this configuration" fi git_timed clone $git_clone_flags $git_remote $git_dest From 02f9e8bbdddd69603cbb0afc93279c4151afd5f5 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sun, 10 Sep 2017 02:51:10 +0200 Subject: [PATCH 0668/1936] Replace pmap shellout with pure python implementation Without this patch, the pmap -XX call fails on openSUSE Leap distributions as those have a fairly ancient procps version that does not support the -XX parameter. A pure python implementation is more portable, faster and even shorter than the subprocess call. Closes-Bug: #1716066 Change-Id: I2fdb457e65359a1c9d40452c922cfdca0e6e74dc --- tools/mlock_report.py | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/tools/mlock_report.py b/tools/mlock_report.py index 2169cc2dce..07716b04d6 100755 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -3,12 +3,12 @@ # This tool lists processes that lock memory pages from swapping to disk. 
import re -import subprocess import psutil -SUMMARY_REGEX = re.compile(b".*\s+(?P[\d]+)\s+KB") +LCK_SUMMARY_REGEX = re.compile( + "^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) def main(): @@ -22,28 +22,21 @@ def main(): def _get_report(): mlock_users = [] for proc in psutil.process_iter(): - pid = proc.pid # sadly psutil does not expose locked pages info, that's why we - # call to pmap and parse the output here + # iterate over the /proc/%pid/status files manually try: - out = subprocess.check_output(['pmap', '-XX', str(pid)]) - except subprocess.CalledProcessError as e: - # 42 means process just vanished, which is ok - if e.returncode == 42: - continue - raise - last_line = out.splitlines()[-1] - - # some processes don't provide a memory map, for example those - # running as kernel services, so we need to skip those that don't - # match - result = SUMMARY_REGEX.match(last_line) - if result: - locked = int(result.group('locked')) - if locked: - mlock_users.append({'name': proc.name(), - 'pid': pid, - 'locked': locked}) + s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r') + except EnvironmentError: + continue + with s: + for line in s: + result = LCK_SUMMARY_REGEX.search(line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': proc.pid, + 'locked': locked}) # produce a single line log message with per process mlock stats if mlock_users: From e43dfdd453cd8d2c71f63ebbc96ad261d13e48eb Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 12 Sep 2017 16:13:26 -0600 Subject: [PATCH 0669/1936] Change lib/neutron-legacy to not enable linuxbridge for DVR DVR isn't supported by the Linux Bridge agent, but the mechanism driver is enabled by default, so Neutron attempts port-bindings for it, generating ERRORS in the neutron-server log in the check and gate jobs. Just remove it in the DVR case. Change-Id: Ic50e12e5fecf366a182c141b5c99649e653254cb Closes-bug: #1716782 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index f9e0bd6ded..0e326d600a 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -168,7 +168,7 @@ fi # Q_DVR_MODE=${Q_DVR_MODE:-legacy} if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population fi # Provider Network Configurations From 941940a92cd5424bbe5125569b083f1059569bdb Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Wed, 13 Sep 2017 00:24:02 +0200 Subject: [PATCH 0670/1936] Add etcd3 support for s390x Since [1] devstack is failing on s390x with "Distro not supported". The reason for this is the missing etcd3 support. It worked before [1] as we were able to disable etcd3 via local.conf. But as etcd3 is a base service, we might not be able to rely on this workarond in the future anymore. As there is no etcd3 binary hosted on github like it is for other architectures, the user needs to specify an alternative download URL via local.conf. Otherwise devstack will exit with an appropriate error message. 
ETCD_DOWNLOAD_URL= [1] https://github.com/openstack-dev/devstack/commit/d8bb220606737719bcdf7c5b4f54906f2974c71c Change-Id: I1c378a0456dcf2e94d79a02de9d3e16753d946d6 Partial-Bug: #1693192 --- doc/source/configuration.rst | 15 ++++++++++++--- stackrc | 11 +++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index c4834b7a84..23f680a59c 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -779,9 +779,15 @@ are needed:: DOWNLOAD_DEFAULT_IMAGES=False IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img" + # Provide a custom etcd3 binary download URL and ints sha256. + # The binary must be located under '//etcd--linux-s390x.tar.gz' + # on this URL. + # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd + ETCD_DOWNLOAD_URL= + ETCD_SHA256= + enable_service n-sproxy disable_service n-novnc - disable_service etcd3 # https://bugs.launchpad.net/devstack/+bug/1693192 [[post-config|$NOVA_CONF]] @@ -803,8 +809,11 @@ Reasoning: needed if you want to use the *serial console* outside of the all-in-one setup. -* The service ``etcd3`` needs to be disabled as long as bug report - https://bugs.launchpad.net/devstack/+bug/1693192 is not resolved. +* A link to an etcd3 binary and its sha256 needs to be provided as the + binary for s390x is not hosted on github like it is for other + architectures. For more details see + https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be + built along https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd. .. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need to use a guest image which is smaller than 1GB when uncompressed. diff --git a/stackrc b/stackrc index c2bbe21aca..92a939f164 100644 --- a/stackrc +++ b/stackrc @@ -719,6 +719,7 @@ ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3 # NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. ETCD_SHA256_ARM64="" ETCD_SHA256_PPC64="" +ETCD_SHA256_S390X="" # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" @@ -729,6 +730,16 @@ elif is_arch "aarch64"; then elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} +elif is_arch "s390x"; then + # An etcd3 binary for s390x is not available on github like it is + # for other arches. Only continue if a custom download URL was + # provided. + if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} + else + exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided." 
+ fi else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi From c04ac03e21546a3d25a2b6794802c4988f4ff532 Mon Sep 17 00:00:00 2001 From: Gyorgy Szombathelyi Date: Tue, 23 May 2017 16:52:35 +0200 Subject: [PATCH 0671/1936] Change deprecated nova settings Change-Id: I0d129150411ede344c9b710d1aca5af1df3478b6 --- lib/cinder | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/lib/cinder b/lib/cinder index bc0c13fb88..59bcf8235c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -230,16 +230,6 @@ function configure_cinder { configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR - # Change the default nova_catalog_info and nova_catalog_admin_info values in - # cinder so that the service name cinder is searching for matches that set for - # nova in keystone. - if [[ -n "$CINDER_NOVA_CATALOG_INFO" ]]; then - iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO - fi - if [[ -n "$CINDER_NOVA_CATALOG_ADMIN_INFO" ]]; then - iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO - fi - iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -254,8 +244,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" - iniset $CINDER_CONF key_manager api_class cinder.keymgr.conf_key_mgr.ConfKeyManager if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -332,10 +320,9 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_api_version 2 fi - # Set os_privileged_user credentials (used for os-assisted-snapshots) - iniset $CINDER_CONF DEFAULT os_privileged_user_name nova - iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" - iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" + # Set nova credentials (used for os-assisted-snapshots) + configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova + iniset $CINDER_CONF nova region_name "$REGION_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then From 868746b5dac21e4c97a03f53459fca4e5aba73db Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 13 Sep 2017 15:44:18 -0600 Subject: [PATCH 0672/1936] lib/neutron: fix metering agent startup with systemd With systemd, we now require absolute paths passed to run_process. Change-Id: Iabd425a6d0ebaaaa5b13bc4a7b062158940958e6 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 2ffabd416b..a672d399ea 100644 --- a/lib/neutron +++ b/lib/neutron @@ -443,7 +443,7 @@ function start_neutron_new { fi if is_service_enabled neutron-metering; then - run_process neutron-metering "$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" + run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" fi } From 387aadd14e87f2890ad8969c15edbe9f1d218a5a Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 14 Sep 2017 09:25:59 -0600 Subject: [PATCH 0673/1936] Install neutron rpms/debs when lib/neutron is used lib/neutron service prefixes are neutron-* not q-*. We should install those packages either way. 
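
A minimal sketch of the prefix matching this relies on (the service names
below are arbitrary examples):

    # Both legacy q-* names and new neutron-* names should hit the same
    # branch and pull in the files/*/neutron-common package list
    for service in q-svc q-dhcp neutron-api neutron-agent; do
        if [[ $service == q-* || $service == neutron-* ]]; then
            echo "$service -> install packages from files/*/neutron-common"
        fi
    done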
The patch moves files/*/neutron into files/*/neutron-common so that we can correctly match */neutron against service specific dependency files (f.e. */neutron-agent) and load the common packages if any neutron-* service is present. Change-Id: I57b36f2ed3f33737223a35d9ed734bb414f31e0b --- files/debs/{neutron => neutron-common} | 0 files/rpms-suse/{neutron => neutron-common} | 0 files/rpms/{neutron => neutron-common} | 0 functions-common | 6 +++--- 4 files changed, 3 insertions(+), 3 deletions(-) rename files/debs/{neutron => neutron-common} (100%) rename files/rpms-suse/{neutron => neutron-common} (100%) rename files/rpms/{neutron => neutron-common} (100%) diff --git a/files/debs/neutron b/files/debs/neutron-common similarity index 100% rename from files/debs/neutron rename to files/debs/neutron-common diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron-common similarity index 100% rename from files/rpms-suse/neutron rename to files/rpms-suse/neutron-common diff --git a/files/rpms/neutron b/files/rpms/neutron-common similarity index 100% rename from files/rpms/neutron rename to files/rpms/neutron-common diff --git a/functions-common b/functions-common index deebdec249..33639c5936 100644 --- a/functions-common +++ b/functions-common @@ -1211,9 +1211,9 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then file_to_parse="${file_to_parse} ${package_dir}/keystone" fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then - file_to_parse="${file_to_parse} ${package_dir}/neutron" + elif [[ $service == q-* || $service == neutron-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then + file_to_parse="${file_to_parse} ${package_dir}/neutron-common" fi elif [[ $service == ir-* ]]; then if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then From 4324f4ecd99f3cc50dea585bc8b5a680327e54bc Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 14 Sep 2017 12:59:25 -0600 Subject: [PATCH 0674/1936] Fix devstack python detection if python3 is missing On platforms without python3 installed devstack fails the python version check. This does it more gracefully. Change-Id: I4d79a41eb2d66852ab1a1aa8bd383f8e3e89cdc0 --- functions-common | 5 ++++- stackrc | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index deebdec249..b9bf0c9788 100644 --- a/functions-common +++ b/functions-common @@ -2080,7 +2080,10 @@ function is_provider_network { function _get_python_version { local interp=$1 local version - version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + # disable erroring out here, otherwise if python 3 doesn't exist we fail hard. + if [[ -x $(which $interp) ]]; then + version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + fi echo ${version} } diff --git a/stackrc b/stackrc index 92a939f164..d60e186448 100644 --- a/stackrc +++ b/stackrc @@ -131,11 +131,11 @@ export DISABLED_PYTHON3_PACKAGES="" # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" -export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION}} +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}} # Just to be more explicit on the Python 2 version to use. 
_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)" -export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION}} +export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}} # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then From df17927fe84b4e50d5bc7376b6ff12bb70767a61 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 15 Sep 2017 10:09:54 +0000 Subject: [PATCH 0675/1936] Updated from generate-devstack-plugins-list Change-Id: I3012b07f87a1d9c432e1b97b7400fae693c9331a --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2c8805ff16..6aa2e93739 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -39,6 +39,7 @@ cloudkitty `git://git.openstack.org/openstack/cloudk collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin `__ congress `git://git.openstack.org/openstack/congress `__ cue `git://git.openstack.org/openstack/cue `__ +cyborg `git://git.openstack.org/openstack/cyborg `__ designate `git://git.openstack.org/openstack/designate `__ devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 `__ From 6bd4924cec73908225e6744b1b00dd86cb39d3ed Mon Sep 17 00:00:00 2001 From: Javier Pena Date: Fri, 15 Sep 2017 15:55:00 +0200 Subject: [PATCH 0676/1936] Ignore stderr when checking for Python version https://review.openstack.org/504171 prevented Python version detection from failing when python3 is not installed. However, "which python3" returns a message in stderr when python3 is not there, and this output can make diskimage-builder get confused when parsing source-repository-images. Change-Id: Idb649dc341ede73c39954b0432ef3cacf379ed37 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 283a6cf3d5..e53c72dc6e 100644 --- a/functions-common +++ b/functions-common @@ -2081,7 +2081,7 @@ function _get_python_version { local interp=$1 local version # disable erroring out here, otherwise if python 3 doesn't exist we fail hard. - if [[ -x $(which $interp) ]]; then + if [[ -x $(which $interp 2> /dev/null) ]]; then version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') fi echo ${version} From 5ad4e58868bf48a72841bee313fd2aae61b6ddf7 Mon Sep 17 00:00:00 2001 From: "John L. Villalovos" Date: Thu, 7 Sep 2017 15:33:57 -0700 Subject: [PATCH 0677/1936] Display unaccounted time in "DevStack Component Timing" In the "DevStack Component Timing" section, display the unaccounted time. Also add the units (seconds) to the output to make it clear to viewers. 
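
With this change the end-of-run summary is expected to look roughly like the
following (the component names come from the existing timing hooks; the
numbers are invented purely for illustration):

    =========================
    DevStack Component Timing
     (times are in seconds)
    =========================
    run_process           28
    apt-get               94
    dbsync                17
    -------------------------
    Unaccounted time     351
    =========================
    Total runtime        490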
Change-Id: Iaca82cc54a355f7077e20e548b771e53387f6628 --- functions-common | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 4fc7c6db92..f2b8d98b39 100644 --- a/functions-common +++ b/functions-common @@ -2270,11 +2270,13 @@ function cleanup_oscwrap { function time_totals { local elapsed_time local end_time - local len=15 + local len=20 local xtrace + local unaccounted_time end_time=$(date +%s) elapsed_time=$(($end_time - $_TIME_BEGIN)) + unaccounted_time=$elapsed_time # pad 1st column this far for t in ${!_TIME_TOTAL[*]}; do @@ -2291,16 +2293,19 @@ function time_totals { echo echo "=========================" echo "DevStack Component Timing" + echo " (times are in seconds) " echo "=========================" - printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" - echo for t in ${!_TIME_TOTAL[*]}; do local v=${_TIME_TOTAL[$t]} # because we're recording in milliseconds v=$(($v / 1000)) printf "%-${len}s %3d\n" "$t" "$v" + unaccounted_time=$(($unaccounted_time - $v)) done + echo "-------------------------" + printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time" echo "=========================" + printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" $xtrace } From b2330c89196c65662fcf98a2295b7e41b1652b28 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 19 Sep 2017 09:10:21 +0000 Subject: [PATCH 0678/1936] Fix memcached_servers setting By default memcached is bound to 127.0.0.1 and we have no code in place to change that. So instead of using the $SERVICE_HOST variable, we hardcode it to localhost, just as we do for the cache settings, see [1]. This also avoids a bug that occurs when $SERVICE_HOST contains an IPv6 address, as in that case it would have to be prefixed by "inet6:" [2]. [1] I95d798d122e2a95e27eb1d2c4e786c3cd844440b [2] https://bugs.launchpad.net/swift/+bug/1610064 Change-Id: I46bed8a048f4b0d669dfc65b28ddeb36963553e0 Partial-Bug: 1656329 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index c38d953742..870a1fa992 100644 --- a/lib/keystone +++ b/lib/keystone @@ -450,7 +450,7 @@ function configure_auth_token_middleware { iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section signing_dir $signing_dir - iniset $conf_file $section memcached_servers $SERVICE_HOST:11211 + iniset $conf_file $section memcached_servers localhost:11211 } # init_keystone() - Initialize databases, etc. From ee22ca8373abd3b5a4c44a9c5c4da39c511195c8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 20 Sep 2017 00:29:36 +0000 Subject: [PATCH 0679/1936] Revert "Update to using pike cloud-archive" This reverts commit a7e9a5d447b3eeacfb52d7ddc94445058a8d6fd1. The jobs that run live migration tests are failing at about a rate of 50% since this merged. There are no recent changes to nova in the last 24 hours that are related to live migration, and this is failing on the master branch only, so I suspect the failures are due to new qemu packages getting pulled in from this change. Change-Id: Ic8481539c6a0cc7af08a736a625b672979435908 Closes-Bug: #1718295 --- tools/fixup_stuff.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index efe0125741..f1552ab2f7 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -84,10 +84,10 @@ if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; the # we can find local mirrors then use that mirror. 
source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main" + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main" else # Otherwise use upstream UCA - sudo add-apt-repository -y cloud-archive:pike + sudo add-apt-repository -y cloud-archive:ocata fi # Disable use of libvirt wheel since a cached wheel build might be From 7617ac208596665061453dfa04b75e4cc8a76ac9 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 19 Sep 2017 17:43:48 +0000 Subject: [PATCH 0680/1936] Make is_ipv4_address a bit more robust Still not ideal, but at least should avoid matching IPv6 addresses. Change-Id: Ibb64263fdb0308f56c18518289501dd4642dcbad --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 713d92e717..ffdacef32e 100644 --- a/functions-common +++ b/functions-common @@ -2014,7 +2014,7 @@ function cidr2netmask { # Check if this is a valid ipv4 address string function is_ipv4_address { local address=$1 - local regex='([0-9]{1,3}.){3}[0-9]{1,3}' + local regex='([0-9]{1,3}\.){3}[0-9]{1,3}' # TODO(clarkb) make this more robust if [[ "$address" =~ $regex ]] ; then return 0 From 146332e349416ac0b3c9653b0ae68d55dbb3f9de Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 20 Sep 2017 06:18:08 +0000 Subject: [PATCH 0681/1936] Make etcd3 setup work with IPv6 addresses The client are told to connect to SERVICE_HOST instead of HOST_IP, so we need to start etcd3 with matching listening parameters. Change-Id: I96389090180d21d25d72df8f9e8905b850bcaee9 Partial-Bug: 1656329 --- lib/etcd3 | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/etcd3 b/lib/etcd3 index 60e827add2..e6a04f11de 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -40,9 +40,13 @@ function start_etcd3 { cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380" cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380" - cmd+=" --advertise-client-urls http://${HOST_IP}:$ETCD_PORT" - cmd+=" --listen-peer-urls http://0.0.0.0:2380 " - cmd+=" --listen-client-urls http://${HOST_IP}:$ETCD_PORT" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then + cmd+=" --listen-peer-urls http://[::]:2380 " + else + cmd+=" --listen-peer-urls http://0.0.0.0:2380 " + fi + cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" From 917ad0998be8c48bfcc0e3031bc1b75cd9ed1927 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 20 Sep 2017 14:46:48 +0000 Subject: [PATCH 0682/1936] Update to using pike cloud-archive This reverts commit ee22ca8373abd3b5a4c44a9c5c4da39c511195c8 Depends-On: Iae2962bb86100f03fd3ad9aac3767da876291e74 Change-Id: I4d5fa052bdc5eef1795f6507589e2eaf4e093e23 Related-Bug: #1718295 --- tools/fixup_stuff.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f1552ab2f7..efe0125741 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -84,10 +84,10 @@ if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; the # we can find local mirrors then use that mirror. 
source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main" + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main" else # Otherwise use upstream UCA - sudo add-apt-repository -y cloud-archive:ocata + sudo add-apt-repository -y cloud-archive:pike fi # Disable use of libvirt wheel since a cached wheel build might be From e3915938f35737fc231ab7b8b9981dae9048438e Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 24 Feb 2017 06:24:47 +0000 Subject: [PATCH 0683/1936] Configure bridge_mappings for ovs and linuxbridge agents Otherwise neutron will fail to bind external ports because of missing entries for external physical network in the mapping. Configure it only when l3 agent is also installed on the node (otherwise the l2 agent is not exposed to external network and hence doesn't have the bridge). Change-Id: I561b74538acb0dc39f1af3e832108ce6a99441b0 --- lib/neutron | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/neutron b/lib/neutron index f277062b32..6da1705525 100644 --- a/lib/neutron +++ b/lib/neutron @@ -211,6 +211,13 @@ function configure_neutron_new { iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + + # Configure the neutron agent to serve external network ports + if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then + iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" + else + iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" + fi fi # Metadata From c652a498fb6ae1cdd7480b590be0f1e843768666 Mon Sep 17 00:00:00 2001 From: yuanke wei Date: Sun, 17 Sep 2017 22:18:07 +0800 Subject: [PATCH 0684/1936] Fix partial download problem (1) when checksum fails, better delete the broken files and try the second time; (2) amazon s3 is not good in mainland China, better try one more time with wget Change-Id: I24ee73f216b78bd80564863cd335e5d5a9b56360 --- functions | 2 +- lib/etcd3 | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 33a0e6aaf5..8b69c73bba 100644 --- a/functions +++ b/functions @@ -63,7 +63,7 @@ function get_extra_file { if [[ $file_url != file* ]]; then # If the file isn't cache, download it if [[ ! -f $FILES/$file_name ]]; then - wget --progress=dot:giga -c $file_url -O $FILES/$file_name + wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name if [[ $? -ne 0 ]]; then die "$file_url could not be downloaded" fi diff --git a/lib/etcd3 b/lib/etcd3 index 60e827add2..934bbb004f 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -98,8 +98,8 @@ function install_etcd3 { etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)" if [ ! 
-f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum - # NOTE(sdague): this should go fatal if this fails - sha256sum -c $FILES/etcd.sha256sum + # NOTE(yuanke wei): rm the damaged file when checksum fails + sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1) tar xzvf $etcd_file -C $FILES sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd From c4b0059c6b9818e622f9fb010719d6c92de9d91d Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 22 Sep 2017 15:07:36 -0400 Subject: [PATCH 0685/1936] Disable the v2.0 identity API by default The v2.0 identity API is being removed in the Queens release, but in order to do so we need to stop some v2.0 tempests tests from being run. This commit switches the default to disable the keystone v2 api. In a future commit after the removal of the api from keystone the bits to deploy the v2 api will be removed. Change-Id: I5afcba6321f496b8170be27789bee7c9ad8eacce --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0ffcb67219..e7e584b8f2 100644 --- a/stackrc +++ b/stackrc @@ -184,7 +184,7 @@ IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} # will to be set to ``3`` in order to make DevStack register the Identity # endpoint as v3. This flag is experimental and will be used as basis to # identify the projects which still have issues to operate with Identity v3. -ENABLE_IDENTITY_V2=$(trueorfalse True ENABLE_IDENTITY_V2) +ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2) if [ "$ENABLE_IDENTITY_V2" == "False" ]; then IDENTITY_API_VERSION=3 fi From 03ae3c485e497296146a102f89853fd4063a20db Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 19 Sep 2017 14:22:19 -0500 Subject: [PATCH 0686/1936] Fix comment wording nit Change-Id: I558522121099f14d380c163a2c397b34335b03df --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 10ffc5e23c..08cdbe422b 100755 --- a/stack.sh +++ b/stack.sh @@ -537,7 +537,7 @@ function exit_trap { if [[ $r -ne 0 ]]; then echo "Error on exit" - # If we error before we've installed os-testr, which will fail. + # If we error before we've installed os-testr, this will fail. if type -p generate-subunit > /dev/null; then generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} fi @@ -547,7 +547,7 @@ function exit_trap { $TOP_DIR/tools/worlddump.py -d $LOGDIR fi else - # If we error before we've installed os-testr, which will fail. + # If we error before we've installed os-testr, this will fail. if type -p generate-subunit > /dev/null; then generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} fi From 66a14df49ebe99805e6a332a2c346ba3e1290a68 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 22 Sep 2017 20:51:38 -0400 Subject: [PATCH 0687/1936] doc: prefix journalctl examples with sudo In a devstack environment you likely need to use sudo to run the journalctl command, so this adds that to the examples. Change-Id: Ibe6b71285a3014e80e06a50130f18bfbdb4ff3ab --- doc/source/systemd.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index c1d2944057..523d399c62 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -94,25 +94,25 @@ query facilities. We'll start with some common options. 
Follow logs for a specific service:: - journalctl -f --unit devstack@n-cpu.service + sudo journalctl -f --unit devstack@n-cpu.service Following logs for multiple services simultaneously:: - journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service + sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service or you can even do wild cards to follow all the nova services:: - journalctl -f --unit devstack@n-* + sudo journalctl -f --unit devstack@n-* Use higher precision time stamps:: - journalctl -f -o short-precise --unit devstack@n-cpu.service + sudo journalctl -f -o short-precise --unit devstack@n-cpu.service By default, journalctl strips out "unprintable" characters, including ASCII color codes. To keep the color codes (which can be interpreted by an appropriate terminal/pager - e.g. ``less``, the default):: - journalctl -a --unit devstack@n-cpu.service + sudo journalctl -a --unit devstack@n-cpu.service When outputting to the terminal using the default pager, long lines appear to be truncated, but horizontal scrolling is supported via the From 3a59c9d7e3e54f3d3d4e64cd6c42fc1572d9ca17 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 23 Sep 2017 14:37:58 +0200 Subject: [PATCH 0688/1936] Remove deprecated forbid_global_implied_dsr This setting was only useful for Mitaka and older, however Mitaka is now EOL. Change-Id: Ia1eb87d68a6265d8541cb87a88523246885e5a8a --- lib/tempest | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index f086f9a65d..96a170839b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,10 +293,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True fi - # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed in - # Newton and Ocata. This option can be removed after Mitaka is end of life. - iniset $TEMPEST_CONFIG identity-feature-enabled forbid_global_implied_dsr True - # When LDAP is enabled domain specific drivers are also enabled and the users # and groups identity tests must adapt to this scenario if is_service_enabled ldap; then From 5ef8a125cc3a8b8f461a9636001c0ad7f6f88957 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 25 Sep 2017 13:08:51 +0200 Subject: [PATCH 0689/1936] Install minimal lsb-release package on openSUSE lsb-release is a dependency of "lsb", so it used to work before just fine as well, but it was installing about 300MB of "stuff" that we don't actually need.. Change-Id: I25c7c750cbaeb40bf4f2e8695608c4b1003289ea --- functions-common | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/functions-common b/functions-common index 1b8ca96fd4..a09f972e6e 100644 --- a/functions-common +++ b/functions-common @@ -319,10 +319,7 @@ function _ensure_lsb_release { if [[ -x $(command -v apt-get 2>/dev/null) ]]; then sudo apt-get install -y lsb-release elif [[ -x $(command -v zypper 2>/dev/null) ]]; then - # XXX: old code paths seem to have assumed SUSE platforms also - # had "yum". Keep this ordered above yum so we don't try to - # install the rh package. 
suse calls it just "lsb" - sudo zypper -n install lsb + sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then sudo dnf install -y redhat-lsb-core elif [[ -x $(command -v yum 2>/dev/null) ]]; then From 07a8823d4f42ceb8146cf5eb8b1fffcbb4599e25 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 25 Sep 2017 11:36:20 +0000 Subject: [PATCH 0690/1936] Update default etcd version There have been a couple of new stable releases in the meantime, update to using v3.1.10 which is the currently latest stable version. Change-Id: Ifa1421c9f12af9753052f992929deb7ebd45e804 --- stackrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackrc b/stackrc index 0ffcb67219..44a42a1ed9 100644 --- a/stackrc +++ b/stackrc @@ -714,9 +714,9 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.1.7} -ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52" -# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does. +ETCD_VERSION=${ETCD_VERSION:-v3.1.10} +ETCD_SHA256_AMD64="2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be" +# NOTE(sdague): etcd v3.1.10 doesn't have anything for these architectures, though 3.2.x does. ETCD_SHA256_ARM64="" ETCD_SHA256_PPC64="" ETCD_SHA256_S390X="" From 721f7c2cd0f8c1e4dba95f5f6be7ff342b355f01 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 25 Sep 2017 13:53:10 +0200 Subject: [PATCH 0691/1936] Switch DEVSTACK_SERIES to queens pike is handled in stable/pike branch for some time already. Change-Id: I47dc5bf661c50b3b05c19ad665c2671f807233ae --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0ffcb67219..4901009c09 100644 --- a/stackrc +++ b/stackrc @@ -246,7 +246,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="pike" +DEVSTACK_SERIES="queens" ############## # From 062829c3f600bef1187c3d8a4baca6f66327b62a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 20 Sep 2017 22:49:44 +0200 Subject: [PATCH 0692/1936] Drop MySQL-python installation from packages We want that one to be installed via pip, if we still use it (by default PyMySQL is used, which is already installed via pip as well). 
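
For context, the pure-Python driver is selected through the SQLAlchemy URL
that devstack writes into each service's configuration; a rough sketch (the
config file and database name here are only examples):

    # Illustrative only: the mysql+pymysql:// dialect uses the pip-installed
    # PyMySQL driver, whereas the dropped MySQL-python C bindings would have
    # been picked up by a plain mysql:// URL
    BASE_SQL_CONN="mysql+pymysql://root:$DATABASE_PASSWORD@127.0.0.1"
    iniset $NOVA_CONF database connection "$BASE_SQL_CONN/nova?charset=utf8"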
Change-Id: I76454aa7f84379aa387b144686bcfaa327b141ed --- files/rpms/keystone | 1 - files/rpms/neutron-common | 1 - files/rpms/nova | 1 - 3 files changed, 3 deletions(-) diff --git a/files/rpms/keystone b/files/rpms/keystone index 170308373a..5f19c6f70c 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,4 +1,3 @@ memcached mod_ssl -MySQL-python sqlite diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common index a4e029a6eb..0cc8d11ceb 100644 --- a/files/rpms/neutron-common +++ b/files/rpms/neutron-common @@ -6,7 +6,6 @@ haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils mysql-devel -MySQL-python mysql-server # NOPRIME openvswitch # NOPRIME rabbitmq-server # NOPRIME diff --git a/files/rpms/nova b/files/rpms/nova index 632e796d07..64ed480632 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -12,7 +12,6 @@ kpartx libxml2-python m2crypto mysql-devel -MySQL-python mysql-server # NOPRIME numpy # needed by websockify for spice console parted From 32712717788d9ff373afd4ecd20b3c4f9079b260 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 22 Sep 2017 07:49:15 -0500 Subject: [PATCH 0693/1936] Change Cinder api_class setting to backend Castellan switched the `api_class` config option to `backend` in commit 8980bf7da55dd084ad84c84534fe937f0d43b9c0. The old setting will still be recognized for now, but we should switch to using the new, correct config option. Change-Id: I5e46c738531d5d56777e91a00f4cee9531356f2e --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 7a6caf345b..07f82a1580 100644 --- a/lib/cinder +++ b/lib/cinder @@ -243,7 +243,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - iniset $CINDER_CONF key_manager api_class cinder.keymgr.conf_key_mgr.ConfKeyManager + iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" From 5085dc0fa50da4a78820c814005f89ab02d36d84 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 22 Sep 2017 20:54:39 -0400 Subject: [PATCH 0694/1936] doc: add journalctl example for grepping the logs I had to dig around for awhile to figure this out, so this adds an example on how to grep journalctl nova logs for a server instance UUID. Change-Id: I6a5c47fbcba3af1822e2f9efc2ac20ebe0387f3f --- doc/source/systemd.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 523d399c62..c9614db8a6 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -118,6 +118,11 @@ When outputting to the terminal using the default pager, long lines appear to be truncated, but horizontal scrolling is supported via the left/right arrow keys. +You can pipe the output to another tool, such as ``grep``. For +example, to find a server instance UUID in the nova logs:: + + sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6 + See ``man 1 journalctl`` for more. Debugging From ef5ebed6c9ca3d9d47fd2a732a1542555a0f65ba Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Mon, 25 Sep 2017 09:38:38 +1000 Subject: [PATCH 0695/1936] Remove cache dirs from the services PKI tokens have been actively deprecated from keystone and there are deprecations being emitted from keystonemiddleware. 
Because of this we no longer need an auth cache directory in the services where the PKI certifcates used to be stored. Remove the creation and use of all these AUTH_CACHE directories. Change-Id: I5680376e70e74882e9fdb87ee1b95d5f40570ad7 --- lib/cinder | 12 +----------- lib/glance | 16 +++------------- lib/keystone | 6 ++---- lib/neutron | 17 +++-------------- lib/neutron-legacy | 6 ++---- lib/nova | 13 ++----------- lib/swift | 7 +------ 7 files changed, 14 insertions(+), 63 deletions(-) diff --git a/lib/cinder b/lib/cinder index 07f82a1580..387fc1ac2c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -51,7 +51,6 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -225,9 +224,8 @@ function configure_cinder { inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password - inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR + configure_auth_token_middleware $CINDER_CONF cinder iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -385,13 +383,6 @@ function create_cinder_accounts { fi } -# create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* -} - # init_cinder() - Initialize database and volume group function init_cinder { if is_service_enabled $DATABASE_BACKENDS; then @@ -420,7 +411,6 @@ function init_cinder { fi mkdir -p $CINDER_STATE_PATH/volumes - create_cinder_cache_dir } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 74734c7516..8241c5f9d7 100644 --- a/lib/glance +++ b/lib/glance @@ -44,7 +44,6 @@ fi GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} -GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -98,7 +97,7 @@ function is_glance_enabled { function cleanup_glance { # kill instances (nova) # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR } # configure_glance() - Set config files, create data dirs, etc @@ -115,7 +114,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry + configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" @@ -127,7 +126,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - 
configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api + configure_auth_token_middleware $GLANCE_API_CONF glance iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -279,13 +278,6 @@ function create_glance_accounts { fi } -# create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact - rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/* -} - # init_glance() - Initialize databases, etc. function init_glance { # Delete existing images @@ -306,8 +298,6 @@ function init_glance { # Load metadata definitions $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs time_stop "dbsync" - - create_glance_cache_dir } # install_glanceclient() - Collect source and prepare diff --git a/lib/keystone b/lib/keystone index 714f089cca..7bd887ca2d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -429,7 +429,7 @@ function create_service_user { # Configure the service to use the auth token middleware. # -# configure_auth_token_middleware conf_file admin_user signing_dir [section] +# configure_auth_token_middleware conf_file admin_user [section] # # section defaults to keystone_authtoken, which is where auth_token looks in # the .conf file. If the paste config file is used (api-paste.ini) then @@ -437,8 +437,7 @@ function create_service_user { function configure_auth_token_middleware { local conf_file=$1 local admin_user=$2 - local signing_dir=$3 - local section=${4:-keystone_authtoken} + local section=${3:-keystone_authtoken} iniset $conf_file $section auth_type password iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI @@ -449,7 +448,6 @@ function configure_auth_token_middleware { iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section cafile $SSL_BUNDLE_FILE - iniset $conf_file $section signing_dir $signing_dir iniset $conf_file $section memcached_servers localhost:11211 } diff --git a/lib/neutron b/lib/neutron index 21c8d4c735..359f19820d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -30,7 +30,6 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} NEUTRON_BIN_DIR=$(get_python_exec_prefix) NEUTRON_DHCP_BINARY="neutron-dhcp-agent" @@ -44,7 +43,6 @@ NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # By default, use the ML2 plugin NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} @@ -175,8 +173,8 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken - configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova + configure_auth_token_middleware $NEUTRON_CONF neutron keystone_authtoken + configure_auth_token_middleware $NEUTRON_CONF nova nova # Configure VXLAN # TODO(sc68cal) not 
hardcode? @@ -250,7 +248,7 @@ function configure_neutron_new { # TODO(dtroyer): remove the v2.0 hard code below iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT + configure_auth_token_middleware $NEUTRON_META_CONF neutron DEFAULT fi # Format logging @@ -337,13 +335,6 @@ function create_neutron_accounts_new { fi } -# create_neutron_cache_dir() - Part of the init_neutron() process -function create_neutron_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* -} - # init_neutron() - Initialize databases, etc. function init_neutron_new { @@ -353,8 +344,6 @@ function init_neutron_new { # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads time_stop "dbsync" - - create_neutron_cache_dir } # install_neutron() - Collect source and prepare diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0ccb17c084..a0e79bc684 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -73,7 +73,6 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then @@ -815,7 +814,7 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova + configure_auth_token_middleware $NEUTRON_CONF nova nova # Configure plugin neutron_plugin_configure_service @@ -906,8 +905,7 @@ function _neutron_setup_keystone { local conf_file=$1 local section=$2 - create_neutron_cache_dir - configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section + configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $section } function _neutron_setup_interface_driver { diff --git a/lib/nova b/lib/nova index 1112f29bd3..dcf2a1c376 100644 --- a/lib/nova +++ b/lib/nova @@ -46,7 +46,6 @@ fi NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} -NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf @@ -240,7 +239,7 @@ function cleanup_nova { sudo rm -rf $NOVA_INSTANCES_PATH/* fi - sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + sudo rm -rf $NOVA_STATE_PATH # NOTE(dtroyer): This really should be called from here but due to the way # nova abuses the _cleanup() function we're moving it @@ -464,7 +463,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT fi - configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR + configure_auth_token_middleware $NOVA_CONF nova fi if is_service_enabled cinder; then @@ -658,13 +657,6 @@ function init_nova_cells { fi } -# create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* -} - function create_nova_conf_nova_network { local 
public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" @@ -722,7 +714,6 @@ function init_nova { done fi - create_nova_cache_dir create_nova_keys_dir if [[ "$NOVA_BACKEND" == "LVM" ]]; then diff --git a/lib/swift b/lib/swift index 1601e2b1f8..ab014de909 100644 --- a/lib/swift +++ b/lib/swift @@ -48,7 +48,6 @@ fi SWIFT_DIR=$DEST/swift -SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT3_DIR=$DEST/swift3 @@ -450,7 +449,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory - configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken + configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False @@ -745,10 +744,6 @@ function init_swift { swift-ring-builder container.builder rebalance 42 swift-ring-builder account.builder rebalance 42 } && popd >/dev/null - - # Create cache dir - sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR - rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift { From 602a057fe0d0b357533feb84d87256edfb7970e2 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 26 Sep 2017 10:15:28 +0200 Subject: [PATCH 0696/1936] Fix dstat dependencies for memory_tracker on SUSE Memory_tracker imports psutil, but does not run inside a pip/virtualenv so the system provided psutil library needs to be provided. This is matching what is done for other non-SUSE distributions Change-Id: I96f944730dc8644333d906d71339351b29b03e08 --- files/rpms-suse/dstat | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat index 2b643b8b1b..0d9da4434f 100644 --- a/files/rpms-suse/dstat +++ b/files/rpms-suse/dstat @@ -1 +1,2 @@ dstat +python-psutil From ba4830b84e529943353e71f375ab6538f37efa02 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 27 Sep 2017 16:45:25 -0400 Subject: [PATCH 0697/1936] Download default image when using VIRT_DRIVER=fake Change f119121d21fa0446197b26378091677daac1606a removed the default image to download which meant if you were using the fake virt driver, no image would get downloaded and tempest setup would fail. This adds it back in but doesn't use a wildcard. The default image is the same as before, but uses the variables that are also used for the default libvirt image case. 
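As an illustration (not part of this patch): assuming the then-current defaults of CIRROS_VERSION=0.3.5 and CIRROS_ARCH=x86_64 (both assumed values here, not taken from this change), the new "fake" branch resolves to the same cirros disk image as the libvirt default:

    CIRROS_VERSION=${CIRROS_VERSION:-0.3.5}
    CIRROS_ARCH=${CIRROS_ARCH:-x86_64}
    DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
    DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
    echo "http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}"
    # prints: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img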
Change-Id: I80eddd0d3a99572ed494b5cd36fed8ceb4d05d77 Closes-Bug: #1720003 --- stackrc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackrc b/stackrc index e7e584b8f2..61555173db 100644 --- a/stackrc +++ b/stackrc @@ -701,6 +701,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz} IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz" IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; + fake) + # Use the same as the default for libvirt + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi From 59251693e7e0e576b26a33633cfff5d92d0a700e Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 28 Sep 2017 11:56:40 +0000 Subject: [PATCH 0698/1936] Add a note about overriding the journalctl nowrap default Hoping that this helps people who consider this as annoying as I do. Change-Id: I45e71301efb8d9a17989b57232a142e786175c2a --- doc/source/systemd.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index c1d2944057..2009f7407c 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -115,8 +115,9 @@ an appropriate terminal/pager - e.g. ``less``, the default):: journalctl -a --unit devstack@n-cpu.service When outputting to the terminal using the default pager, long lines -appear to be truncated, but horizontal scrolling is supported via the -left/right arrow keys. +will be truncated, but horizontal scrolling is supported via the +left/right arrow keys. You can override this by setting the +``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``. See ``man 1 journalctl`` for more. From 5b419ffb1f20dfe613bd694fab8c1f08c8db7cce Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 29 Sep 2017 08:42:34 +1000 Subject: [PATCH 0699/1936] Don't check_libs_from_git for now "pip freeze" reports an error when checking because the repos don't have a remote under zuulv3 --- Error when trying to get requirement for VCS system Command "git config --get-regexp remote\..*\.url" failed with error code 1 in /opt/stack/new/keystone, falling back to uneditable format Could not determine repository location of /opt/stack/new/keystone Complete output from command git config --get-regexp remote\..*\.url: --- This means this check fails. I think we can fix this by looking at "pip list" which I will propose in a follow-on, but this fixes the immediate breaking issue. Depends-On: Ib12ddf768ee20fd7614622179f6842f5d57864ff Change-Id: I21ff749ab3e7911fa074e6d53056768f42f8aa57 --- stack.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c545c56de3..f5aa7659f9 100755 --- a/stack.sh +++ b/stack.sh @@ -1389,7 +1389,11 @@ service_check # ensure that all the libraries we think we installed from git, # actually were. -check_libs_from_git +# +# NOTE(ianw) 2017-09-27 : "pip freeze" is currently having issues +# with zuulv3 and the way it clones remotes. 
We will restore this +# with a slightly different check soon +#check_libs_from_git # Configure nova cellsv2 From ae9c6ab759d9dc1c7e72159092539444ca03cf33 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 29 Sep 2017 10:16:47 +1000 Subject: [PATCH 0700/1936] Use "pip list" in check_libs_from_git As described in the change, "pip freeze" has issues with the way zuulv3 clones repos without a remote. This is an attempt to use "pip list" to check for local install Change-Id: I33d25f86b6afcadb4b190a0f6c53311111c64521 --- inc/python | 15 ++++++++++++++- stack.sh | 6 +----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/inc/python b/inc/python index 5e7f742d48..4bc1856fbd 100644 --- a/inc/python +++ b/inc/python @@ -386,7 +386,20 @@ function use_library_from_git { # determine if a package was installed from git function lib_installed_from_git { local name=$1 - pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git' + # Note "pip freeze" doesn't always work here, because it tries to + # be smart about finding the remote of the git repo the package + # was installed from. This doesn't work with zuul which clones + # repos with no remote. + # + # The best option seems to be to use "pip list" which will tell + # you the path an editable install was installed from; for example + # in response to something like + # pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate' + # pip list shows + # bashate (0.5.2.dev19, /tmp/env/src/bashate) + # Thus we look for "path after a comma" to indicate we were + # installed from some local place + pip list 2>/dev/null | grep -- "$name" | grep -q -- ', .*)$' } # check that everything that's in LIBS_FROM_GIT was actually installed diff --git a/stack.sh b/stack.sh index f5aa7659f9..c545c56de3 100755 --- a/stack.sh +++ b/stack.sh @@ -1389,11 +1389,7 @@ service_check # ensure that all the libraries we think we installed from git, # actually were. -# -# NOTE(ianw) 2017-09-27 : "pip freeze" is currently having issues -# with zuulv3 and the way it clones remotes. We will restore this -# with a slightly different check soon -#check_libs_from_git +check_libs_from_git # Configure nova cellsv2 From 1f82f43016f5e3c51560c8b7c0b9c07350731f6c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 4 Oct 2017 09:51:02 +1100 Subject: [PATCH 0701/1936] Revert "Remove cache dirs from the services" This reverts commit ef5ebed6c9ca3d9d47fd2a732a1542555a0f65ba. The problem here is a backwards-incompatible change to configure_auth_token_middleware. Plugins are still passing a "signing_dir" which is interpreted now as the "section" argument ... this leads to an interesting red-herring issue; because "v" is a gnu sed command for checking the version, a signing_dir of "/var/..." (as done in most plugins) gives the weird error: sed: -e expression #1, char 32: expected newer version of sed I think we'll either need a new function, or dummy arguments to get this back in. 
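As a minimal sketch of that red herring (not from the original patch; the service name and cache path below are invented): a plugin still making the old three-argument call

    configure_auth_token_middleware $MY_CONF myservice /var/cache/myservice

now has "/var/cache/myservice" land in the optional [section] parameter. iniset then builds a slash-delimited sed address from that value, roughly:

    section="/var/cache/myservice"
    printf '[keystone_authtoken]\n' | sed -e "/^\[$section\]/ s/^/# /"
    # sed reads the address as /^\[/ and then hits the stray "v" of "var",
    # which it parses as its version-check command, so the pipeline fails
    # with something like:
    #   sed: -e expression #1, char ...: expected newer version of sed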
Change-Id: I2098d4eb2747282622cf486fa7dbf216f932f58b --- lib/cinder | 12 +++++++++++- lib/glance | 16 +++++++++++++--- lib/keystone | 6 ++++-- lib/neutron | 17 ++++++++++++++--- lib/neutron-legacy | 6 ++++-- lib/nova | 13 +++++++++++-- lib/swift | 7 ++++++- 7 files changed, 63 insertions(+), 14 deletions(-) diff --git a/lib/cinder b/lib/cinder index 387fc1ac2c..07f82a1580 100644 --- a/lib/cinder +++ b/lib/cinder @@ -51,6 +51,7 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -224,8 +225,9 @@ function configure_cinder { inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password + inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - configure_auth_token_middleware $CINDER_CONF cinder + configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -383,6 +385,13 @@ function create_cinder_accounts { fi } +# create_cinder_cache_dir() - Part of the init_cinder() process +function create_cinder_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* +} + # init_cinder() - Initialize database and volume group function init_cinder { if is_service_enabled $DATABASE_BACKENDS; then @@ -411,6 +420,7 @@ function init_cinder { fi mkdir -p $CINDER_STATE_PATH/volumes + create_cinder_cache_dir } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 8241c5f9d7..74734c7516 100644 --- a/lib/glance +++ b/lib/glance @@ -44,6 +44,7 @@ fi GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} +GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -97,7 +98,7 @@ function is_glance_enabled { function cleanup_glance { # kill instances (nova) # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR } # configure_glance() - Set config files, create data dirs, etc @@ -114,7 +115,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance + configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" @@ -126,7 +127,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - configure_auth_token_middleware $GLANCE_API_CONF glance + configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api iniset $GLANCE_API_CONF 
oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -278,6 +279,13 @@ function create_glance_accounts { fi } +# create_glance_cache_dir() - Part of the init_glance() process +function create_glance_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact + rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/* +} + # init_glance() - Initialize databases, etc. function init_glance { # Delete existing images @@ -298,6 +306,8 @@ function init_glance { # Load metadata definitions $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs time_stop "dbsync" + + create_glance_cache_dir } # install_glanceclient() - Collect source and prepare diff --git a/lib/keystone b/lib/keystone index 7bd887ca2d..714f089cca 100644 --- a/lib/keystone +++ b/lib/keystone @@ -429,7 +429,7 @@ function create_service_user { # Configure the service to use the auth token middleware. # -# configure_auth_token_middleware conf_file admin_user [section] +# configure_auth_token_middleware conf_file admin_user signing_dir [section] # # section defaults to keystone_authtoken, which is where auth_token looks in # the .conf file. If the paste config file is used (api-paste.ini) then @@ -437,7 +437,8 @@ function create_service_user { function configure_auth_token_middleware { local conf_file=$1 local admin_user=$2 - local section=${3:-keystone_authtoken} + local signing_dir=$3 + local section=${4:-keystone_authtoken} iniset $conf_file $section auth_type password iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI @@ -448,6 +449,7 @@ function configure_auth_token_middleware { iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section cafile $SSL_BUNDLE_FILE + iniset $conf_file $section signing_dir $signing_dir iniset $conf_file $section memcached_servers localhost:11211 } diff --git a/lib/neutron b/lib/neutron index 359f19820d..21c8d4c735 100644 --- a/lib/neutron +++ b/lib/neutron @@ -30,6 +30,7 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron +NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} NEUTRON_BIN_DIR=$(get_python_exec_prefix) NEUTRON_DHCP_BINARY="neutron-dhcp-agent" @@ -43,6 +44,7 @@ NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} +NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # By default, use the ML2 plugin NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} @@ -173,8 +175,8 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_auth_token_middleware $NEUTRON_CONF neutron keystone_authtoken - configure_auth_token_middleware $NEUTRON_CONF nova nova + configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken + configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova # Configure VXLAN # TODO(sc68cal) not hardcode? 
@@ -248,7 +250,7 @@ function configure_neutron_new { # TODO(dtroyer): remove the v2.0 hard code below iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_auth_token_middleware $NEUTRON_META_CONF neutron DEFAULT + configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT fi # Format logging @@ -335,6 +337,13 @@ function create_neutron_accounts_new { fi } +# create_neutron_cache_dir() - Part of the init_neutron() process +function create_neutron_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* +} + # init_neutron() - Initialize databases, etc. function init_neutron_new { @@ -344,6 +353,8 @@ function init_neutron_new { # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads time_stop "dbsync" + + create_neutron_cache_dir } # install_neutron() - Collect source and prepare diff --git a/lib/neutron-legacy b/lib/neutron-legacy index a0e79bc684..0ccb17c084 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -73,6 +73,7 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas +NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then @@ -814,7 +815,7 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - configure_auth_token_middleware $NEUTRON_CONF nova nova + configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova # Configure plugin neutron_plugin_configure_service @@ -905,7 +906,8 @@ function _neutron_setup_keystone { local conf_file=$1 local section=$2 - configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $section + create_neutron_cache_dir + configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section } function _neutron_setup_interface_driver { diff --git a/lib/nova b/lib/nova index dcf2a1c376..1112f29bd3 100644 --- a/lib/nova +++ b/lib/nova @@ -46,6 +46,7 @@ fi NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} +NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf @@ -239,7 +240,7 @@ function cleanup_nova { sudo rm -rf $NOVA_INSTANCES_PATH/* fi - sudo rm -rf $NOVA_STATE_PATH + sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR # NOTE(dtroyer): This really should be called from here but due to the way # nova abuses the _cleanup() function we're moving it @@ -463,7 +464,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT fi - configure_auth_token_middleware $NOVA_CONF nova + configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR fi if is_service_enabled cinder; then @@ -657,6 +658,13 @@ function init_nova_cells { fi } +# create_nova_cache_dir() - Part of the init_nova() process +function create_nova_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* +} + function create_nova_conf_nova_network { local 
public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" @@ -714,6 +722,7 @@ function init_nova { done fi + create_nova_cache_dir create_nova_keys_dir if [[ "$NOVA_BACKEND" == "LVM" ]]; then diff --git a/lib/swift b/lib/swift index ab014de909..1601e2b1f8 100644 --- a/lib/swift +++ b/lib/swift @@ -48,6 +48,7 @@ fi SWIFT_DIR=$DEST/swift +SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT3_DIR=$DEST/swift3 @@ -449,7 +450,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory - configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken + configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False @@ -744,6 +745,10 @@ function init_swift { swift-ring-builder container.builder rebalance 42 swift-ring-builder account.builder rebalance 42 } && popd >/dev/null + + # Create cache dir + sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR + rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift { From 56131eb11fa431a13028e90f179e5444c0297f2a Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Sat, 30 Sep 2017 09:55:38 -0700 Subject: [PATCH 0702/1936] Move glance auth cache creation to configure_glance We install the glance api on all nodes in multinode testing. This has been failing because we don't configure the glance auth cache dirs if we only install the glance api service. This was done as part of init_glance which is only run when installing g-reg. Fix this by moving the auth cache dir creation step into configure_glance which is run for the glance api. Change-Id: Ie669827507df0f524e6e53fe4ab3dff848dd4bd7 --- lib/glance | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/glance b/lib/glance index 74734c7516..ad286bacb9 100644 --- a/lib/glance +++ b/lib/glance @@ -105,6 +105,11 @@ function cleanup_glance { function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR + # We run this here as this configures cache dirs for the auth middleware + # which is used in the api server and not in the registry. The api + # Server is configured through this function and not init_glance. 
+ create_glance_cache_dir + # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -279,7 +284,7 @@ function create_glance_accounts { fi } -# create_glance_cache_dir() - Part of the init_glance() process +# create_glance_cache_dir() - Part of the configure_glance() process function create_glance_cache_dir { # Create cache dir sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact @@ -306,8 +311,6 @@ function init_glance { # Load metadata definitions $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs time_stop "dbsync" - - create_glance_cache_dir } # install_glanceclient() - Collect source and prepare From 9e7ead9ac2e791f70542741aa2c84e7ad828ac7a Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Thu, 5 Oct 2017 16:51:09 -0400 Subject: [PATCH 0703/1936] Calculate package directory correctly in pip_install Strip the [] string from a [] argument when looking for the package directory. Explain what the heck is going on. Change-Id: I79beb5c3e9e7c35c91cdd0d5a1d91532bebc4b6d Closes-Bug: #1721638 --- inc/python | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index 4bc1856fbd..9c810ec9b9 100644 --- a/inc/python +++ b/inc/python @@ -219,7 +219,8 @@ function disable_python3_package { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, -# pip_install package [package ...] +# Usage: +# pip_install pip_arguments function pip_install { local xtrace result xtrace=$(set +o | grep xtrace) @@ -241,6 +242,26 @@ function pip_install { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi + + # Try to extract the path of the package we are installing into + # package_dir. We need this to check for test-requirements.txt, + # at least. + # + # ${!#} expands to the last positional argument to this function. + # With "extras" syntax included, our arguments might be something + # like: + # -e /path/to/fooproject[extra] + # Thus this magic line grabs just the path without extras + # + # Note that this makes no sense if this is a pypi (rather than + # local path) install; ergo you must check this path exists before + # use. Also, if we had multiple or mixed installs, we would also + # likely break. But for historical reasons, it's basically only + # the other wrapper functions in here calling this to install + # local packages, and they do so with single call per install. So + # this works (for now...) + local package_dir=${!#%\[*\]} + if [[ $TRACK_DEPENDS = True && ! 
"$@" =~ virtualenv ]]; then # TRACK_DEPENDS=True installation creates a circular dependency when # we attempt to install virtualenv into a virtualenv, so we must global @@ -261,7 +282,6 @@ function pip_install { # versions supported, and if we find the version of # python3 we've been told to use, use that instead of the # default pip - local package_dir=${!#} local python_versions # Special case some services that have experimental @@ -323,7 +343,7 @@ function pip_install { # Also install test requirements local install_test_reqs="" - local test_req="${!#}/test-requirements.txt" + local test_req="${package_dir}/test-requirements.txt" if [[ -e "$test_req" ]]; then install_test_reqs="-r $test_req" fi From b8335eebe88f5ebe0c1d42344ced60e55ead3c5e Mon Sep 17 00:00:00 2001 From: Markus Zoeller Date: Tue, 26 Sep 2017 08:32:50 +0200 Subject: [PATCH 0704/1936] Drop support for "kvmibm" distro The IBM hypervisor distro "KVM for IBM z Systems" gets discontiued, like announced in March 2017 [1]. The key dates are: * 03/2017: announcement * 08/2017: the last day to order (EOM) * 03/2018: the End of Service (EOL) As the CI which tests OpenStack with KVM on IBM Z doesn't rely on this distro anymore and EOM has reached, we remove the Devstack support for this distro. This basically reverts commit a5ea08b of Dec 2015. NOTE: This doesn't affect other distros which have KVM on Z support. References: [1] FAQ for KVM for IBM z Systems Delivery Strategy Change https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=ZSQ03110USEN& Change-Id: I009ae4779588615633bff81d0c47a1b879ec9279 --- functions-common | 4 +--- lib/nova_plugins/functions-libvirt | 5 ----- stack.sh | 2 +- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/functions-common b/functions-common index c968531fd1..c6ba99e1a1 100644 --- a/functions-common +++ b/functions-common @@ -385,8 +385,6 @@ function GetDistro { DISTRO="rhel${os_RELEASE::1}" elif [[ "$os_VENDOR" =~ (XenServer) ]]; then DISTRO="xs${os_RELEASE%.*}" - elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then - DISTRO="${os_VENDOR}${os_RELEASE::1}" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -440,7 +438,7 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \ - [ "$os_VENDOR" = "Virtuozzo" ] || [ "$os_VENDOR" = "kvmibm" ] + [ "$os_VENDOR" = "Virtuozzo" ] } diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 8d74c77517..c8527387ab 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -72,11 +72,6 @@ function install_libvirt { pip_install_gr libvirt-python #pip_install_gr elif is_fedora || is_suse; then - # On "KVM for IBM z Systems", kvm does not have its own package - if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then - install_package qemu-kvm - fi - install_package libvirt libvirt-devel pip_uninstall libvirt-python pip_install_gr libvirt-python diff --git a/stack.sh b/stack.sh index c545c56de3..8632cf1378 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7|kvmibm1) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 2b97a81640df1f8d537d6cf7b291f15d8f084a18 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Tue, 10 Oct 2017 16:51:12 +0300 Subject: [PATCH 0705/1936] Remove references to $USE_SCREEN in comments The USE_SCREEN variable is not used any more Change-Id: I29ad9cdb6c8498404502d91fbc4e1299bf4a633e --- functions-common | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/functions-common b/functions-common index c968531fd1..263ce264f0 100644 --- a/functions-common +++ b/functions-common @@ -1492,7 +1492,6 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. -# Uses globals ``USE_SCREEN`` # run_process service "command-line" [group] [user] function run_process { local service=$1 @@ -1513,7 +1512,7 @@ function run_process { # If a PID is available use it, kill the whole process group via TERM # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind -# Uses globals ``SERVICE_DIR``, ``USE_SCREEN`` +# Uses globals ``SERVICE_DIR`` # stop_process service function stop_process { local service=$1 From cda2cb557f7176c431d151b32bc44eee03f73774 Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Tue, 10 Oct 2017 11:49:06 -0500 Subject: [PATCH 0706/1936] Create block-storage endpoint for cinder block-storage is the official service type for cinder, according to the service-types-authority. Add it as a service in devstack, with cinder's unversioned endpoint, to enable proper discovery. Change-Id: I75cf7212678f7f270c3c32f0bce227dbbf6b466d --- lib/cinder | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/cinder b/lib/cinder index 07f82a1580..c35b89d63b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -344,8 +344,15 @@ function create_cinder_accounts { create_service_user "cinder" + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" get_or_create_service "cinder" "volume" "Cinder Volume Service" if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/" + get_or_create_endpoint \ "volume" \ "$REGION_NAME" \ @@ -363,6 +370,11 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" else + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/" + get_or_create_endpoint \ "volume" \ "$REGION_NAME" \ From 2da019f133bb3e53ef67e33cfd269a9b6a72d27a Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 11 Oct 2017 09:57:25 -0700 Subject: [PATCH 0707/1936] Replace the deprecated nova_metadata_ip As [1], the option nova_metadata_ip has been deprecated and we can use nova_metadata_host instead. This patch makes devstack do it. 
[1]: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/conf/agent/metadata/config.py#n49 Change-Id: Ifda43ec8c7743af6acdd3003c55c081ef5b1311c --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 21c8d4c735..c5839f5c3e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -243,7 +243,7 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST + iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $SERVICE_HOST iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS # TODO(ihrachys) do we really need to set rootwrap for metadata agent? configure_root_helper_options $NEUTRON_META_CONF From 843b039b3ca24c79865d991df43bfcd5ebe0b97b Mon Sep 17 00:00:00 2001 From: jianghua wang Date: Thu, 21 Sep 2017 14:16:06 +0000 Subject: [PATCH 0708/1936] Use the renamed vnc options As the following commit has renamed the two vnc options; let's use the new options in devstack: https://review.openstack.org/#/c/498387/ Change-Id: Id125666814ea9bb8a22b579aee0f6bc1c65ade80 --- lib/nova | 4 ++-- lib/nova_plugins/hypervisor-libvirt | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 1112f29bd3..ea0d2f7b89 100644 --- a/lib/nova +++ b/lib/nova @@ -520,8 +520,8 @@ function create_nova_conf { # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF vnc server_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" else diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 0c08a0fe42..3d676b9b8d 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -71,8 +71,8 @@ function configure_nova_hypervisor { iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" iniset $NOVA_CONF libvirt images_type "ploop" iniset $NOVA_CONF DEFAULT force_raw_images "False" - iniset $NOVA_CONF vnc vncserver_proxyclient_address $HOST_IP - iniset $NOVA_CONF vnc vncserver_listen $HOST_IP + iniset $NOVA_CONF vnc server_proxyclient_address $HOST_IP + iniset $NOVA_CONF vnc server_listen $HOST_IP iniset $NOVA_CONF vnc keymap elif [[ "$NOVA_BACKEND" == "LVM" ]]; then iniset $NOVA_CONF libvirt images_type "lvm" From 135ebe955e19992e54350e2ed70c5f0517c93b26 Mon Sep 17 00:00:00 2001 From: Sumit Jamgade Date: Fri, 13 Oct 2017 15:08:27 +0200 Subject: [PATCH 0709/1936] Revert "Resolve openSUSE devstack failures" This reverts commit d325875508e7d35d6dd62302d852e83815be2278. the issue mentioned in the comment is now resolved. 
Change-Id: I2705daead3d3b95f6ad82261212f2a1f40a77fb5 --- tools/install_prereqs.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 6189085e9e..da59093581 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -88,15 +88,6 @@ else export PYTHON=$(which python 2>/dev/null) fi -if is_suse; then - # now reinstall cryptography from source, in order to rebuilt it against the - # system libssl rather than the bundled openSSL 1.1, which segfaults when combined - # with a system provided openSSL 1.0 - # see https://github.com/pyca/cryptography/issues/3804 and followup issues - sudo pip install cryptography --no-binary :all: -fi - - # Mark end of run # --------------- From c0d9373e860726570b27f5c560f0fe84448598cc Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sun, 15 Oct 2017 08:26:37 +0000 Subject: [PATCH 0710/1936] Updated from generate-devstack-plugins-list Change-Id: I474294bdbb1052f8f99522415dd0c1d26d8a995b --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 6aa2e93739..5fd6697d35 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -114,6 +114,7 @@ networking-huawei `git://git.openstack.org/openstack/networ networking-hyperv `git://git.openstack.org/openstack/networking-hyperv `__ networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ +networking-lagopus `git://git.openstack.org/openstack/networking-lagopus `__ networking-midonet `git://git.openstack.org/openstack/networking-midonet `__ networking-mlnx `git://git.openstack.org/openstack/networking-mlnx `__ networking-nec `git://git.openstack.org/openstack/networking-nec `__ @@ -159,6 +160,7 @@ searchlight-ui `git://git.openstack.org/openstack/search senlin `git://git.openstack.org/openstack/senlin `__ solum `git://git.openstack.org/openstack/solum `__ stackube `git://git.openstack.org/openstack/stackube `__ +storlets `git://git.openstack.org/openstack/storlets `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ From 36ddea31a257b38aa22d3232f4c4389c851f5456 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 2 Oct 2017 10:05:17 -0500 Subject: [PATCH 0711/1936] Add devstack base job for zuul v3 This should be managed in the devstack repo, since it's a base job to run devstack. 
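For context, a rough single-node approximation of what the base job amounts to (a hedged sketch, not part of this change; the real logic lives in the ansible roles added below): the write-devstack-local-conf role renders the job's devstack_localrc variables into a local.conf, and the run-devstack role then runs stack.sh as the stack user, roughly:

    # approximate equivalent of the pre/run playbooks on one node
    cat > /opt/stack/devstack/local.conf <<'EOF'
    [[local|localrc]]
    ADMIN_PASSWORD=secretadmin
    DATABASE_PASSWORD=secretdatabase
    RABBIT_PASSWORD=secretrabbit
    SERVICE_PASSWORD=secretservice
    LOGFILE=/opt/stack/logs/devstacklog.txt
    EOF
    cd /opt/stack/devstack
    ./stack.sh    # executed as the "stack" user by the run-devstack role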
Change-Id: Iffe54fbccbccd68db08f79a1b51dd7f76dbff408 Depends-On: Ie2119f24360d56690ffd772b95a9ea6b98dd4a39 --- .zuul.yaml | 85 ++++++++ playbooks/devstack.yaml | 3 + playbooks/post.yaml | 4 + playbooks/pre.yaml | 22 +++ roles/configure-swap/README.rst | 11 ++ roles/configure-swap/defaults/main.yaml | 1 + roles/configure-swap/tasks/ephemeral.yaml | 110 +++++++++++ roles/configure-swap/tasks/main.yaml | 63 ++++++ roles/configure-swap/tasks/root.yaml | 63 ++++++ roles/export-devstack-journal/README.rst | 15 ++ .../defaults/main.yaml | 1 + roles/export-devstack-journal/tasks/main.yaml | 29 +++ roles/fetch-devstack-log-dir/README.rst | 10 + .../fetch-devstack-log-dir/defaults/main.yaml | 1 + roles/fetch-devstack-log-dir/tasks/main.yaml | 5 + roles/run-devstack/README.rst | 8 + roles/run-devstack/defaults/main.yaml | 1 + roles/run-devstack/tasks/main.yaml | 6 + roles/setup-devstack-cache/README.rst | 15 ++ roles/setup-devstack-cache/defaults/main.yaml | 2 + roles/setup-devstack-cache/tasks/main.yaml | 14 ++ roles/setup-devstack-log-dir/README.rst | 11 ++ .../setup-devstack-log-dir/defaults/main.yaml | 1 + roles/setup-devstack-log-dir/tasks/main.yaml | 5 + roles/setup-devstack-source-dirs/README.rst | 11 ++ .../defaults/main.yaml | 1 + .../tasks/main.yaml | 22 +++ roles/setup-stack-user/README.rst | 16 ++ roles/setup-stack-user/defaults/main.yaml | 2 + roles/setup-stack-user/files/50_stack_sh | 1 + roles/setup-stack-user/tasks/main.yaml | 45 +++++ roles/setup-tempest-user/README.rst | 10 + roles/setup-tempest-user/files/51_tempest_sh | 3 + roles/setup-tempest-user/tasks/main.yaml | 20 ++ roles/start-fresh-logging/README.rst | 11 ++ roles/start-fresh-logging/defaults/main.yaml | 1 + roles/start-fresh-logging/tasks/main.yaml | 56 ++++++ roles/write-devstack-local-conf/README.rst | 63 ++++++ .../defaults/main.yaml | 2 + .../library/devstack_local_conf.py | 185 ++++++++++++++++++ .../write-devstack-local-conf/tasks/main.yaml | 9 + 41 files changed, 944 insertions(+) create mode 100644 .zuul.yaml create mode 100644 playbooks/devstack.yaml create mode 100644 playbooks/post.yaml create mode 100644 playbooks/pre.yaml create mode 100644 roles/configure-swap/README.rst create mode 100644 roles/configure-swap/defaults/main.yaml create mode 100644 roles/configure-swap/tasks/ephemeral.yaml create mode 100644 roles/configure-swap/tasks/main.yaml create mode 100644 roles/configure-swap/tasks/root.yaml create mode 100644 roles/export-devstack-journal/README.rst create mode 100644 roles/export-devstack-journal/defaults/main.yaml create mode 100644 roles/export-devstack-journal/tasks/main.yaml create mode 100644 roles/fetch-devstack-log-dir/README.rst create mode 100644 roles/fetch-devstack-log-dir/defaults/main.yaml create mode 100644 roles/fetch-devstack-log-dir/tasks/main.yaml create mode 100644 roles/run-devstack/README.rst create mode 100644 roles/run-devstack/defaults/main.yaml create mode 100644 roles/run-devstack/tasks/main.yaml create mode 100644 roles/setup-devstack-cache/README.rst create mode 100644 roles/setup-devstack-cache/defaults/main.yaml create mode 100644 roles/setup-devstack-cache/tasks/main.yaml create mode 100644 roles/setup-devstack-log-dir/README.rst create mode 100644 roles/setup-devstack-log-dir/defaults/main.yaml create mode 100644 roles/setup-devstack-log-dir/tasks/main.yaml create mode 100644 roles/setup-devstack-source-dirs/README.rst create mode 100644 roles/setup-devstack-source-dirs/defaults/main.yaml create mode 100644 roles/setup-devstack-source-dirs/tasks/main.yaml create mode 
100644 roles/setup-stack-user/README.rst create mode 100644 roles/setup-stack-user/defaults/main.yaml create mode 100644 roles/setup-stack-user/files/50_stack_sh create mode 100644 roles/setup-stack-user/tasks/main.yaml create mode 100644 roles/setup-tempest-user/README.rst create mode 100644 roles/setup-tempest-user/files/51_tempest_sh create mode 100644 roles/setup-tempest-user/tasks/main.yaml create mode 100644 roles/start-fresh-logging/README.rst create mode 100644 roles/start-fresh-logging/defaults/main.yaml create mode 100644 roles/start-fresh-logging/tasks/main.yaml create mode 100644 roles/write-devstack-local-conf/README.rst create mode 100644 roles/write-devstack-local-conf/defaults/main.yaml create mode 100644 roles/write-devstack-local-conf/library/devstack_local_conf.py create mode 100644 roles/write-devstack-local-conf/tasks/main.yaml diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000000..cee195cf39 --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,85 @@ +- nodeset: + name: openstack-single-node + nodes: + - name: controller + label: ubuntu-xenial + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-two-node + nodes: + - name: controller + label: ubuntu-xenial + - name: compute1 + label: ubuntu-xenial + groups: + - name: tempest + nodes: + - controller + - name: compute + nodes: + - controller + - compute1 + +- job: + name: devstack + parent: multinode + description: Base devstack job + nodeset: openstack-single-node + required-projects: + - openstack-dev/devstack + - openstack/cinder + - openstack/glance + - openstack/keystone + - openstack/neutron + - openstack/nova + - openstack/requirements + - openstack/swift + timeout: 7200 + vars: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + Q_USE_DEBUG_COMMAND: True + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + FLOATING_HOST_PREFIX: 172.24.4 + FLOATING_HOST_MASK: 23 + SWIFT_REPLICAS: 1 + SWIFT_START_ALL_SERVICES: False + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: False + VERBOSE: True + NETWORK_GATEWAY: 10.1.0.1 + NOVNC_FROM_PACKAGE: True + ERROR_ON_CLONE: True + # NOTE(dims): etcd 3.x is not available in debian/ubuntu + # etc. As a stop gap measure, devstack uses wget to download + # from the location below for all the CI jobs. 
+ ETCD_DOWNLOAD_URL: "http://tarballs.openstack.org/etcd/" + devstack_services: + horizon: False + tempest: False + pre-run: playbooks/pre + post-run: playbooks/post + + +- project: + name: openstack-dev/devstack + check: + jobs: + - devstack: + files: + - ^playbooks/pre + - ^playbooks/post + - ^playbooks/devstack + - ^roles/ + - .zuul.yaml diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml new file mode 100644 index 0000000000..ede8382632 --- /dev/null +++ b/playbooks/devstack.yaml @@ -0,0 +1,3 @@ +- hosts: all + roles: + - run-devstack diff --git a/playbooks/post.yaml b/playbooks/post.yaml new file mode 100644 index 0000000000..6f5126ff16 --- /dev/null +++ b/playbooks/post.yaml @@ -0,0 +1,4 @@ +- hosts: all + roles: + - export-devstack-journal + - fetch-devstack-log-dir diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml new file mode 100644 index 0000000000..4d07960fe8 --- /dev/null +++ b/playbooks/pre.yaml @@ -0,0 +1,22 @@ +- hosts: all + roles: + - configure-swap + - setup-stack-user + - setup-tempest-user + - setup-devstack-source-dirs + - setup-devstack-log-dir + - setup-devstack-cache + - start-fresh-logging + - write-devstack-local-conf + # TODO(jeblair): remove when configure-mirrors is fixed + tasks: + - name: Hack mirror_info + shell: + _raw_params: | + mkdir /etc/ci + cat << "EOF" > /etc/ci/mirror_info.sh + export NODEPOOL_UCA_MIRROR=http://mirror.dfw.rax.openstack.org/ubuntu-cloud-archive + EOF + args: + executable: /bin/bash + become: true diff --git a/roles/configure-swap/README.rst b/roles/configure-swap/README.rst new file mode 100644 index 0000000000..eaba5cf595 --- /dev/null +++ b/roles/configure-swap/README.rst @@ -0,0 +1,11 @@ +Configure a swap partition + +Creates a swap partition on the ephemeral block device (the rest of which +will be mounted on /opt). + +**Role Variables** + +.. zuul:rolevar:: configure_swap_size + :default: 8192 + + The size of the swap partition, in MiB. 
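As an illustration of the intended end state (not part of this change; the device names assume a Rackspace-style node where the ephemeral disk is /dev/xvde, per the tasks that follow):

    swapon --show   # expect a swap partition such as /dev/xvde1, sized configure_swap_size MiB
    findmnt /opt    # expect the second partition, e.g. /dev/xvde2, mounted ext4 with noatime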
diff --git a/roles/configure-swap/defaults/main.yaml b/roles/configure-swap/defaults/main.yaml new file mode 100644 index 0000000000..4d622321c7 --- /dev/null +++ b/roles/configure-swap/defaults/main.yaml @@ -0,0 +1 @@ +configure_swap_size: 8192 diff --git a/roles/configure-swap/tasks/ephemeral.yaml b/roles/configure-swap/tasks/ephemeral.yaml new file mode 100644 index 0000000000..c2316eac2b --- /dev/null +++ b/roles/configure-swap/tasks/ephemeral.yaml @@ -0,0 +1,110 @@ +# Configure attached ephemeral devices for storage and swap + +- assert: + that: + - "ephemeral_device is defined" + +- name: Set partition names + set_fact: + swap_partition: "{{ ephemeral_device}}1" + opt_partition: "{{ ephemeral_device}}2" + +- name: Ensure ephemeral device is unmounted + become: yes + mount: + name: "{{ ephemeral_device }}" + state: unmounted + +- name: Get existing partitions + become: yes + parted: + device: "{{ ephemeral_device }}" + unit: MiB + register: ephemeral_partitions + +- name: Remove any existing partitions + become: yes + parted: + device: "{{ ephemeral_device }}" + number: "{{ item.num }}" + state: absent + with_items: + - "{{ ephemeral_partitions.partitions }}" + +- name: Create new disk label + become: yes + parted: + label: msdos + device: "{{ ephemeral_device }}" + +- name: Create swap partition + become: yes + parted: + device: "{{ ephemeral_device }}" + number: 1 + state: present + part_start: '0%' + part_end: "{{ configure_swap_size }}MiB" + +- name: Create opt partition + become: yes + parted: + device: "{{ ephemeral_device }}" + number: 2 + state: present + part_start: "{{ configure_swap_size }}MiB" + part_end: "100%" + +- name: Make swap on partition + become: yes + command: "mkswap {{ swap_partition }}" + +- name: Write swap to fstab + become: yes + mount: + path: none + src: "{{ swap_partition }}" + fstype: swap + opts: sw + passno: 0 + dump: 0 + state: present + +# XXX: does "parted" plugin ensure the partition is available +# before moving on? No udev settles here ... + +- name: Add all swap + become: yes + command: swapon -a + +- name: Create /opt filesystem + become: yes + filesystem: + fstype: ext4 + dev: "{{ opt_partition }}" + +# Rackspace at least does not have enough room for two devstack +# installs on the primary partition. We copy in the existing /opt to +# the new partition on the ephemeral device, and then overmount /opt +# to there for the test runs. +# +# NOTE(ianw): the existing "mount" touches fstab. There is currently (Sep2017) +# work in [1] to split mount & fstab into separate parts, but for now we bundle +# it into an atomic shell command +# [1] https://github.com/ansible/ansible/pull/27174 +- name: Copy old /opt + become: yes + shell: | + mount {{ opt_partition }} /mnt + find /opt/ -mindepth 1 -maxdepth 1 -exec mv {} /mnt/ \; + umount /mnt + +# This overmounts any existing /opt +- name: Add opt to fstab and mount + become: yes + mount: + path: /opt + src: "{{ opt_partition }}" + fstype: ext4 + opts: noatime + state: mounted diff --git a/roles/configure-swap/tasks/main.yaml b/roles/configure-swap/tasks/main.yaml new file mode 100644 index 0000000000..8960c726c8 --- /dev/null +++ b/roles/configure-swap/tasks/main.yaml @@ -0,0 +1,63 @@ +# On RAX hosts, we have a small root partition and a large, +# unallocated ephemeral device attached at /dev/xvde +- name: Set ephemeral device if /dev/xvde exists + when: ansible_devices["xvde"] is defined + set_fact: + ephemeral_device: "/dev/xvde" + +# On other providers, we have a device called "ephemeral0". 
+# +# NOTE(ianw): Once [1] is in our ansible (2.4 era?), we can figure +# this out more directly by walking the device labels in the facts +# +# [1] https://github.com/ansible/ansible/commit/d46dd99f47c0ee5081d15bc5b741e9096d8bfd3e +- name: Set ephemeral device by label + when: ephemeral_device is undefined + block: + - name: Get ephemeral0 device node + command: /sbin/blkid -L ephemeral0 + register: ephemeral0 + # If this doesn't exist, returns !0 + ignore_errors: yes + changed_when: False + + - name: Set ephemeral device if LABEL exists + when: "ephemeral0.rc == 0" + set_fact: + ephemeral_device: "{{ ephemeral0.stdout }}" + +# If we have ephemeral storage and we don't appear to have setup swap, +# we will create a swap and move /opt to a large data partition there. +- include: ephemeral.yaml + static: no + when: + - ephemeral_device is defined + - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size + +# If no ephemeral device and no swap, then we will setup some swap +# space on the root device to ensure all hosts a consistent memory +# environment. +- include: root.yaml + static: no + when: + - ephemeral_device is undefined + - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size + +# ensure a standard level of swappiness. Some platforms +# (rax+centos7) come with swappiness of 0 (presumably because the +# vm doesn't come with swap setup ... but we just did that above), +# which depending on the kernel version can lead to the OOM killer +# kicking in on some processes despite swap being available; +# particularly things like mysql which have very high ratio of +# anonymous-memory to file-backed mappings. +# +# This sets swappiness low; we really don't want to be relying on +# cloud I/O based swap during our runs if we can help it +- name: Set swappiness + become: yes + sysctl: + name: vm.swappiness + value: 30 + state: present + +- debug: var=ephemeral_device diff --git a/roles/configure-swap/tasks/root.yaml b/roles/configure-swap/tasks/root.yaml new file mode 100644 index 0000000000..f22b53700f --- /dev/null +++ b/roles/configure-swap/tasks/root.yaml @@ -0,0 +1,63 @@ +# If no ephemeral devices are available, use root filesystem + +- name: Calculate required swap + set_fact: + swap_required: "{{ configure_swap_size - ansible_memory_mb['swap']['total'] | int }}" + +- block: + - name: Get root filesystem + shell: df --output='fstype' /root | tail -1 + register: root_fs + + - name: Save root filesystem + set_fact: + root_filesystem: "{{ root_fs.stdout }}" + + - debug: var=root_filesystem + +# Note, we don't use a sparse device to avoid wedging when disk space +# and memory are both unavailable. 
+ +# Cannot fallocate on filesystems like XFS, so use slower dd +- name: Create swap backing file for non-EXT fs + when: '"ext" not in root_filesystem' + become: yes + command: dd if=/dev/zero of=/root/swapfile bs=1M count={{ swap_required }} + args: + creates: /root/swapfile + +- name: Create sparse swap backing file for EXT fs + when: '"ext" in root_filesystem' + become: yes + command: fallocate -l {{ swap_required }}M /root/swapfile + args: + creates: /root/swapfile + +- name: Ensure swapfile perms + become: yes + file: + path: /root/swapfile + owner: root + group: root + mode: 0600 + +- name: Make swapfile + become: yes + command: mkswap /root/swapfile + +- name: Write swap to fstab + become: yes + mount: + path: none + src: /root/swapfile + fstype: swap + opts: sw + passno: 0 + dump: 0 + state: present + +- name: Add all swap + become: yes + command: swapon -a + +- debug: var=swap_required diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst new file mode 100644 index 0000000000..5f00592a03 --- /dev/null +++ b/roles/export-devstack-journal/README.rst @@ -0,0 +1,15 @@ +Export journal files from devstack services + +Export the systemd journal for every devstack service in native +journal format as well as text. Also, export a syslog-style file with +kernal and sudo messages. + +Writes the output to the ``logs/`` subdirectory of +``devstack_base_dir``. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/export-devstack-journal/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml new file mode 100644 index 0000000000..b9af02a591 --- /dev/null +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -0,0 +1,29 @@ +# TODO: convert this to ansible +- name: Export journal files + become: true + shell: + cmd: | + u="" + name="" + for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do + name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') + journalctl -o short-precise --unit $u | tee {{ devstack_base_dir }}/logs/$name.txt > /dev/null + done + + # Export the journal in export format to make it downloadable + # for later searching. It can then be rewritten to a journal native + # format locally using systemd-journal-remote. This makes a class of + # debugging much easier. We don't do the native conversion here as + # some distros do not package that tooling. + journalctl -u 'devstack@*' -o export | \ + xz --threads=0 - > {{ devstack_base_dir }}/logs/devstack.journal.xz + + # The journal contains everything running under systemd, we'll + # build an old school version of the syslog with just the + # kernel and sudo messages. + journalctl \ + -t kernel \ + -t sudo \ + --no-pager \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + | tee {{ devstack_base_dir }}/logs/syslog.txt > /dev/null diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst new file mode 100644 index 0000000000..360a2e3dd0 --- /dev/null +++ b/roles/fetch-devstack-log-dir/README.rst @@ -0,0 +1,10 @@ +Fetch content from the devstack log directory + +Copy logs from every host back to the zuul executor. + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/fetch-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..5a198b21b4 --- /dev/null +++ b/roles/fetch-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,5 @@ +- name: Collect devstack logs + synchronize: + dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}" + mode: pull + src: "{{ devstack_base_dir }}/logs" diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst new file mode 100644 index 0000000000..d77eb15e99 --- /dev/null +++ b/roles/run-devstack/README.rst @@ -0,0 +1,8 @@ +Run devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/run-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml new file mode 100644 index 0000000000..bafebafd65 --- /dev/null +++ b/roles/run-devstack/tasks/main.yaml @@ -0,0 +1,6 @@ +- name: Run devstack + command: ./stack.sh + args: + chdir: "{{devstack_base_dir}}/devstack" + become: true + become_user: stack diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst new file mode 100644 index 0000000000..b8938c3dea --- /dev/null +++ b/roles/setup-devstack-cache/README.rst @@ -0,0 +1,15 @@ +Set up the devstack cache directory + +If the node has a cache of devstack image files, copy it into place. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_cache_dir + :default: /opt/cache + + The directory with the cached files. diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml new file mode 100644 index 0000000000..c56720b4f5 --- /dev/null +++ b/roles/setup-devstack-cache/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_cache_dir: /opt/cache diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml new file mode 100644 index 0000000000..84f33f0e16 --- /dev/null +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -0,0 +1,14 @@ +- name: Copy cached devstack files + # This uses hard links to avoid using extra space. + command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" + become: true + +- name: Set ownership of cached files + file: + path: '{{ devstack_base_dir }}/devstack/files' + state: directory + recurse: true + owner: stack + group: stack + mode: a+r + become: yes diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst new file mode 100644 index 0000000000..9d8dba3442 --- /dev/null +++ b/roles/setup-devstack-log-dir/README.rst @@ -0,0 +1,11 @@ +Set up the devstack log directory + +Create a log directory on the ephemeral disk partition to save space +on the root device. + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/setup-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..b9f38dfacb --- /dev/null +++ b/roles/setup-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,5 @@ +- name: Create logs directory + file: + path: '{{ devstack_base_dir }}/logs' + state: directory + become: yes diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst new file mode 100644 index 0000000000..4ebf8399c2 --- /dev/null +++ b/roles/setup-devstack-source-dirs/README.rst @@ -0,0 +1,11 @@ +Set up the devstack source directories + +Ensure that the base directory exists, and then move the source repos +into it. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/setup-devstack-source-dirs/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml new file mode 100644 index 0000000000..e6bbae23b7 --- /dev/null +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -0,0 +1,22 @@ +- name: Find all source repos used by this job + find: + paths: + - src/git.openstack.org/openstack + - src/git.openstack.org/openstack-dev + - src/git.openstack.org/openstack-infra + file_type: directory + register: found_repos + +- name: Copy Zuul repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_repos.files }}' + become: yes + +- name: Set ownership of repos + file: + path: '{{ devstack_base_dir }}' + state: directory + recurse: true + owner: stack + group: stack + become: yes diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst new file mode 100644 index 0000000000..80c4d39eff --- /dev/null +++ b/roles/setup-stack-user/README.rst @@ -0,0 +1,16 @@ +Set up the `stack` user + +Create the stack user, set up its home directory, and allow it to +sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_stack_home_dir + :default: {{ devstack_base_dir }} + + The home directory for the stack user. 
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml new file mode 100644 index 0000000000..6d0be666d4 --- /dev/null +++ b/roles/setup-stack-user/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_stack_home_dir: '{{ devstack_base_dir }}' diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh new file mode 100644 index 0000000000..4c6b46bdb1 --- /dev/null +++ b/roles/setup-stack-user/files/50_stack_sh @@ -0,0 +1 @@ +stack ALL=(root) NOPASSWD:ALL diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml new file mode 100644 index 0000000000..8384515ebe --- /dev/null +++ b/roles/setup-stack-user/tasks/main.yaml @@ -0,0 +1,45 @@ +- name: Create stack group + group: + name: stack + become: yes + +# NOTE(andreaf) Create a user home_dir is not safe via +# the user module since it will fail if the containing +# folder does not exists. If the folder does exists and +# it's empty, the skeleton is setup and ownership set. +- name: Create the stack user home folder + file: + path: '{{ devstack_stack_home_dir }}' + state: directory + become: yes + +- name: Create stack user + user: + name: stack + shell: /bin/bash + home: '{{ devstack_stack_home_dir }}' + group: stack + become: yes + +- name: Set stack user home directory permissions + file: + path: '{{ devstack_stack_home_dir }}' + mode: 0755 + become: yes + +- name: Copy 50_stack_sh file to /etc/sudoers.d + copy: + src: 50_stack_sh + dest: /etc/sudoers.d + mode: 0440 + owner: root + group: root + become: yes + +- name: Create new/.cache folder within BASE + file: + path: '{{ devstack_stack_home_dir }}/.cache' + state: directory + owner: stack + group: stack + become: yes diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst new file mode 100644 index 0000000000..bb29c50a28 --- /dev/null +++ b/roles/setup-tempest-user/README.rst @@ -0,0 +1,10 @@ +Set up the `tempest` user + +Create the tempest user and allow it to sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh new file mode 100644 index 0000000000..f88ff9f4f2 --- /dev/null +++ b/roles/setup-tempest-user/files/51_tempest_sh @@ -0,0 +1,3 @@ +tempest ALL=(root) NOPASSWD:/sbin/ip +tempest ALL=(root) NOPASSWD:/sbin/iptables +tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml new file mode 100644 index 0000000000..892eaf655a --- /dev/null +++ b/roles/setup-tempest-user/tasks/main.yaml @@ -0,0 +1,20 @@ +- name: Create tempest group + group: + name: tempest + become: yes + +- name: Create tempest user + user: + name: tempest + shell: /bin/bash + group: tempest + become: yes + +- name: Copy 51_tempest_sh to /etc/sudoers.d + copy: + src: 51_tempest_sh + dest: /etc/sudoers.d + owner: root + group: root + mode: 0440 + become: yes diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst new file mode 100644 index 0000000000..11b029e182 --- /dev/null +++ b/roles/start-fresh-logging/README.rst @@ -0,0 +1,11 @@ +Restart logging on all hosts + +Restart syslog so that the system logs only include output from the +job. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. 
diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/start-fresh-logging/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml new file mode 100644 index 0000000000..6c7ba66de7 --- /dev/null +++ b/roles/start-fresh-logging/tasks/main.yaml @@ -0,0 +1,56 @@ +- name: Check for /bin/journalctl file + command: which journalctl + changed_when: False + failed_when: False + register: which_out + +- block: + - name: Get current date + command: date +"%Y-%m-%d %H:%M:%S" + register: date_out + + - name: Copy current date to log-start-timestamp.txt + copy: + dest: "{{ devstack_base_dir }}/log-start-timestamp.txt" + content: "{{ date_out.stdout }}" + when: which_out.rc == 0 + become: yes + +- block: + - name: Stop rsyslog + service: name=rsyslog state=stopped + + - name: Save syslog file prior to devstack run + command: mv /var/log/syslog /var/log/syslog-pre-devstack + + - name: Save kern.log file prior to devstack run + command: mv /var/log/kern.log /var/log/kern_log-pre-devstack + + - name: Recreate syslog file + file: name=/var/log/syslog state=touch + + - name: Recreate syslog file owner and group + command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Recreate syslog file permissions + command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Add read permissions to all on syslog file + file: name=/var/log/syslog mode=a+r + + - name: Recreate kern.log file + file: name=/var/log/kern.log state=touch + + - name: Recreate kern.log file owner and group + command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Recreate kern.log file permissions + command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Add read permissions to all on kern.log file + file: name=/var/log/kern.log mode=a+r + + - name: Start rsyslog + service: name=rsyslog state=started + when: which_out.rc == 1 + become: yes diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst new file mode 100644 index 0000000000..e30dfa1e9f --- /dev/null +++ b/roles/write-devstack-local-conf/README.rst @@ -0,0 +1,63 @@ +Write the local.conf file for use by devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_local_conf_path + :default: {{ devstack_base_dir }}/devstack/local.conf + + The path of the local.conf file. + +.. zuul:rolevar:: devstack_localrc + :type: dict + + A dictionary of variables that should be written to the localrc + section of local.conf. The values (which are strings) may contain + bash shell variables, and will be ordered so that variables used by + later entries appear first. + +.. zuul:rolevar:: devstack_local_conf + :type: dict + + A complex argument consisting of nested dictionaries which combine + to form the meta-sections of the local_conf file. The top level is + a dictionary of phases, followed by dictionaries of filenames, then + sections, which finally contain key-value pairs for the INI file + entries in those sections. + + The keys in this dictionary are the devstack phases. + + .. zuul:rolevar:: [phase] + :type: dict + + The keys in this dictionary are the filenames for this phase. + + .. 
zuul:rolevar:: [filename] + :type: dict + + The keys in this dictionary are the INI sections in this file. + + .. zuul:rolevar:: [section] + :type: dict + + This is a dictionary of key-value pairs which comprise + this section of the INI file. + +.. zuul:rolevar:: devstack_services + :type: dict + + A dictionary mapping service names to boolean values. If the + boolean value is ``false``, a ``disable_service`` line will be + emitted for the service name. If it is ``true``, then + ``enable_service`` will be emitted. All other values are ignored. + +.. zuul:rolevar:: devstack_plugins + :type: dict + + A dictionary mapping a plugin name to a git repo location. If the + location is a non-empty string, then an ``enable_plugin`` line will + be emmitted for the plugin name. diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml new file mode 100644 index 0000000000..491fa0fdb9 --- /dev/null +++ b/roles/write-devstack-local-conf/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf" diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py new file mode 100644 index 0000000000..4134beb048 --- /dev/null +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -0,0 +1,185 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + + +class VarGraph(object): + # This is based on the JobGraph from Zuul. + + def __init__(self, vars): + self.vars = {} + self._varnames = set() + self._dependencies = {} # dependent_var_name -> set(parent_var_names) + for k, v in vars.items(): + self._varnames.add(k) + for k, v in vars.items(): + self._addVar(k, str(v)) + + bash_var_re = re.compile(r'\$\{?(\w+)') + def getDependencies(self, value): + return self.bash_var_re.findall(value) + + def _addVar(self, key, value): + if key in self.vars: + raise Exception("Variable {} already added".format(key)) + self.vars[key] = value + # Append the dependency information + self._dependencies.setdefault(key, set()) + try: + for dependency in self.getDependencies(value): + if dependency == key: + # A variable is allowed to reference itself; no + # dependency link needed in that case. + continue + if dependency not in self._varnames: + # It's not necessary to create a link for an + # external variable. + continue + # Make sure a circular dependency is never created + ancestor_vars = self._getParentVarNamesRecursively( + dependency, soft=True) + ancestor_vars.add(dependency) + if any((key == anc_var) for anc_var in ancestor_vars): + raise Exception("Dependency cycle detected in var {}". 
+ format(key)) + self._dependencies[key].add(dependency) + except Exception: + del self.vars[key] + del self._dependencies[key] + raise + + def getVars(self): + ret = [] + keys = sorted(self.vars.keys()) + seen = set() + for key in keys: + dependencies = self.getDependentVarsRecursively(key) + for var in dependencies + [key]: + if var not in seen: + ret.append((var, self.vars[var])) + seen.add(var) + return ret + + def getDependentVarsRecursively(self, parent_var): + dependent_vars = [] + + current_dependent_vars = self._dependencies[parent_var] + for current_var in current_dependent_vars: + if current_var not in dependent_vars: + dependent_vars.append(current_var) + for dep in self.getDependentVarsRecursively(current_var): + if dep not in dependent_vars: + dependent_vars.append(dep) + return dependent_vars + + def _getParentVarNamesRecursively(self, dependent_var, soft=False): + all_parent_vars = set() + vars_to_iterate = set([dependent_var]) + while len(vars_to_iterate) > 0: + current_var = vars_to_iterate.pop() + current_parent_vars = self._dependencies.get(current_var) + if current_parent_vars is None: + if soft: + current_parent_vars = set() + else: + raise Exception("Dependent var {} not found: ".format( + dependent_var)) + new_parent_vars = current_parent_vars - all_parent_vars + vars_to_iterate |= new_parent_vars + all_parent_vars |= new_parent_vars + return all_parent_vars + + +class LocalConf(object): + + def __init__(self, localrc, localconf, services, plugins): + self.localrc = [] + self.meta_sections = {} + if plugins: + self.handle_plugins(plugins) + if services: + self.handle_services(services) + if localrc: + self.handle_localrc(localrc) + if localconf: + self.handle_localconf(localconf) + + def handle_plugins(self, plugins): + for k, v in plugins.items(): + if v: + self.localrc.append('enable_plugin {} {}'.format(k, v)) + + def handle_services(self, services): + for k, v in services.items(): + if v is False: + self.localrc.append('disable_service {}'.format(k)) + elif v is True: + self.localrc.append('enable_service {}'.format(k)) + + def handle_localrc(self, localrc): + vg = VarGraph(localrc) + for k, v in vg.getVars(): + self.localrc.append('{}={}'.format(k, v)) + + def handle_localconf(self, localconf): + for phase, phase_data in localconf.items(): + for fn, fn_data in phase_data.items(): + ms_name = '[[{}|{}]]'.format(phase, fn) + ms_data = [] + for section, section_data in fn_data.items(): + ms_data.append('[{}]'.format(section)) + for k, v in section_data.items(): + ms_data.append('{} = {}'.format(k, v)) + ms_data.append('') + self.meta_sections[ms_name] = ms_data + + def write(self, path): + with open(path, 'w') as f: + f.write('[[local|localrc]]\n') + f.write('\n'.join(self.localrc)) + f.write('\n\n') + for section, lines in self.meta_sections.items(): + f.write('{}\n'.format(section)) + f.write('\n'.join(lines)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + plugins=dict(type='dict'), + services=dict(type='dict'), + localrc=dict(type='dict'), + local_conf=dict(type='dict'), + path=dict(type='str'), + ) + ) + + p = module.params + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('services'), + p.get('plugins')) + lc.write(p['path']) + + module.exit_json() + + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml new file mode 100644 
index 0000000000..1d67616dd4 --- /dev/null +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -0,0 +1,9 @@ +- name: Write a job-specific local_conf file + become: true + become_user: stack + devstack_local_conf: + path: "{{ devstack_local_conf_path }}" + plugins: "{{ devstack_plugins|default(omit) }}" + services: "{{ devstack_services|default(omit) }}" + localrc: "{{ devstack_localrc|default(omit) }}" + local_conf: "{{ devstack_local_conf|default(omit) }}" From 9640d3bfbf55e74560677f9a13c241303666543a Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sun, 15 Oct 2017 16:23:57 -0700 Subject: [PATCH 0712/1936] Zuul: remove file matcher for devstack job The file matcher was from the early versions of this when we were running both v2 and v3. We should always run the new devstack job on all changes to devstack now that v3 is in production and we plan on building jobs off of this one. Change-Id: I7dd336b0059043f6653bdfdcba0ee5cded3e67b1 --- .zuul.yaml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cee195cf39..bb7239abae 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -76,10 +76,4 @@ name: openstack-dev/devstack check: jobs: - - devstack: - files: - - ^playbooks/pre - - ^playbooks/post - - ^playbooks/devstack - - ^roles/ - - .zuul.yaml + - devstack From 6839d42819c8349d8f1e72a58037198c97baca06 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Tue, 17 Oct 2017 12:58:18 +0900 Subject: [PATCH 0713/1936] neutron-legacy: Remove no longer necessary vpnaas conditional VPNaaS agent is going to be an L3 agent extention. Related-Bug: #1692128 Depends-On: I0b86c432e4b2210e5f2a73a7e3ba16d10467f0f2 Change-Id: Id827274b7c74cdf71db6d1f2ab3eadb5fef099f5 --- lib/neutron-legacy | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0ccb17c084..e2e0bb92a9 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -503,11 +503,7 @@ function start_mutnauq_l2_agent { function start_mutnauq_other_agents { run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - if is_service_enabled neutron-vpnaas; then - : # Started by plugin - else - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - fi + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" From 2c2ca80ce0caadc9efa18c9f9289f6b98b3c486e Mon Sep 17 00:00:00 2001 From: Jan Zerebecki Date: Tue, 17 Oct 2017 18:27:47 +0200 Subject: [PATCH 0714/1936] Fix libvirt daemon name condition This makes the condition that chooses which daemon name libvirt to call the same as for choosing the livirt package names. Without this fix the condition checking for a directory is incorrect when livirt is not yet installed, but is used before installing the packages. 
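A minimal bash sketch of the old and new checks, assuming only that devstack's
$DISTRO variable has already been set (normally by GetDistro); this is
illustrative and not part of the change itself:

    #!/bin/bash
    DISTRO=${DISTRO:-xenial}    # placeholder; devstack sets this itself

    # Old check: on a freshly provisioned node libvirt is not installed yet,
    # so /etc/init.d/libvirtd never exists and every Ubuntu release was
    # treated as if it needed libvirt-bin.
    if [ ! -f /etc/init.d/libvirtd ]; then
        echo "old check selects: libvirt-bin"
    fi

    # New check: decide from the release name, which is known before any
    # packages are installed.
    if [ "${DISTRO}" == "xenial" ]; then
        LIBVIRT_DAEMON=libvirt-bin
    else
        LIBVIRT_DAEMON=libvirtd
    fi
    echo "daemon name: ${LIBVIRT_DAEMON}"
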
Change-Id: Ib5eb12769128527a6f4b3b5f7674bd2dad0ed160 --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 8d74c77517..dd299761e4 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -25,7 +25,7 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) # Only Xenial is left with libvirt-bin. Everywhere else is libvirtd -if is_ubuntu && [ ! -f /etc/init.d/libvirtd ]; then +if is_ubuntu && [ ${DISTRO} == "xenial" ]; then LIBVIRT_DAEMON=libvirt-bin else LIBVIRT_DAEMON=libvirtd From 7b8f1e7964cec6ec9b69c7a40136aedcb888ebc2 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Mon, 16 Oct 2017 19:10:09 +0000 Subject: [PATCH 0715/1936] Set default disk driver to virtio-blk on Power Reason is to be identical to the upstream KVM CI. Some Tempest tests rely on vdX virtio-blk device naming. Others simply create their own with a brand new image. Also, the scsi support on the CirrOS image is limited, tests booting from volume fail. Change-Id: I389147a58042aa6098a695e6dd32f3e697fbbbab --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 8b69c73bba..20b83b3cd0 100644 --- a/functions +++ b/functions @@ -364,7 +364,7 @@ function upload_image { esac if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then - img_property="--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" + img_property="--property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" fi if is_arch "aarch64"; then From f0cd9a8b08d92524fc8e2c3f05d08cdebc638e2a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 6 Oct 2017 13:11:48 -0500 Subject: [PATCH 0716/1936] Update lib_install_from_git to use column format The pip list command supports a --format=columns option which outputs things in space delimited columns. Switch to using that. Change-Id: I5140a7d83bf567b1c3c67516112eb4c57074fa53 --- inc/python | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/inc/python b/inc/python index 9c810ec9b9..686efd40cf 100644 --- a/inc/python +++ b/inc/python @@ -415,11 +415,11 @@ function lib_installed_from_git { # you the path an editable install was installed from; for example # in response to something like # pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate' - # pip list shows - # bashate (0.5.2.dev19, /tmp/env/src/bashate) - # Thus we look for "path after a comma" to indicate we were - # installed from some local place - pip list 2>/dev/null | grep -- "$name" | grep -q -- ', .*)$' + # pip list --format columns shows + # bashate 0.5.2.dev19 /tmp/env/src/bashate + # Thus we check the third column to see if we're installed from + # some local place. + [[ -z $(pip list --format=columns 2>/dev/null | awk "/^$name/ {print \$3}") ]] } # check that everything that's in LIBS_FROM_GIT was actually installed From e9870eb18d19dbb807d4d312cf4aead23c6f8f40 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 10 Nov 2016 13:03:32 +0000 Subject: [PATCH 0717/1936] nova: add support for TLS between novnc proxy & compute nodes Nova is gaining the ability to run TLS over the connection between the novnc proxy service and the QEMU/KVM compute node VNC server. 
This adds a new config param - 'NOVA_CONSOLE_PROXY_COMPUTE_TLS=True' - which instructs devstack to configure libvirt/QEMU to enable TLS for the VNC server, and to configure the novncproxy to use TLS when connecting. NB this use of TLS is distinct from use of TLS for the public facing API controlled by USE_SSL, they can be enabled independently. This is done in a generic manner so that it is easy to extend to cover use of TLS with the SPICE and serial console proxy services too. Change-Id: Ib29d3f5f18533115b9c51e27b373e92fc0a28d1a Depends-on: I9cc9a380500715e60bd05aa5c29ee46bc6f8d6c2 Implements bp: websocket-proxy-to-host-security --- lib/nova | 22 ++++++++++++++++++++++ lib/nova_plugins/functions-libvirt | 12 ++++++++++++ lib/tls | 18 ++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/lib/nova b/lib/nova index ea0d2f7b89..c48aba625f 100644 --- a/lib/nova +++ b/lib/nova @@ -82,6 +82,10 @@ if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" fi +# Whether to use TLS for comms between the VNC/SPICE/serial proxy +# services and the compute node +NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} + # Public facing bits NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} @@ -197,6 +201,13 @@ function is_n-cell_enabled { return 1 } +# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy +# service has TLS enabled +function is_nova_console_proxy_compute_tls_enabled { + [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0 + return 1 +} + # Helper to clean iptables rules function clean_iptables { # Delete rules @@ -524,6 +535,17 @@ function create_nova_conf { iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + + if is_nova_console_proxy_compute_tls_enabled ; then + iniset $NOVA_CONF vnc auth_schemes "vencrypt" + iniset $NOVA_CONF vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" + iniset $NOVA_CONF vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" + iniset $NOVA_CONF vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" + + sudo mkdir -p /etc/pki/nova-novnc + deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem + deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + fi else iniset $NOVA_CONF vnc enabled false fi diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 8d74c77517..85b2689c59 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -147,6 +147,18 @@ EOF fi fi + if is_nova_console_proxy_compute_tls_enabled ; then + if is_service_enabled n-novnc ; then + echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF + echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF + + sudo mkdir -p /etc/pki/libvirt-vnc + sudo chown libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc + deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem + deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem + fi + fi + # Service needs to be started on redhat/fedora -- do a restart for # sanity after fiddling the config. 
restart_service $LIBVIRT_DAEMON diff --git a/lib/tls b/lib/tls index 0baf86caa9..bd9272cc18 100644 --- a/lib/tls +++ b/lib/tls @@ -340,6 +340,24 @@ function make_root_CA { fi } +# Deploy the service cert & key to a service specific +# location +function deploy_int_cert { + local cert_target_file=$1 + local key_target_file=$2 + + sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file" + sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file" +} + +# Deploy the intermediate CA cert bundle file to a service +# specific location +function deploy_int_CA { + local ca_target_file=$1 + + sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file" +} + # If a non-system python-requests is installed then it will use the # built-in CA certificate store rather than the distro-specific # CA certificate store. Detect this and symlink to the correct From d8753b7dc997b5b5558ae4bbe3e14ffd0209666b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Oct 2017 14:21:33 +1100 Subject: [PATCH 0718/1936] Use configure-swap from o-z-j Use the generic role Change-Id: I534ed0256b7dfe5bef4ab13ae57cd73c51e8bd15 Depends-On: I0e9c846ace7fac8a1340746c6818fba6ec963018 --- .zuul.yaml | 2 + roles/configure-swap/README.rst | 11 --- roles/configure-swap/defaults/main.yaml | 1 - roles/configure-swap/tasks/ephemeral.yaml | 110 ---------------------- roles/configure-swap/tasks/main.yaml | 63 ------------- roles/configure-swap/tasks/root.yaml | 63 ------------- 6 files changed, 2 insertions(+), 248 deletions(-) delete mode 100644 roles/configure-swap/README.rst delete mode 100644 roles/configure-swap/defaults/main.yaml delete mode 100644 roles/configure-swap/tasks/ephemeral.yaml delete mode 100644 roles/configure-swap/tasks/main.yaml delete mode 100644 roles/configure-swap/tasks/root.yaml diff --git a/.zuul.yaml b/.zuul.yaml index bb7239abae..8d3b2f4e6d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -38,6 +38,8 @@ - openstack/nova - openstack/requirements - openstack/swift + roles: + - zuul: openstack-infra/openstack-zuul-jobs timeout: 7200 vars: devstack_localrc: diff --git a/roles/configure-swap/README.rst b/roles/configure-swap/README.rst deleted file mode 100644 index eaba5cf595..0000000000 --- a/roles/configure-swap/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -Configure a swap partition - -Creates a swap partition on the ephemeral block device (the rest of which -will be mounted on /opt). - -**Role Variables** - -.. zuul:rolevar:: configure_swap_size - :default: 8192 - - The size of the swap partition, in MiB. 
diff --git a/roles/configure-swap/defaults/main.yaml b/roles/configure-swap/defaults/main.yaml deleted file mode 100644 index 4d622321c7..0000000000 --- a/roles/configure-swap/defaults/main.yaml +++ /dev/null @@ -1 +0,0 @@ -configure_swap_size: 8192 diff --git a/roles/configure-swap/tasks/ephemeral.yaml b/roles/configure-swap/tasks/ephemeral.yaml deleted file mode 100644 index c2316eac2b..0000000000 --- a/roles/configure-swap/tasks/ephemeral.yaml +++ /dev/null @@ -1,110 +0,0 @@ -# Configure attached ephemeral devices for storage and swap - -- assert: - that: - - "ephemeral_device is defined" - -- name: Set partition names - set_fact: - swap_partition: "{{ ephemeral_device}}1" - opt_partition: "{{ ephemeral_device}}2" - -- name: Ensure ephemeral device is unmounted - become: yes - mount: - name: "{{ ephemeral_device }}" - state: unmounted - -- name: Get existing partitions - become: yes - parted: - device: "{{ ephemeral_device }}" - unit: MiB - register: ephemeral_partitions - -- name: Remove any existing partitions - become: yes - parted: - device: "{{ ephemeral_device }}" - number: "{{ item.num }}" - state: absent - with_items: - - "{{ ephemeral_partitions.partitions }}" - -- name: Create new disk label - become: yes - parted: - label: msdos - device: "{{ ephemeral_device }}" - -- name: Create swap partition - become: yes - parted: - device: "{{ ephemeral_device }}" - number: 1 - state: present - part_start: '0%' - part_end: "{{ configure_swap_size }}MiB" - -- name: Create opt partition - become: yes - parted: - device: "{{ ephemeral_device }}" - number: 2 - state: present - part_start: "{{ configure_swap_size }}MiB" - part_end: "100%" - -- name: Make swap on partition - become: yes - command: "mkswap {{ swap_partition }}" - -- name: Write swap to fstab - become: yes - mount: - path: none - src: "{{ swap_partition }}" - fstype: swap - opts: sw - passno: 0 - dump: 0 - state: present - -# XXX: does "parted" plugin ensure the partition is available -# before moving on? No udev settles here ... - -- name: Add all swap - become: yes - command: swapon -a - -- name: Create /opt filesystem - become: yes - filesystem: - fstype: ext4 - dev: "{{ opt_partition }}" - -# Rackspace at least does not have enough room for two devstack -# installs on the primary partition. We copy in the existing /opt to -# the new partition on the ephemeral device, and then overmount /opt -# to there for the test runs. -# -# NOTE(ianw): the existing "mount" touches fstab. There is currently (Sep2017) -# work in [1] to split mount & fstab into separate parts, but for now we bundle -# it into an atomic shell command -# [1] https://github.com/ansible/ansible/pull/27174 -- name: Copy old /opt - become: yes - shell: | - mount {{ opt_partition }} /mnt - find /opt/ -mindepth 1 -maxdepth 1 -exec mv {} /mnt/ \; - umount /mnt - -# This overmounts any existing /opt -- name: Add opt to fstab and mount - become: yes - mount: - path: /opt - src: "{{ opt_partition }}" - fstype: ext4 - opts: noatime - state: mounted diff --git a/roles/configure-swap/tasks/main.yaml b/roles/configure-swap/tasks/main.yaml deleted file mode 100644 index 8960c726c8..0000000000 --- a/roles/configure-swap/tasks/main.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# On RAX hosts, we have a small root partition and a large, -# unallocated ephemeral device attached at /dev/xvde -- name: Set ephemeral device if /dev/xvde exists - when: ansible_devices["xvde"] is defined - set_fact: - ephemeral_device: "/dev/xvde" - -# On other providers, we have a device called "ephemeral0". 
-# -# NOTE(ianw): Once [1] is in our ansible (2.4 era?), we can figure -# this out more directly by walking the device labels in the facts -# -# [1] https://github.com/ansible/ansible/commit/d46dd99f47c0ee5081d15bc5b741e9096d8bfd3e -- name: Set ephemeral device by label - when: ephemeral_device is undefined - block: - - name: Get ephemeral0 device node - command: /sbin/blkid -L ephemeral0 - register: ephemeral0 - # If this doesn't exist, returns !0 - ignore_errors: yes - changed_when: False - - - name: Set ephemeral device if LABEL exists - when: "ephemeral0.rc == 0" - set_fact: - ephemeral_device: "{{ ephemeral0.stdout }}" - -# If we have ephemeral storage and we don't appear to have setup swap, -# we will create a swap and move /opt to a large data partition there. -- include: ephemeral.yaml - static: no - when: - - ephemeral_device is defined - - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size - -# If no ephemeral device and no swap, then we will setup some swap -# space on the root device to ensure all hosts a consistent memory -# environment. -- include: root.yaml - static: no - when: - - ephemeral_device is undefined - - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size - -# ensure a standard level of swappiness. Some platforms -# (rax+centos7) come with swappiness of 0 (presumably because the -# vm doesn't come with swap setup ... but we just did that above), -# which depending on the kernel version can lead to the OOM killer -# kicking in on some processes despite swap being available; -# particularly things like mysql which have very high ratio of -# anonymous-memory to file-backed mappings. -# -# This sets swappiness low; we really don't want to be relying on -# cloud I/O based swap during our runs if we can help it -- name: Set swappiness - become: yes - sysctl: - name: vm.swappiness - value: 30 - state: present - -- debug: var=ephemeral_device diff --git a/roles/configure-swap/tasks/root.yaml b/roles/configure-swap/tasks/root.yaml deleted file mode 100644 index f22b53700f..0000000000 --- a/roles/configure-swap/tasks/root.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# If no ephemeral devices are available, use root filesystem - -- name: Calculate required swap - set_fact: - swap_required: "{{ configure_swap_size - ansible_memory_mb['swap']['total'] | int }}" - -- block: - - name: Get root filesystem - shell: df --output='fstype' /root | tail -1 - register: root_fs - - - name: Save root filesystem - set_fact: - root_filesystem: "{{ root_fs.stdout }}" - - - debug: var=root_filesystem - -# Note, we don't use a sparse device to avoid wedging when disk space -# and memory are both unavailable. 
- -# Cannot fallocate on filesystems like XFS, so use slower dd -- name: Create swap backing file for non-EXT fs - when: '"ext" not in root_filesystem' - become: yes - command: dd if=/dev/zero of=/root/swapfile bs=1M count={{ swap_required }} - args: - creates: /root/swapfile - -- name: Create sparse swap backing file for EXT fs - when: '"ext" in root_filesystem' - become: yes - command: fallocate -l {{ swap_required }}M /root/swapfile - args: - creates: /root/swapfile - -- name: Ensure swapfile perms - become: yes - file: - path: /root/swapfile - owner: root - group: root - mode: 0600 - -- name: Make swapfile - become: yes - command: mkswap /root/swapfile - -- name: Write swap to fstab - become: yes - mount: - path: none - src: /root/swapfile - fstype: swap - opts: sw - passno: 0 - dump: 0 - state: present - -- name: Add all swap - become: yes - command: swapon -a - -- debug: var=swap_required From 065779517f9c99a80fbc39d51784c614e4ee341c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 20 Oct 2017 12:14:29 -0700 Subject: [PATCH 0719/1936] Properly get pip version The old code was strip()ing the version string instead of split()ing the version string so we always got the first character of the version string. This worked fine as long as the pip version was single digit but as soon as it rolls over to '10.stuff' we will compare: pip version 1 (instead of 10) > 6 Which fails bceause 1 is less than six. Instaed we really do want to compare 10 > 6 so use split on '.' instead. Change-Id: Ic7d0c04d7fa77774ab2d70fb9d11f182becec553 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 9c810ec9b9..8064014f36 100644 --- a/inc/python +++ b/inc/python @@ -333,7 +333,7 @@ function pip_install { # packages like setuptools? local pip_version pip_version=$(python -c "import pip; \ - print(pip.__version__.strip('.')[0])") + print(pip.__version__.split('.')[0])") if (( pip_version<6 )); then die $LINENO "Currently installed pip version ${pip_version} does not" \ "meet minimum requirements (>=6)." From c9e109f1359e2310fab1190d164a28822fc09208 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 21 Oct 2017 18:04:49 +0200 Subject: [PATCH 0720/1936] Disable nested virt in base devstack job We were doing this in devstack-gate. The gate can't handle nested virt. Change-Id: Ie71663c3144908ddd134fa5a5b1b2d265a60edcf --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index bb7239abae..9e00231265 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -61,6 +61,8 @@ NETWORK_GATEWAY: 10.1.0.1 NOVNC_FROM_PACKAGE: True ERROR_ON_CLONE: True + # Gate jobs can't deal with nested virt. Disable it. + LIBVIRT_TYPE: qemu # NOTE(dims): etcd 3.x is not available in debian/ubuntu # etc. As a stop gap measure, devstack uses wget to download # from the location below for all the CI jobs. From e8db8674855634dadd90adaadd4381f70c7227ed Mon Sep 17 00:00:00 2001 From: Chandan Kumar Date: Thu, 26 Oct 2017 15:34:05 +0530 Subject: [PATCH 0721/1936] Change ENABLE_KSM to $ENABLE_KSM * Since ENABLE_KSM param will be used in local.conf file and it's value is received in a variable and while compairing, the variable needs to be compared. So we need to change the same. 
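A short bash sketch of why the quoted literal can never match, illustrative
only and not part of the change:

    #!/bin/bash
    ENABLE_KSM=True

    # Broken form: compares the fixed string "ENABLE_KSM" with "True",
    # which is never equal, so KSM was never turned on regardless of the
    # local.conf setting.
    if [[ "ENABLE_KSM" == "True" ]]; then
        echo "never reached"
    fi

    # Fixed form: expands the variable and compares its value.
    if [[ $ENABLE_KSM == "True" ]]; then
        echo "KSM would be enabled"
    fi
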
Change-Id: Id4ed17c0642acd2313e456503cfc375ca6f61409 Closes-Bug: #1724690 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c545c56de3..f14ed96b9b 100755 --- a/stack.sh +++ b/stack.sh @@ -1006,7 +1006,7 @@ save_stackenv $LINENO # be memory bound not cpu bound so enable KSM by default but allow people # to opt out if the CPU time is more important to them. -if [[ "ENABLE_KSM" == "True" ]] ; then +if [[ $ENABLE_KSM == "True" ]] ; then if [[ -f /sys/kernel/mm/ksm/run ]] ; then sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" fi From 228d90d5eb7784f3d4cbd889f478329a8d3a6868 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Thu, 26 Oct 2017 16:46:15 -0700 Subject: [PATCH 0722/1936] Zuul: add run attribute A proposed change[1] to Zuul removes the implied run attribute. Add an explicit run attribute here to prepare for that. [1] Ia8f23bce9898cd4f387554e6787b091b63e75519 Change-Id: I1fbc36c3d1b8c4ed70fceef1c587255dad50da04 --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 9e00231265..826410c2c7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -71,6 +71,7 @@ horizon: False tempest: False pre-run: playbooks/pre + run: playbooks/devstack post-run: playbooks/post From c0d8c1c72d61d53bf723de4e0bea6318c6b951b2 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sat, 28 Oct 2017 10:23:58 -0700 Subject: [PATCH 0723/1936] Zuul: add file extension to playbook path Zuul now supports including the file extension on the playbook path and omitting the extension is now deprecrated. Update references to include the extension. Change-Id: I4bff5f12742364f7cc92e17869a047fd2185dda4 --- .zuul.yaml | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 389ada52c2..710b229d5b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,7 +48,7 @@ ADMIN_PASSWORD: secretadmin SERVICE_PASSWORD: secretservice NETWORK_GATEWAY: 10.1.0.1 - Q_USE_DEBUG_COMMAND: True + Q_USE_DEBUG_COMMAND: true FIXED_RANGE: 10.1.0.0/20 IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 FLOATING_RANGE: 172.24.5.0/24 @@ -56,25 +56,24 @@ FLOATING_HOST_PREFIX: 172.24.4 FLOATING_HOST_MASK: 23 SWIFT_REPLICAS: 1 - SWIFT_START_ALL_SERVICES: False + SWIFT_START_ALL_SERVICES: false LOGFILE: /opt/stack/logs/devstacklog.txt - LOG_COLOR: False - VERBOSE: True - NETWORK_GATEWAY: 10.1.0.1 - NOVNC_FROM_PACKAGE: True - ERROR_ON_CLONE: True + LOG_COLOR: false + VERBOSE: true + NOVNC_FROM_PACKAGE: true + ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it. LIBVIRT_TYPE: qemu # NOTE(dims): etcd 3.x is not available in debian/ubuntu # etc. As a stop gap measure, devstack uses wget to download # from the location below for all the CI jobs. 
- ETCD_DOWNLOAD_URL: "http://tarballs.openstack.org/etcd/" + ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: - horizon: False - tempest: False - pre-run: playbooks/pre - run: playbooks/devstack - post-run: playbooks/post + horizon: false + tempest: false + pre-run: playbooks/pre.yaml + run: playbooks/devstack.yaml + post-run: playbooks/post.yaml - project: From 32ecccaeef1b06bb48fc6fe066be4f42332e5a2a Mon Sep 17 00:00:00 2001 From: caowei Date: Wed, 1 Nov 2017 11:45:21 +0800 Subject: [PATCH 0724/1936] Add "sudo" before the journalctl command Change-Id: I41c396f704918e88168c9a9c6cc9e633e7a81437 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 23f680a59c..d932d8cd86 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -294,7 +294,7 @@ the systemd journal. To query the logs use the ``journalctl`` command, such as:: - journalctl --unit devstack@* + sudo journalctl --unit devstack@* More examples can be found in :ref:`journalctl-examples`. From a794b12cf2a6b349272ce44892635d07231059d1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 1 Nov 2017 06:34:13 +0000 Subject: [PATCH 0725/1936] Updated from generate-devstack-plugins-list Change-Id: Ic6d3cdb0b1fcc674ab5adcbaf45fa5a80f10b10f --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 5fd6697d35..c3063ac2ed 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -67,6 +67,7 @@ gce-api `git://git.openstack.org/openstack/gce-ap glare `git://git.openstack.org/openstack/glare `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ heat `git://git.openstack.org/openstack/heat `__ +heat-dashboard `git://git.openstack.org/openstack/heat-dashboard `__ horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ ironic `git://git.openstack.org/openstack/ironic `__ ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ From 1d968d7a542c7a271d72aac08fdd7fe8b235011f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 23 Sep 2017 14:45:42 +0200 Subject: [PATCH 0726/1936] Switch to mariadb on openSUSE The mysql-community-server is a compat provide, openSUSE uses mariadb for quite some time. Make it futureproof in case the compat provide goes away in the future. Cleanup mysql service name to MYSQL_SERVICE_NAME and consistently use it. Change-Id: I2df7b8d8b798dfa7ceade90e0c127e0609524a8b --- lib/databases/mysql | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index a0cf7a4296..0089663285 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -15,10 +15,9 @@ MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} register_database mysql -# Linux distros, thank you for being incredibly consistent -MYSQL=mysql +MYSQL_SERVICE_NAME=mysql if is_fedora && ! 
is_oraclelinux; then - MYSQL=mariadb + MYSQL_SERVICE_NAME=mariadb fi # Functions @@ -34,17 +33,17 @@ function get_database_type_mysql { # Get rid of everything enough to cleanly change database backends function cleanup_database_mysql { - stop_service $MYSQL + stop_service $MYSQL_SERVICE_NAME if is_ubuntu; then # Get ruthless with mysql apt_get purge -y mysql* mariadb* sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql return - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then uninstall_package mysql-community-server sudo rm -rf /var/lib/mysql - elif is_fedora; then + elif is_suse || is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql else @@ -64,12 +63,9 @@ function configure_database_mysql { if is_ubuntu; then my_conf=/etc/mysql/my.cnf - mysql=mysql elif is_suse || is_oraclelinux; then my_conf=/etc/my.cnf - mysql=mysql elif is_fedora; then - mysql=mariadb my_conf=/etc/my.cnf local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf if [ -f "$cracklib_conf" ]; then @@ -82,7 +78,7 @@ function configure_database_mysql { # Start mysql-server if is_fedora || is_suse; then # service is not started by default - start_service $mysql + start_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already @@ -124,7 +120,7 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi - restart_service $mysql + restart_service $MYSQL_SERVICE_NAME } function install_database_mysql { @@ -151,13 +147,11 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_suse || is_oraclelinux; then - if ! is_package_installed mariadb; then - install_package mysql-community-server - fi - elif is_fedora; then + if is_oraclelinux; then + install_package mysql-community-server + elif is_fedora || is_suse; then install_package mariadb-server - sudo systemctl enable mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then install_package mysql-server else From 1db9b5d3cab9ecfdc3505ea40ac4f504075fbea0 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 3 Nov 2017 08:37:21 +0000 Subject: [PATCH 0727/1936] Remove apache tls-proxy sites when stopping Currently doing a cycle of ./stack.sh; ./unstack.sh; ./stack.sh fails because the leftover tls-proxy sites will cause apache startup to fail on the second stack.sh run. So we need to disable these sites on running stop_tls_proxy. Change-Id: I03e6879be332289d19ca6a656f5f9f139dffff6f Closes-Bug: 1718189 --- lib/tls | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/tls b/lib/tls index 0baf86caa9..0bc389bb88 100644 --- a/lib/tls +++ b/lib/tls @@ -564,6 +564,20 @@ function follow_tls_proxy { # using tls configuration are down. function stop_tls_proxy { stop_apache_server + + # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but + # necessary so that we can restart after an unstack. A better + # solution would be to ensure that each service calling + # start_tls_proxy will call stop_tls_proxy with the same + # parameters on shutdown so we can use the disable_apache_site + # function and remove individual files there. 
+ if is_ubuntu; then + sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf + else + for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do + sudo mv $i $i.disabled + done + fi } # Clean up the CA files From efc5168245406156a98a6623b9bebae757275cf7 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 10 Nov 2017 00:50:48 -0500 Subject: [PATCH 0728/1936] Replace deprecated nova_metadata_ip Option nova_metadata_ip was deprecated in favor of nova_metadata_host. lib/neutron was updated recently but lib/neutron-legacy was missed. Change-Id: Iadd42458dda705ad0c24aa4ab2afd5b27dd8f0e1 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0ccb17c084..bb76c5f9ce 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -757,7 +757,7 @@ function _configure_neutron_metadata_agent { cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then From 3b815a3cb2537885b501ad559bfd69ad5efb9085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Tue, 14 Nov 2017 16:04:51 +0100 Subject: [PATCH 0729/1936] Provide finite value for systemd's TimeoutStopSec This commit switches TimeoutStopSec in DevStack's systemd unit files from "infinity" to "300". There are two motivations for that change: * 5 minutes should be more than enough to stop a service. * systemd included in CentOS 7 and RHEL 7 doesn't support "infinity" as a value, "0" should be provided instead. When "infinity" is set, systemd will kill the service instantly, leaving service children processes orphaned. Instead of differentiating here, we can just set a sane, finite number. Closes-Bug: 1731275 Change-Id: I0a079ea9879fa4fbba23104c2f5ab6e0721a2a2a --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 030ff8c02d..0160c622b7 100644 --- a/functions-common +++ b/functions-common @@ -1394,7 +1394,7 @@ function write_user_unit_file { iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "KillMode" "process" - iniset -sudo $unitfile "Service" "TimeoutStopSec" "infinity" + iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" From 83194f956e1c5308386adbf7146626e75875bcfe Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 3 Nov 2017 14:14:53 +0000 Subject: [PATCH 0730/1936] Update supported Ubuntu releases With the release of 17.10(artful), support for 16.10(yakkety) has ended. Update our list of supported distros accordingly. Change-Id: Id85e00f109cfd43141dec0c0d2bfedb66f14e664 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index a125d4a0d7..1d0381483a 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! 
${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From e340a7bfff80889361085cfb592b1880d27b5a21 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 Nov 2017 06:17:45 +0000 Subject: [PATCH 0731/1936] Updated from generate-devstack-plugins-list Change-Id: I884ae2ff23f8c23874e166dd7643b188f7450109 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c3063ac2ed..43dd3c2eae 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -151,6 +151,7 @@ oswin-tempest-plugin `git://git.openstack.org/openstack/oswin- panko `git://git.openstack.org/openstack/panko `__ patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ +python-openstacksdk `git://git.openstack.org/openstack/python-openstacksdk `__ qinling `git://git.openstack.org/openstack/qinling `__ rally `git://git.openstack.org/openstack/rally `__ sahara `git://git.openstack.org/openstack/sahara `__ From 4bc42c7197c291639be6841d75f02dc008b8b915 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 7 Nov 2017 15:22:00 -0500 Subject: [PATCH 0732/1936] Change lib/neutron-legacy to use openstackclient neutronclient has been deprecated, use openstack. Change-Id: I55ea7b8c90b54c05aa0e3f3d4543732e516dc2e6 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0ccb17c084..39f41fdccc 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -929,7 +929,7 @@ function delete_probe { } function _get_net_id { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' } function _get_probe_cmd_prefix { From 0d0b69027bc2b0195ed365619be78466867311ec Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Nov 2017 10:33:11 +1100 Subject: [PATCH 0733/1936] Restore qemu-kvm install for CentOS The kvmibm removal I009ae4779588615633bff81d0c47a1b879ec9279 incorrectly removed this (the check was install if *not* kvmibm). Since we don't support kvmibm any more, it should be safe to install everywhere as done here. For the full history, it started with us installing qemu-kvm-ev with Ide91b261f35fb19d8bd7155ca016fa3b76a45ea1, then we fixed it to be more generic and just install qemu-kvm with I46da627c0da8925064862fdc283db81591979285, then Fedora 26 support in I5c79ad1ef0b11dba30c931a59786f9eb7e7f8587 made this install everywhere *but* kvmibm. 
Change-Id: If3e9661451ad1055e7c8d670605a53095f0aeda4 --- lib/nova_plugins/functions-libvirt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index c8527387ab..dbfa3b5cac 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -72,6 +72,13 @@ function install_libvirt { pip_install_gr libvirt-python #pip_install_gr elif is_fedora || is_suse; then + + # Note that in CentOS/RHEL this needs to come from the RDO + # repositories (qemu-kvm-ev ... which provides this package) + # as the base system version is too old. We should have + # pre-installed these + install_package qemu-kvm + install_package libvirt libvirt-devel pip_uninstall libvirt-python pip_install_gr libvirt-python From 1a2c86cff99d5628f93105c8d4ed815baf781941 Mon Sep 17 00:00:00 2001 From: jiangyikun Date: Thu, 7 Sep 2017 17:56:13 +0800 Subject: [PATCH 0734/1936] Cleanup nova cell configuration before config it Some old configuration(such as, LOG_COLOR config) will remain if we don't cleanup. So, we should cleanup the configuration before we config it. Change-Id: I7aff609dadf3acba13a36894614b35005f51280d --- lib/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/nova b/lib/nova index ea0d2f7b89..50deeeb220 100644 --- a/lib/nova +++ b/lib/nova @@ -589,6 +589,8 @@ function create_nova_conf { local vhost conf=$(conductor_conf $i) vhost="nova_cell${i}" + # clean old conductor conf + rm -f $conf iniset $conf database connection `database_connection_url nova_cell${i}` iniset $conf conductor workers "$API_WORKERS" iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" From c5853ac1afe79c9b96a2c4cbd30069566ab12955 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Tue, 21 Nov 2017 09:44:42 -0800 Subject: [PATCH 0735/1936] Allow plugins to express dependency info Add a no-op function, "plugin_requires" to allow plugins to indicate their dependencies on each other. This will be used by the Devstack Ansible module when writing local.conf files. Also add define_plugin to allow plugins to indicate their canonical names. Change-Id: Ibd8c7222ed7dfb08d7ea821d871fc6f3b88de24b --- doc/source/plugins.rst | 25 +++++++++++++++++++++++++ functions-common | 29 +++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index fae1a1d8f5..89b9381813 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -54,6 +54,31 @@ directory. Inside this directory there can be 3 files. default value only if the variable is unset or empty; e.g. in bash syntax ``FOO=${FOO:-default}``. + The file should include a ``define_plugin`` line to indicate the + plugin's name, which is the name that should be used by users on + "enable_plugin" lines. 
It should generally be the last component of + the git repo path (e.g., if the plugin's repo is + openstack/devstack-foo, then the name here should be "foo") :: + + define_plugin + + If your plugin depends on another plugin, indicate it in this file + with one or more lines like the following:: + + plugin_requires + + For a complete example, if the plugin "foo" depends on "bar", the + ``settings`` file should include:: + + define_plugin foo + plugin_requires foo bar + + Devstack does not currently use this dependency information, so it's + important that users continue to add enable_plugin lines in the + correct order in ``local.conf``, however adding this information + allows other tools to consider dependency information when + automatically generating ``local.conf`` files. + - ``plugin.sh`` - the actual plugin. It is executed by devstack at well defined points during a ``stack.sh`` run. The plugin.sh internal structure is discussed below. diff --git a/functions-common b/functions-common index 6d565916fa..aee569bee2 100644 --- a/functions-common +++ b/functions-common @@ -1703,6 +1703,35 @@ function run_phase { fi } +# define_plugin +# +# This function is a no-op. It allows a plugin to define its name So +# that other plugins may reference it by name. It should generally be +# the last component of the canonical git repo name. E.g., +# openstack/devstack-foo should use "devstack-foo" as the name here. +# +# This function is currently a noop, but the value may still be used +# by external tools (as in plugin_requires) and may be used by +# devstack in the future. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +function define_plugin { + : +} + +# plugin_requires +# +# This function is a no-op. It is currently used by external tools +# (such as the devstack module for Ansible) to automatically generate +# local.conf files. It is not currently used by devstack itself to +# resolve dependencies. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``other`` is the name of another plugin +function plugin_requires { + : +} + # Service Functions # ================= From b5fb7fd627fe24876a8dd2782fce065cf1957b55 Mon Sep 17 00:00:00 2001 From: rabi Date: Wed, 22 Nov 2017 08:14:29 +0530 Subject: [PATCH 0736/1936] Fix lib_installed_from_git In commit f0cd9a8b08d92524fc8e2c3f05d08cdebc638e2a we changed to use column format, but it checks for zero length string and check_libs_from_git fails. Change-Id: I97b52b80efb33749647229a55147a08afa112dd2 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 9a65bfefb1..9f56ec5e7f 100644 --- a/inc/python +++ b/inc/python @@ -419,7 +419,7 @@ function lib_installed_from_git { # bashate 0.5.2.dev19 /tmp/env/src/bashate # Thus we check the third column to see if we're installed from # some local place. - [[ -z $(pip list --format=columns 2>/dev/null | awk "/^$name/ {print \$3}") ]] + [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$name/ {print \$3}") ]] } # check that everything that's in LIBS_FROM_GIT was actually installed From 007f588f275ac9d7320d4b24f1d05378d3df3d37 Mon Sep 17 00:00:00 2001 From: DamonLi Date: Thu, 23 Nov 2017 10:05:46 +0800 Subject: [PATCH 0737/1936] Convert to safe name in lib_installed_from_git The 'pip list' command prints the "safe name" which converts _'s to -'s amongst other things; e.g. 
glance_store becomes "glance-store 0.21.1.dev22 /opt/stack/glance_store" Because people may use these more familiar "file system" names in LIBS_FROM_GIT automatically convert names when checking if libraries are installed. Change-Id: I30524f80a341f38dfa794a8f629d859e85a4a448 --- inc/python | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 9f56ec5e7f..9938f98545 100644 --- a/inc/python +++ b/inc/python @@ -406,6 +406,9 @@ function use_library_from_git { # determine if a package was installed from git function lib_installed_from_git { local name=$1 + local safe_name + safe_name=$(python -c "from pkg_resources import safe_name; \ + print(safe_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from. This doesn't work with zuul which clones @@ -419,7 +422,7 @@ function lib_installed_from_git { # bashate 0.5.2.dev19 /tmp/env/src/bashate # Thus we check the third column to see if we're installed from # some local place. - [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$name/ {print \$3}") ]] + [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]] } # check that everything that's in LIBS_FROM_GIT was actually installed From 34c1679f2ee42df40c32ad96b4269f94721dc8d4 Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Mon, 12 Jun 2017 09:32:21 -0700 Subject: [PATCH 0738/1936] use master upper-constraints when building tempest venv The local requirements repo can be checked out to a stable branch, in which case, the requirements might conflict with tempest's master requirements. Master branch's upper-constraints should be used when building tempest's venv. Closes-Bug: #1706009 Change-Id: Ifd64638cae2886671421149dbbff3a57f9c64257 --- lib/tempest | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bdbaaa5678..fc88f37e21 100644 --- a/lib/tempest +++ b/lib/tempest @@ -551,7 +551,10 @@ function configure_tempest { if [[ "$OFFLINE" != "True" ]]; then tox -revenv-tempest --notest fi - tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + + # The requirements might be on a different branch, while tempest needs master requirements. + git -C $REQUIREMENTS_DIR show master:upper-constraints.txt > u-c-m.txt + tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt # Auth: iniset $TEMPEST_CONFIG auth tempest_roles "Member" From ad180e0e35d45ebe15840e02dbba835d864a2c58 Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Wed, 29 Nov 2017 13:21:30 -0500 Subject: [PATCH 0739/1936] Install etcdctl along with etcd3 It is better to have this command line tool for debugging etcd. Change-Id: Ie0eb79d4e543df29ce6a38b57c8ef57a5d2211b4 --- lib/etcd3 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/etcd3 b/lib/etcd3 index 51df8e4c1e..d3f72264b0 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -107,9 +107,11 @@ function install_etcd3 { tar xzvf $etcd_file -C $FILES sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl fi if [ ! 
-f "$ETCD_BIN_DIR/etcd" ]; then sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl fi } From 27367bea48a14ce991f6e60400d61ab2aa11532b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 28 Nov 2017 08:20:48 -0500 Subject: [PATCH 0740/1936] Allow overrides for ETCD ports This will enable us to run one etcd for devstack and another for kubernetes in the same box if necessary Change-Id: Ib71ded24727b80afd4d98eb68bade0f8c0f72311 --- lib/cinder | 2 +- lib/etcd3 | 9 ++++----- stackrc | 2 ++ 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 07f82a1580..2851966bcf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -327,7 +327,7 @@ function configure_cinder { if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" elif is_service_enabled etcd3; then - iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:2379" + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi } diff --git a/lib/etcd3 b/lib/etcd3 index 51df8e4c1e..dc68524476 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -27,7 +27,6 @@ set +o xtrace ETCD_DATA_DIR="$DATA_DIR/etcd" ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" -ETCD_PORT=2379 if is_ubuntu ; then UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` @@ -38,13 +37,13 @@ function start_etcd3 { local cmd="$ETCD_BIN_DIR/etcd" cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" - cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380" - cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380" + cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT" cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then - cmd+=" --listen-peer-urls http://[::]:2380 " + cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT " else - cmd+=" --listen-peer-urls http://0.0.0.0:2380 " + cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT " fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" diff --git a/stackrc b/stackrc index ffe405012d..ed356ee237 100644 --- a/stackrc +++ b/stackrc @@ -748,6 +748,8 @@ elif is_arch "s390x"; then else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi +ETCD_PORT=${ETCD_PORT:-2379} +ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380} ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz From 1d127849121974fe6c8161eabe5ebd7151fa4f4d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 30 Nov 2017 14:15:04 -0600 Subject: [PATCH 0741/1936] Remove Cinder policy.json install Cinder has now implemented "policy in code" and policy.json is only needed for overriding default policies. The default policy.json file has been removed in Cinder so we need to stop trying to copy it during Cinder setup. 
Change-Id: I364e401227fe43e2bacf8a799e10286ee445f835 --- lib/cinder | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 07f82a1580..01686f7ddf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -206,8 +206,6 @@ function cleanup_cinder { function configure_cinder { sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR - rm -f $CINDER_CONF configure_rootwrap cinder From 9ef3e8448580e9d166e1a4669a036da0205243b5 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 3 Dec 2017 10:11:12 -0600 Subject: [PATCH 0742/1936] Add python-openstacksdk to python3 enabled list It's perfectly fine with python3, add it to the list. Change-Id: Ieb9f563a2f95e78a994cef388e56a6d5a84c8935 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index ffe405012d..286a04d3fe 100644 --- a/stackrc +++ b/stackrc @@ -121,7 +121,7 @@ export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) # base name of the directory from which they are installed. See # enable_python3_package to edit this variable and use_python3_for to # test membership. -export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient" +export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,python-openstacksdk" # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. From e46f22db7fcb6e9aae5922d650eda14a15231fb8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 3 Dec 2017 10:21:26 -0600 Subject: [PATCH 0743/1936] Add workaround for openstacksdk in check_libs_from_git python-openstacksdk does not match its pip name which is openstacksdk. So setting python-openstacksdk in LIBS_FROM_GIT leads to devstack thinking there is a problem. Put in a workaround for now. It would be better to either: a) rename python-openstacksdk repo to openstacksdk b) rename the pip name for openstacksdk back to python-openstacksdk c) add general support in the various GIT hashes for a pip name Change-Id: I57cf95763d54ad2060a4ce2af91c3ba18ca04db0 --- inc/python | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/inc/python b/inc/python index 9938f98545..2e4eff02ea 100644 --- a/inc/python +++ b/inc/python @@ -407,6 +407,12 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name + # TODO(mordred) This is a special case for python-openstacksdk, where the + # repo name and the pip name do not match. We should either add systemic + # support for providing aliases, or we should rename the git repo. + if [[ $name == 'python-openstacksdk' ]] ; then + name=openstacksdk + fi safe_name=$(python -c "from pkg_resources import safe_name; \ print(safe_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from. This doesn't work with zuul which clones From cb8256f22531b59470703ca4090064cba4a0b55e Mon Sep 17 00:00:00 2001 From: lkuchlan Date: Wed, 30 Aug 2017 07:36:11 +0300 Subject: [PATCH 0744/1936] Add fixed_key parameter under key_manager section in cinder conf This change is necessary for uploading an encrypted volume to glance.
Change-Id: I0975fc0c49ec243025d076f0406e28c4f5522d3a --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 07f82a1580..a966a4b28e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -244,6 +244,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager + iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" From 5551170ad3902de9c3f271122821a9822658ad50 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Thu, 30 Nov 2017 15:49:39 +0000 Subject: [PATCH 0745/1936] Allow jobs to disable all services Several legacy jobs use the OVERRIDE_ENABLED_SERVICES variable from d-g so set the list of services that should be enabled and ignore the default set calculated via the feature matrix. Add support for a similar functionality in the zuulv3 jobs using the 'disable_all_services' localconf function. Change-Id: I690554ec62cef3be600054071efbb3f92a99249e --- roles/write-devstack-local-conf/README.rst | 7 ++++++- .../library/devstack_local_conf.py | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst index e30dfa1e9f..1b7eb1b4c0 100644 --- a/roles/write-devstack-local-conf/README.rst +++ b/roles/write-devstack-local-conf/README.rst @@ -53,7 +53,12 @@ Write the local.conf file for use by devstack A dictionary mapping service names to boolean values. If the boolean value is ``false``, a ``disable_service`` line will be emitted for the service name. If it is ``true``, then - ``enable_service`` will be emitted. All other values are ignored. + ``enable_service`` will be emitted. All other values are ignored. + The special key ``base`` can be used to enable or disable the base set of + services enabled by default. If ``base`` is found, it will processed before + all other keys. If its value is ``False`` a ``disable_all_services`` will be + emitted; if its value is ``True`` nothing will be emitted since base + services are enabled by default. .. zuul:rolevar:: devstack_plugins :type: dict diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 4134beb048..dbd60f52b9 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -124,6 +124,9 @@ def handle_plugins(self, plugins): self.localrc.append('enable_plugin {} {}'.format(k, v)) def handle_services(self, services): + base_services = services.pop('base', True) + if not base_services: + self.localrc.append('disable_all_services') for k, v in services.items(): if v is False: self.localrc.append('disable_service {}'.format(k)) From 1489b9e7101fef6270eea008917b08f3da91a771 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 5 Dec 2017 23:46:58 +0000 Subject: [PATCH 0746/1936] Move remove_uwsgi_config to cleanup_placement It had been in stop_placement, but we don't want it there: the old side of grenade needs to call that but should not remove the uwsgi configuration when doing so. It is configuration, after all. 
Change-Id: Iee763adf7895145d97b184924896db3f1f48a015 Partial-Bug: #1736385 --- lib/placement | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/placement b/lib/placement index d3fb8c848d..1875857552 100644 --- a/lib/placement +++ b/lib/placement @@ -71,6 +71,7 @@ function is_placement_enabled { function cleanup_placement { sudo rm -f $(apache_site_config_for nova-placement-api) sudo rm -f $(apache_site_config_for placement-api) + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" } # _config_placement_apache_wsgi() - Set WSGI config files @@ -188,7 +189,6 @@ function start_placement { function stop_placement { if [[ "$WSGI_MODE" == "uwsgi" ]]; then stop_process "placement-api" - remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" else disable_apache_site placement-api restart_apache_server From 7d4446541d6265491999efc536bc88520beadace Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 1 Dec 2017 17:36:38 +0000 Subject: [PATCH 0747/1936] Set the base set of services Use the test-matrix role from devstack-gate to define a base set of services to be enabled for the controller and compute nodes. Extend the local conf module to handle the base set of services. Since the test-matrix defines services for primary and subnode nodes, we need a multinode job to test that this works. Add a new host group called subnode that includes the non-controller hosts. Add a new job that runs devstack on a two nodes environment. Using service from the test matrix enables swift in the gate, so we need to set SWIFT_HASH for devstack to work. Depends-on: Ie36ba0cd7cfcd450b75000a76a64d856f2a83eba Depends-on: Id9ad3be4be25e699f77d6b5a252f046ce8234f45 Change-Id: I379abf482c89122533324e64fefbff3d5a618a89 --- .zuul.yaml | 16 ++++++++++++++++ playbooks/pre.yaml | 12 +++++++++++- roles/write-devstack-local-conf/README.rst | 13 +++++++++++-- .../defaults/main.yaml | 1 + .../library/devstack_local_conf.py | 17 +++++++++++------ roles/write-devstack-local-conf/tasks/main.yaml | 1 + 6 files changed, 51 insertions(+), 9 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 710b229d5b..b9ffb34622 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -23,6 +23,9 @@ nodes: - controller - compute1 + - name: subnode + nodes: + - compute1 - job: name: devstack @@ -39,9 +42,11 @@ - openstack/requirements - openstack/swift roles: + - zuul: openstack-infra/devstack-gate - zuul: openstack-infra/openstack-zuul-jobs timeout: 7200 vars: + test_matrix_configs: ['neutron', 'tlsproxy'] devstack_localrc: DATABASE_PASSWORD: secretdatabase RABBIT_PASSWORD: secretrabbit @@ -57,6 +62,7 @@ FLOATING_HOST_MASK: 23 SWIFT_REPLICAS: 1 SWIFT_START_ALL_SERVICES: false + SWIFT_HASH: 1234123412341234 LOGFILE: /opt/stack/logs/devstacklog.txt LOG_COLOR: false VERBOSE: true @@ -75,9 +81,19 @@ run: playbooks/devstack.yaml post-run: playbooks/post.yaml +- job: + name: devstack-multinode + parent: devstack + description: Base devstack multinode job + nodeset: openstack-two-node + # NOTE(andreaf) The multinode job is useful to see the setup of different + # services on different nodes, however the subnode configuration is not + # ready yet. Until then this job should stay non-voting. 
+ voting: false - project: name: openstack-dev/devstack check: jobs: - devstack + - devstack-multinode diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index 4d07960fe8..d61fd45de0 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -1,3 +1,13 @@ +- hosts: controller + roles: + - role: test-matrix + test_matrix_role: primary + +- hosts: subnode + roles: + - role: test-matrix + test_matrix_role: subnode + - hosts: all roles: - configure-swap @@ -8,7 +18,7 @@ - setup-devstack-cache - start-fresh-logging - write-devstack-local-conf - # TODO(jeblair): remove when configure-mirrors is fixed + # TODO(jeblair): remove when configure-mirrors is fixed tasks: - name: Hack mirror_info shell: diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst index 1b7eb1b4c0..73f9f0d6fd 100644 --- a/roles/write-devstack-local-conf/README.rst +++ b/roles/write-devstack-local-conf/README.rst @@ -47,6 +47,14 @@ Write the local.conf file for use by devstack This is a dictionary of key-value pairs which comprise this section of the INI file. +.. zuul:rolevar:: devstack_base_services + :type: list + :default: {{ base_services | default(omit) }} + + A list of base services which are enabled. Services can be added or removed + from this list via the ``devstack_services`` variable. This is ignored if + ``base`` is set to ``False`` in ``devstack_services``. + .. zuul:rolevar:: devstack_services :type: dict @@ -54,11 +62,12 @@ Write the local.conf file for use by devstack boolean value is ``false``, a ``disable_service`` line will be emitted for the service name. If it is ``true``, then ``enable_service`` will be emitted. All other values are ignored. + The special key ``base`` can be used to enable or disable the base set of services enabled by default. If ``base`` is found, it will processed before all other keys. If its value is ``False`` a ``disable_all_services`` will be - emitted; if its value is ``True`` nothing will be emitted since base - services are enabled by default. + emitted; if its value is ``True`` services from ``devstack_base_services`` + will be emitted via ``ENABLED_SERVICES``. .. 
zuul:rolevar:: devstack_plugins :type: dict diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml index 491fa0fdb9..7bc1dec9b8 100644 --- a/roles/write-devstack-local-conf/defaults/main.yaml +++ b/roles/write-devstack-local-conf/defaults/main.yaml @@ -1,2 +1,3 @@ devstack_base_dir: /opt/stack devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf" +devstack_base_services: "{{ enabled_services | default(omit) }}" diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index dbd60f52b9..55ba4afb69 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -106,13 +106,13 @@ def _getParentVarNamesRecursively(self, dependent_var, soft=False): class LocalConf(object): - def __init__(self, localrc, localconf, services, plugins): + def __init__(self, localrc, localconf, base_services, services, plugins): self.localrc = [] self.meta_sections = {} if plugins: self.handle_plugins(plugins) - if services: - self.handle_services(services) + if services or base_services: + self.handle_services(base_services, services or {}) if localrc: self.handle_localrc(localrc) if localconf: @@ -123,9 +123,12 @@ def handle_plugins(self, plugins): if v: self.localrc.append('enable_plugin {} {}'.format(k, v)) - def handle_services(self, services): - base_services = services.pop('base', True) - if not base_services: + def handle_services(self, base_services, services): + enable_base_services = services.pop('base', True) + if enable_base_services and base_services: + self.localrc.append('ENABLED_SERVICES={}'.format( + ",".join(base_services))) + else: self.localrc.append('disable_all_services') for k, v in services.items(): if v is False: @@ -164,6 +167,7 @@ def main(): module = AnsibleModule( argument_spec=dict( plugins=dict(type='dict'), + base_services=dict(type='list'), services=dict(type='dict'), localrc=dict(type='dict'), local_conf=dict(type='dict'), @@ -174,6 +178,7 @@ def main(): p = module.params lc = LocalConf(p.get('localrc'), p.get('local_conf'), + p.get('base_services'), p.get('services'), p.get('plugins')) lc.write(p['path']) diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml index 1d67616dd4..cc21426b89 100644 --- a/roles/write-devstack-local-conf/tasks/main.yaml +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -4,6 +4,7 @@ devstack_local_conf: path: "{{ devstack_local_conf_path }}" plugins: "{{ devstack_plugins|default(omit) }}" + base_services: "{{ devstack_base_services|default(omit) }}" services: "{{ devstack_services|default(omit) }}" localrc: "{{ devstack_localrc|default(omit) }}" local_conf: "{{ devstack_local_conf|default(omit) }}" From 986cd13a21d34886688a1f22c6dc4a5ba8bfc6d2 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 25 Oct 2017 16:05:46 -0500 Subject: [PATCH 0748/1936] Collect devstack config files in post. 
Co-Authored-By: yong sheng gong Co-Authored-By: Dean Troyer Co-Authored-By: Andrea Frittoli Change-Id: Id9ad3be4be25e699f77d6b5a252f046ce8234f45 --- playbooks/post.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 6f5126ff16..95c366975c 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -1,4 +1,14 @@ - hosts: all + become: True + vars: + devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + stage_dir: "{{ devstack_base_dir|default('/opt/stack') }}" roles: - export-devstack-journal + - role: stage-output + zuul_copy_output: + { '{{ devstack_conf_dir }}/local.conf': 'logs', + '{{ devstack_conf_dir }}/.stackenv': 'logs' } + extensions_to_txt: + - conf - fetch-devstack-log-dir From 52804403ca821abc887e616cba758c997b46386a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 6 Dec 2017 09:11:07 -0600 Subject: [PATCH 0749/1936] Gate on v3 devstack job The devstack base job is in use in many projects, but it is not being gated here in devstack. Let's add it to the list so that we don't accidentally break it. Change-Id: Iea13235a8438d4b540f9f27b94aed13e719481dc --- .zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index b9ffb34622..7c53d45e94 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -97,3 +97,6 @@ jobs: - devstack - devstack-multinode + gate: + jobs: + - devstack From 501aaeb4e7ee07d4c52db987d748ac75dabcaaa6 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 11 Dec 2017 12:01:32 +0100 Subject: [PATCH 0750/1936] lioadm make sure targetcli is there Without installing the targetcli package tools and configs can be missing. The code was correct aside from a typo: it is `ISCSI`, not `ICSI`. Change-Id: I32e5d84d87560458f0eaaf820dcd00c86e6dec8b --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index a966a4b28e..7c5cefe018 100644 --- a/lib/cinder +++ b/lib/cinder @@ -430,7 +430,7 @@ function install_cinder { setup_develop $CINDER_DIR if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then install_package tgt - elif [[ "$CINDER_ISCI_HELPER" == "lioadm" ]]; then + elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then install_package targetcli fi } From e1b0c3886ba5551d3fa8c79849d6a6406037c811 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 6 Dec 2017 09:07:47 -0600 Subject: [PATCH 0751/1936] Add base jobs for functional tests I keep copy-pasting these to projects from the shade repo. Let's make some base jobs people can more easily use. devstack-tox-functional runs devstack and a tox functional environment. devstack-tox-functional-consumer is the same, but runs devstack in pre. It's intended for projects for whom patches to the project won't actually impact the devstack deployment (shade, nodepool, gophercloud are all examples of such things) Change-Id: I84de60181cb88574e341ff83cd4857cce241f2dd --- .zuul.yaml | 54 +++++++++++++++++++++++++++++++++++++ playbooks/tox/post.yaml | 5 ++++ playbooks/tox/pre.yaml | 8 ++++++ playbooks/tox/run-both.yaml | 10 +++++++ playbooks/tox/run.yaml | 3 +++ 5 files changed, 80 insertions(+) create mode 100644 playbooks/tox/post.yaml create mode 100644 playbooks/tox/pre.yaml create mode 100644 playbooks/tox/run-both.yaml create mode 100644 playbooks/tox/run.yaml diff --git a/.zuul.yaml b/.zuul.yaml index 7c53d45e94..a699dbaa7d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -91,6 +91,60 @@ # ready yet. Until then this job should stay non-voting.
voting: false +- job: + name: devstack-tox-base + parent: devstack + description: | + Base job for devstack-based functional tests that use tox. + + This job is not intended to be run directly. It's just here + for organizational purposes for devstack-tox-functional and + devstack-tox-functional-consumer. + post-run: playbooks/tox/post.yaml + vars: + tox_envlist: functional + tox_install_siblings: false + +- job: + name: devstack-tox-functional + parent: devstack-tox-base + description: | + Base job for devstack-based functional tests that use tox. + + Runs devstack, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + run: playbooks/tox/run-both.yaml + +- job: + name: devstack-tox-functional-consumer + parent: devstack + description: | + Base job for devstack-based functional tests for projects that + consume the devstack cloud. + + This base job should only be used by projects that are not involved + in the devstack deployment step, but are instead projects that are using + devstack to get a cloud against which they can test things. + + Runs devstack in pre-run, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + pre-run: + - playbooks/devstack.yaml + - playbooks/tox/pre.yaml + run: playbooks/tox/run.yaml + - project: name: openstack-dev/devstack check: diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml new file mode 100644 index 0000000000..d9e299ff4d --- /dev/null +++ b/playbooks/tox/post.yaml @@ -0,0 +1,5 @@ +- hosts: all + roles: + - fetch-tox-output + - fetch-testr-output + - fetch-stestr-output diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml new file mode 100644 index 0000000000..d7e4670a80 --- /dev/null +++ b/playbooks/tox/pre.yaml @@ -0,0 +1,8 @@ +- hosts: all + roles: + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + - ensure-tox diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml new file mode 100644 index 0000000000..e85c2eee96 --- /dev/null +++ b/playbooks/tox/run-both.yaml @@ -0,0 +1,10 @@ +- hosts: all + roles: + - run-devstack + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + - ensure-tox + - tox diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml new file mode 100644 index 0000000000..22f82096c7 --- /dev/null +++ b/playbooks/tox/run.yaml @@ -0,0 +1,3 @@ +- hosts: all + roles: + - tox From 44a19b4fdceb0d13190b26af04e5092873cd0866 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Mon, 11 Dec 2017 18:07:50 -0500 Subject: [PATCH 0752/1936] Clean up Glance config files The current Glance config files are a combination of copied and generated files. 
This patch makes all the files generated and removes now unnecssary ini(un)comment statements. It additionally removes some ini(un)comment statements that weren't having any effect on the previously generated files. Change-Id: I6e4b7694e8bebb7fe6661ead034ee257c768e342 --- lib/glance | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/lib/glance b/lib/glance index ad286bacb9..aad4726145 100644 --- a/lib/glance +++ b/lib/glance @@ -110,11 +110,9 @@ function configure_glance { # Server is configured through this function and not init_glance. create_glance_cache_dir - # Copy over our glance configurations and update them - cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF + # Set non-default configuration options for registry iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file local dburl dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl @@ -125,8 +123,8 @@ function configure_glance { iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + # Set non-default configuration options for the API server iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ @@ -184,11 +182,6 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 fi iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 - - # commenting is not strictly necessary but it's confusing to have bad values in conf - inicomment $GLANCE_API_CONF glance_store swift_store_user - inicomment $GLANCE_API_CONF glance_store swift_store_key - inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi # We need to tell glance what it's public endpoint is so that the version @@ -214,18 +207,13 @@ function configure_glance { cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI - cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + # Set non-default configuration options for the glance-cache iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_CACHE_CONF DEFAULT log_file iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST From 38a23d901aa1fd55dbe0d57718124c00d4f658cb Mon Sep 17 00:00:00 2001 From: liumk Date: Wed, 13 Dec 2017 15:09:02 +0800 Subject: [PATCH 0753/1936] ceph plugin installation: umount correct device if it existed. The loopback device is created for ceph osd. 
If the directory ${storage_data_dir} has been mounted when create disk, we should umount ${storage_data_dir} instead of ${storage_data_dir}/drives/sdb1. Change-Id: Ie9fe81c820c485dab9f049cf5a81c02424925728 Closes-Bug: #1689089 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 20b83b3cd0..ee35723745 100644 --- a/functions +++ b/functions @@ -712,7 +712,7 @@ function create_disk { # Create a loopback disk and format it to XFS. if [[ -e ${disk_image} ]]; then if egrep -q ${storage_data_dir} /proc/mounts; then - sudo umount ${storage_data_dir}/drives/sdb1 + sudo umount ${storage_data_dir} sudo rm -f ${disk_image} fi fi From 7a8d850dcc31597d80ddecd4c4bbba3dd37914ce Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Thu, 14 Dec 2017 09:16:56 +0000 Subject: [PATCH 0754/1936] Add variable to set volume service version on tempest Tempest is going to test volume v3 APIs as default in gate and running a separate job to run tests on v2 APIs. To give this ability, this commit provide a var to tell which API version need to be tested and accordingly it configure the catalog_type and microversion setting on tempest. Change-Id: I531f3b32e81ac5d282461597ca286c09429cb143 Needed-By: I0c9193501eb9eaa25eb5f0786bb72eb7855099fb --- lib/tempest | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/tempest b/lib/tempest index bdbaaa5678..c7dc19b311 100644 --- a/lib/tempest +++ b/lib/tempest @@ -422,6 +422,13 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume + # Set the service catalog entry for Tempest to run on. Typically + # used to try different Volume API version targets. The tempest + # default it to 'volumev3'(v3 APIs endpoint) , so only set this + # if you want to change it. + if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then + iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE + fi # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} @@ -443,6 +450,12 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} + # Reset microversions to None where v2 is running which does not support microversion. + # Both "None" means no microversion testing. + if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then + tempest_volume_min_microversion=None + tempest_volume_max_microversion=None + fi if [ "$tempest_volume_min_microversion" == "None" ]; then inicomment $TEMPEST_CONFIG volume min_microversion else From 4187d2fc4adbebf84e5d03e2f5c1c587b4bc9d04 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 18 Dec 2017 11:11:03 -0600 Subject: [PATCH 0755/1936] Add doc/requirements.txt file with doc requirements The sphinx jobs need to find doc requirements in either test-requiremnts.txt or doc/requirements.txt. Putting them directly in to tox.ini, not so much. 
Change-Id: I98a43b511a6949fa4f00c26eec224d24d6fa6588 --- doc/requirements.txt | 10 ++++++++++ tox.ini | 19 ++----------------- 2 files changed, 12 insertions(+), 17 deletions(-) create mode 100644 doc/requirements.txt diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..e140bc0689 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,10 @@ +pbr>=2.0.0,!=2.1.0 + +Pygments +docutils +sphinx>=1.6.2 +openstackdocstheme>=1.11.0 +nwdiag +blockdiag +sphinxcontrib-blockdiag +sphinxcontrib-nwdiag diff --git a/tox.ini b/tox.ini index 46b15f4cb1..74436b0f26 100644 --- a/tox.ini +++ b/tox.ini @@ -34,16 +34,7 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] -deps = - Pygments - docutils - sphinx>=1.6.2 - pbr>=2.0.0,!=2.1.0 - openstackdocstheme>=1.11.0 - nwdiag - blockdiag - sphinxcontrib-blockdiag - sphinxcontrib-nwdiag +deps = -r{toxinidir}/doc/requirements.txt whitelist_externals = bash setenv = TOP_DIR={toxinidir} @@ -51,11 +42,5 @@ commands = python setup.py build_sphinx [testenv:venv] -deps = - pbr>=2.0.0,!=2.1.0 - sphinx>=1.6.2 - openstackdocstheme>=1.11.0 - blockdiag - sphinxcontrib-blockdiag - sphinxcontrib-nwdiag +deps = -r{toxinidir}/doc/requirements.txt commands = {posargs} From 290d9d87f4e8048078a83c19d056db0c320e1c63 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Mon, 11 Dec 2017 14:30:59 +0000 Subject: [PATCH 0756/1936] Use stage_dir instead of /opt/stack Historically we have collected devstack logs under /opt/stack. Stop doing that and collect them in the stage_dir instead, so that once the base job logs pull service comes around we are ready for it. This add the benefit of writing things into a folder which is already owned by the ansible user (ansible_user_dir), so we don't run into issue writing there. A few logs (devstack log, log summary and dstat) use to show up on logs.o.o. just because they happened to already be in /opt/stack/logs. With this change they would be lost, so adding them to post.yaml. Depends-on: I5ad4dfccbc1389da3afc53f3c866d3475e006db6 Change-Id: Ib4be2f5056c0dc2b776de4a0d18b47b12624be92 --- playbooks/post.yaml | 16 +++++++++++++--- roles/apache-logs-conf/README.rst | 12 ++++++++++++ roles/export-devstack-journal/README.rst | 10 ++++++++-- roles/export-devstack-journal/defaults/main.yaml | 1 + roles/export-devstack-journal/tasks/main.yaml | 14 +++++++++++--- 5 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 roles/apache-logs-conf/README.rst diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 95c366975c..aaa5cdd5ae 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -1,14 +1,24 @@ - hosts: all become: True vars: + devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" - stage_dir: "{{ devstack_base_dir|default('/opt/stack') }}" roles: - export-devstack-journal - role: stage-output zuul_copy_output: { '{{ devstack_conf_dir }}/local.conf': 'logs', - '{{ devstack_conf_dir }}/.stackenv': 'logs' } + '{{ devstack_conf_dir }}/.stackenv': 'logs' , + '{{ devstack_log_dir }}/dstat-csv.log': 'logs', + '{{ devstack_log_dir }}/devstacklog.txt': 'logs', + '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs' } extensions_to_txt: - conf - - fetch-devstack-log-dir + - log + - summary + # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job + # starts pulling logs for us from {{ ansible_user_dir }}/logs. 
+ # Meanwhile we already store things in ansible_user_dir and use + # fetch-devstack-log-dir setting devstack_base_dir + - role: fetch-devstack-log-dir + devstack_base_dir: "{{ ansible_user_dir }}" diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst new file mode 100644 index 0000000000..eccee403a5 --- /dev/null +++ b/roles/apache-logs-conf/README.rst @@ -0,0 +1,12 @@ +Prepare apache configs and logs for staging + +Make sure apache config files and log files are available in a linux flavor +independent location. Note that this relies on hard links, to the staging +directory must be in the same partition where the logs and configs are. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst index 5f00592a03..a34e0706a9 100644 --- a/roles/export-devstack-journal/README.rst +++ b/roles/export-devstack-journal/README.rst @@ -5,11 +5,17 @@ journal format as well as text. Also, export a syslog-style file with kernal and sudo messages. Writes the output to the ``logs/`` subdirectory of -``devstack_base_dir``. +``stage_dir``. **Role Variables** .. zuul:rolevar:: devstack_base_dir :default: /opt/stack - The devstack base directory. + The devstack base directory. This is used to obtain the + ``log-start-timestamp.txt``, used to filter the systemd journal. + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml index fea05c8146..1fb04fedc8 100644 --- a/roles/export-devstack-journal/defaults/main.yaml +++ b/roles/export-devstack-journal/defaults/main.yaml @@ -1 +1,2 @@ devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index b9af02a591..3efa5755b2 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -1,3 +1,11 @@ +# NOTE(andreaf) This bypasses the stage-output role +- name: Ensure {{ stage_dir }}/logs exists + become: true + file: + path: "{{ stage_dir }}/logs" + state: directory + owner: "{{ ansible_user }}" + # TODO: convert this to ansible - name: Export journal files become: true @@ -7,7 +15,7 @@ name="" for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') - journalctl -o short-precise --unit $u | tee {{ devstack_base_dir }}/logs/$name.txt > /dev/null + journalctl -o short-precise --unit $u | tee {{ stage_dir }}/logs/$name.txt > /dev/null done # Export the journal in export format to make it downloadable @@ -16,7 +24,7 @@ # debugging much easier. We don't do the native conversion here as # some distros do not package that tooling. 
journalctl -u 'devstack@*' -o export | \ - xz --threads=0 - > {{ devstack_base_dir }}/logs/devstack.journal.xz + xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz # The journal contains everything running under systemd, we'll # build an old school version of the syslog with just the @@ -26,4 +34,4 @@ -t sudo \ --no-pager \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | tee {{ devstack_base_dir }}/logs/syslog.txt > /dev/null + | tee {{ stage_dir }}/logs/syslog.txt > /dev/null From 71bf831a906a02c9801500bcabd4ee2b9c773f22 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Thu, 14 Dec 2017 20:35:38 +0000 Subject: [PATCH 0757/1936] Add a few more devstack confs and logs Add localrc for places where it may be still used. Collect verify_tempest_conf. Change-Id: I733c6472e8452ed6fc04c6de1c170713812a436f --- playbooks/post.yaml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index aaa5cdd5ae..f5f189ddc5 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -3,18 +3,34 @@ vars: devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + tasks: + # NOTE(andreaf) If the tempest service is enabled, a tempest.log is + # generated as part of lib/tempest, as a result of verify_tempest_config + - name: Check if a tempest log exits + stat: + path: "{{ devstack_conf_dir }}/tempest.log" + register: tempest_log + - name: Link post-devstack tempest.log + file: + src: "{{ devstack_conf_dir }}/tempest.log" + dest: "{{ stage_dir }}/verify_tempest_conf.log" + state: hard + when: tempest_log.stat.exists roles: - export-devstack-journal - role: stage-output zuul_copy_output: { '{{ devstack_conf_dir }}/local.conf': 'logs', + '{{ devstack_conf_dir }}/localrc': 'logs', '{{ devstack_conf_dir }}/.stackenv': 'logs' , '{{ devstack_log_dir }}/dstat-csv.log': 'logs', '{{ devstack_log_dir }}/devstacklog.txt': 'logs', - '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs' } + '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs', + '{{ stage_dir }}/verify_tempest_conf.log': 'logs' } extensions_to_txt: - conf - log + - localrc - summary # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job # starts pulling logs for us from {{ ansible_user_dir }}/logs. From b7f8624bed8aabf52a0fbb5ac73a9fdd41400273 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Thu, 14 Dec 2017 16:08:51 +0000 Subject: [PATCH 0758/1936] Setup devstack-early log The whole devstack log is written into the console output (job-output.txt) and into devstacklog.txt. Remove it from job-output and add a devstack-early log file (same as in legacy job) that includes all the output of stack.sh. Make sure the log file is pulled into the stage folder so that it will end up on logs.o.o. 
Change-Id: Ia7c1d8fe5cc03d15f455c6e62ebf4a5f6d62ab1f --- playbooks/post.yaml | 2 ++ roles/run-devstack/README.rst | 6 ++++++ roles/run-devstack/defaults/main.yaml | 1 + roles/run-devstack/tasks/main.yaml | 2 +- 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index f5f189ddc5..d99ec10bfe 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -3,6 +3,7 @@ vars: devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}" tasks: # NOTE(andreaf) If the tempest service is enabled, a tempest.log is # generated as part of lib/tempest, as a result of verify_tempest_config @@ -26,6 +27,7 @@ '{{ devstack_log_dir }}/dstat-csv.log': 'logs', '{{ devstack_log_dir }}/devstacklog.txt': 'logs', '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs', + '{{ devstack_full_log}}': 'logs', '{{ stage_dir }}/verify_tempest_conf.log': 'logs' } extensions_to_txt: - conf diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst index d77eb15e99..e53f060602 100644 --- a/roles/run-devstack/README.rst +++ b/roles/run-devstack/README.rst @@ -6,3 +6,9 @@ Run devstack :default: /opt/stack The devstack base directory. + +.. zuul:rolevar:: devstack_early_log + :default: /opt/stack/log/devstack-early.txt + + The full devstack log that includes the whatever stack.sh logs before + the LOGFILE variable in local.conf is honoured. diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml index fea05c8146..dc4528f692 100644 --- a/roles/run-devstack/defaults/main.yaml +++ b/roles/run-devstack/defaults/main.yaml @@ -1 +1,2 @@ devstack_base_dir: /opt/stack +devstack_early_log: /opt/stack/logs/devstack-early.txt diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml index bafebafd65..f53212904a 100644 --- a/roles/run-devstack/tasks/main.yaml +++ b/roles/run-devstack/tasks/main.yaml @@ -1,5 +1,5 @@ - name: Run devstack - command: ./stack.sh + shell: ./stack.sh 2>&1 {{ devstack_early_log }} args: chdir: "{{devstack_base_dir}}/devstack" become: true From 9c977b56eb74944b45b0f0513ac36347b526c096 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 8 Dec 2017 17:41:40 +0000 Subject: [PATCH 0759/1936] Stage apache logs Add apache logs to the list of things we stage. 
Change-Id: I9d3d8e710ae87a71b74f96538cad6fad58dbef79 --- playbooks/post.yaml | 5 +- roles/apache-logs-conf/defaults/main.yaml | 2 + roles/apache-logs-conf/tasks/main.yaml | 80 +++++++++++++++++++++++ 3 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 roles/apache-logs-conf/defaults/main.yaml create mode 100644 roles/apache-logs-conf/tasks/main.yaml diff --git a/playbooks/post.yaml b/playbooks/post.yaml index d99ec10bfe..40c39beb69 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -19,6 +19,7 @@ when: tempest_log.stat.exists roles: - export-devstack-journal + - apache-logs-conf - role: stage-output zuul_copy_output: { '{{ devstack_conf_dir }}/local.conf': 'logs', @@ -28,7 +29,9 @@ '{{ devstack_log_dir }}/devstacklog.txt': 'logs', '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs', '{{ devstack_full_log}}': 'logs', - '{{ stage_dir }}/verify_tempest_conf.log': 'logs' } + '{{ stage_dir }}/verify_tempest_conf.log': 'logs', + '{{ stage_dir }}/apache': 'logs', + '{{ stage_dir }}/apache_config': 'logs' } extensions_to_txt: - conf - log diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/apache-logs-conf/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml new file mode 100644 index 0000000000..7fd490ea4c --- /dev/null +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -0,0 +1,80 @@ +- name: Ensure {{ stage_dir }}/apache exists + file: + path: "{{ stage_dir }}/apache" + state: directory + +- name: Link apache logs on Debian/SuSE + block: + - name: Find logs + find: + path: "/var/log/apache2" + file_type: any + register: debian_suse_apache_logs + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ debian_suse_apache_logs.files }}" + register: debian_suse_apache_deref_logs + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ debian_suse_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family in ('Debian', 'Suse') + +- name: Link apache logs on RedHat + block: + - name: Find logs + find: + path: "/var/log/httpd" + file_type: any + register: redhat_apache_logs + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ redhat_apache_logs.files }}" + register: redhat_apache_deref_logs + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ redhat_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family == 'Redhat' + +- name: Ensure {{ stage_dir }}/apache_config apache_config exists + file: + path: "{{ stage_dir }}/apache_config" + state: directory + +- name: Define config paths + set_fact: + apache_config_paths: + 'Debian': '/etc/apache2/sites-enabled/' + 'Suse': '/etc/apache2/conf.d/' + 'Redhat': '/etc/httpd/conf.d/' + +- name: Discover configurations + find: + path: "{{ apache_config_paths[ansible_os_family] }}" + file_type: any + register: apache_configs + +- name: Dereference configurations + stat: + path: "{{ item.path }}" + with_items: "{{ apache_configs.files }}" + register: 
apache_configs_deref + +- name: Link configurations + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ apache_configs_deref.results }}" + when: item.stat.isreg or item.stat.islnk From d04658eb50d7e1f59625039deaef17208a0c9eb9 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 8 Dec 2017 17:52:29 +0000 Subject: [PATCH 0760/1936] Stage rabbitmq and DB logs Stage rabbitmq and DB logs so they are copied to logs.o.o Change-Id: Ica8a86c2649b2871f366e3ca48a8b47f455879bc --- playbooks/post.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 40c39beb69..fd30bdbf2a 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -31,7 +31,11 @@ '{{ devstack_full_log}}': 'logs', '{{ stage_dir }}/verify_tempest_conf.log': 'logs', '{{ stage_dir }}/apache': 'logs', - '{{ stage_dir }}/apache_config': 'logs' } + '{{ stage_dir }}/apache_config': 'logs', + '/var/log/rabbitmq': 'logs', + '/var/log/postgresql': 'logs', + '/var/log/mysql.err': 'logs', + '/var/log/mysql.log': 'logs' } extensions_to_txt: - conf - log From e6b0169a0207bed66ae7bec683b5a7a816e1dd13 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 8 Dec 2017 18:19:23 +0000 Subject: [PATCH 0761/1936] Stage libvirt logs and sudoers Stage libvirt logs and sudoers configuration. Change-Id: I46112c52f05c6015a9f397eda130404beeb5e899 --- playbooks/post.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index fd30bdbf2a..ee8a87820e 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -35,7 +35,10 @@ '/var/log/rabbitmq': 'logs', '/var/log/postgresql': 'logs', '/var/log/mysql.err': 'logs', - '/var/log/mysql.log': 'logs' } + '/var/log/mysql.log': 'logs', + '/var/log/libvirt': 'logs', + '/etc/sudoers': 'logs', + '/etc/sudoers.d': 'logs' } extensions_to_txt: - conf - log From 5a9872a9b90c06cddbbd4a18b16e715e3db9a939 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 15 Dec 2017 13:55:50 +0000 Subject: [PATCH 0762/1936] Stage openstack config files Stage config files from openstack and all projects used by the devstack run. 
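Only configuration directories that actually exist under /etc are copied, keyed on
the short name of every project in the Zuul inventory, plus the shared /etc/openstack
directory. The effect is roughly the following (the project names here are only
examples; the real list comes from zuul.projects):

    mkdir -p "$HOME/etc"
    for project in nova neutron glance; do
        [ -d "/etc/$project" ] && cp -pRL "/etc/$project" "$HOME/etc/$project"
    done
    [ -d /etc/openstack ] && cp -pRL /etc/openstack "$HOME/etc/"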
Change-Id: I99f1bc71856b0da7fc5b438d13d4f508d2341e44 --- playbooks/post.yaml | 2 ++ roles/devstack-project-conf/README.rst | 11 +++++++++ .../devstack-project-conf/defaults/main.yaml | 1 + roles/devstack-project-conf/tasks/main.yaml | 24 +++++++++++++++++++ 4 files changed, 38 insertions(+) create mode 100644 roles/devstack-project-conf/README.rst create mode 100644 roles/devstack-project-conf/defaults/main.yaml create mode 100644 roles/devstack-project-conf/tasks/main.yaml diff --git a/playbooks/post.yaml b/playbooks/post.yaml index ee8a87820e..36023a625a 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -20,6 +20,7 @@ roles: - export-devstack-journal - apache-logs-conf + - devstack-project-conf - role: stage-output zuul_copy_output: { '{{ devstack_conf_dir }}/local.conf': 'logs', @@ -32,6 +33,7 @@ '{{ stage_dir }}/verify_tempest_conf.log': 'logs', '{{ stage_dir }}/apache': 'logs', '{{ stage_dir }}/apache_config': 'logs', + '{{ stage_dir }}/etc': 'logs', '/var/log/rabbitmq': 'logs', '/var/log/postgresql': 'logs', '/var/log/mysql.err': 'logs', diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst new file mode 100644 index 0000000000..3f2d4c9697 --- /dev/null +++ b/roles/devstack-project-conf/README.rst @@ -0,0 +1,11 @@ +Prepare OpenStack project configurations for staging + +Prepare all relevant config files for staging. +This is helpful to avoid staging the entire /etc. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml new file mode 100644 index 0000000000..f8fb8deac9 --- /dev/null +++ b/roles/devstack-project-conf/defaults/main.yaml @@ -0,0 +1 @@ +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml new file mode 100644 index 0000000000..9c6e06bea9 --- /dev/null +++ b/roles/devstack-project-conf/tasks/main.yaml @@ -0,0 +1,24 @@ +- name: Ensure {{ stage_dir }}/etc exists + file: + path: "{{ stage_dir }}/etc" + state: directory + +- name: Check which projects have a config folder + stat: + path: "/etc/{{ item.value.short_name }}" + with_dict: "{{ zuul.projects }}" + register: project_configs + +- name: Copy configuration files + command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }} + when: item.stat.exists + with_items: "{{ project_configs.results }}" + +- name: Check if openstack has a config folder + stat: + path: "/etc/openstack" + register: openstack_configs + +- name: Copy configuration files + command: cp -pRL /etc/openstack {{ stage_dir }}/etc/ + when: openstack_configs.stat.exists From a2b174168a5d9bd3558e60297bea9dd91f39a546 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Fri, 15 Dec 2017 17:35:31 +0000 Subject: [PATCH 0763/1936] Stage a number of system type logs This could be ansiblesed a more, for now more or less the bash code from d-g wrapped in a role. 
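The captures are plain shell commands redirected into the staging directory, for
instance (a small, representative subset; the complete list is in the role task below):

    sudo iptables-save > "$HOME/iptables.txt"
    df -h > "$HOME/df.txt"
    command -v dpkg >/dev/null && dpkg -l > "$HOME/dpkg-l.txt"
    command -v rpm  >/dev/null && rpm -qa | sort > "$HOME/rpm-qa.txt"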
Change-Id: Ia7fe40f05ca394da9a65fef6383d72f29a532e2f --- playbooks/post.yaml | 19 +++++++++- roles/capture-system-logs/README.rst | 20 +++++++++++ roles/capture-system-logs/defaults/main.yaml | 1 + roles/capture-system-logs/tasks/main.yaml | 38 ++++++++++++++++++++ 4 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 roles/capture-system-logs/README.rst create mode 100644 roles/capture-system-logs/defaults/main.yaml create mode 100644 roles/capture-system-logs/tasks/main.yaml diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 36023a625a..41d3c54121 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -21,6 +21,8 @@ - export-devstack-journal - apache-logs-conf - devstack-project-conf + # capture-system-logs should be the last role before stage-output + - capture-system-logs - role: stage-output zuul_copy_output: { '{{ devstack_conf_dir }}/local.conf': 'logs', @@ -40,7 +42,22 @@ '/var/log/mysql.log': 'logs', '/var/log/libvirt': 'logs', '/etc/sudoers': 'logs', - '/etc/sudoers.d': 'logs' } + '/etc/sudoers.d': 'logs', + '{{ stage_dir }}/iptables.txt': 'logs', + '{{ stage_dir }}/df.txt': 'logs', + '{{ stage_dir }}/pip2-freeze.txt': 'logs', + '{{ stage_dir }}/pip3-freeze.txt': 'logs', + '{{ stage_dir }}/dpkg-l.txt': 'logs', + '{{ stage_dir }}/rpm-qa.txt': 'logs', + '{{ stage_dir }}/core': 'logs', + '{{ stage_dir }}/listen53.txt': 'logs', + '{{ stage_dir }}/deprecations.log': 'logs', + '/var/log/ceph': 'logs', + '/var/log/openvswitch': 'logs', + '/var/log/glusterfs': 'logs', + '/etc/glusterfs/glusterd.vol': 'logs', + '/etc/resolv.conf': 'logs', + '/var/log/unbound.log': 'logs' } extensions_to_txt: - conf - log diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst new file mode 100644 index 0000000000..c28412457a --- /dev/null +++ b/roles/capture-system-logs/README.rst @@ -0,0 +1,20 @@ +Stage a number of system type logs + +Stage a number of different logs / reports: +- snapshot of iptables +- disk space available +- pip[2|3] freeze +- installed packages (dpkg/rpm) +- ceph, openswitch, gluster +- coredumps +- dns resolver +- listen53 +- unbound.log +- deprecation messages + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. 
diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/capture-system-logs/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml new file mode 100644 index 0000000000..cd8f4f0eab --- /dev/null +++ b/roles/capture-system-logs/tasks/main.yaml @@ -0,0 +1,38 @@ +# TODO(andreaf) Make this into proper Ansible +- name: Stage various logs and reports + shell: + cmd: | + sudo iptables-save > {{ stage_dir }}/iptables.txt + df -h > {{ stage_dir }}/df.txt + + for py_ver in 2 3; do + if [[ `which python${py_ver}` ]]; then + python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt + fi + done + + if [ `command -v dpkg` ]; then + dpkg -l> {{ stage_dir }}/dpkg-l.txt + fi + if [ `command -v rpm` ]; then + rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt + fi + + # gzip and save any coredumps in /var/core + if [ -d /var/core ]; then + sudo gzip -r /var/core + sudo cp -r /var/core {{ stage_dir }}/ + fi + + sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt + + # NOTE(andreaf) Service logs are already in logs/ thanks for the + # export-devstack-journal log. Apache logs are under apache/ thans to the + # apache-logs-conf role. + grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \ + sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' | + sed -r 's/\[.*\]/ /g' | \ + sed -r 's/\s[0-9]+\s/ /g' | \ + awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log From b040af4d85c695ba4da210eb714b053d8aa834ad Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 20 Dec 2017 06:18:43 +0000 Subject: [PATCH 0764/1936] Updated from generate-devstack-plugins-list Change-Id: Ib7ed514446fbb39f87369fa941dfa87c28046942 --- doc/source/plugin-registry.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 43dd3c2eae..907671adc8 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,7 +26,6 @@ Plugin Name URL ====================================== === almanach `git://git.openstack.org/openstack/almanach `__ aodh `git://git.openstack.org/openstack/aodh `__ -app-catalog-ui `git://git.openstack.org/openstack/app-catalog-ui `__ astara `git://git.openstack.org/openstack/astara `__ barbican `git://git.openstack.org/openstack/barbican `__ bilean `git://git.openstack.org/openstack/bilean `__ @@ -127,6 +126,7 @@ networking-ovs-dpdk `git://git.openstack.org/openstack/networ networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid `__ networking-powervm `git://git.openstack.org/openstack/networking-powervm `__ networking-sfc `git://git.openstack.org/openstack/networking-sfc `__ +networking-spp `git://git.openstack.org/openstack/networking-spp `__ networking-vpp `git://git.openstack.org/openstack/networking-vpp `__ networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ neutron `git://git.openstack.org/openstack/neutron `__ @@ -135,6 +135,7 @@ neutron-fwaas `git://git.openstack.org/openstack/neutro neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard `__ 
neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ +neutron-tempest-plugin `git://git.openstack.org/openstack/neutron-tempest-plugin `__ neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ neutron-vpnaas-dashboard `git://git.openstack.org/openstack/neutron-vpnaas-dashboard `__ nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ From 49f3cc76bffaa09c710212ef6779943071bb050d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 11 Dec 2017 09:24:15 +0000 Subject: [PATCH 0765/1936] Make stackenv file visible The file won't be listed as long as it is called .stackenv.txt.gz, with this it will be called _stackenv.txt.gz instead. Change-Id: Ib3b44c287ffb2ec0e48fefef1662a1c02d162657 --- playbooks/post.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 36023a625a..a2f695cb21 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -45,6 +45,7 @@ - conf - log - localrc + - stackenv - summary # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job # starts pulling logs for us from {{ ansible_user_dir }}/logs. From f0636bac05dfb1c963cc1eda2832259075127809 Mon Sep 17 00:00:00 2001 From: Harald Jensas Date: Wed, 20 Dec 2017 12:07:40 +0100 Subject: [PATCH 0766/1936] Fixing a typo - internationalized Assuming confusingness a few lines down is intentional ... Change-Id: I3bd18aa32e1166bb1012ac65e83461b75be4f006 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index a125d4a0d7..ee857c516f 100755 --- a/stack.sh +++ b/stack.sh @@ -32,7 +32,7 @@ unset GREP_OPTIONS # Devstack is written in bash, and many functions used throughout # devstack process text coming off a command (like the ip command) # and do transforms using grep, sed, cut, awk on the strings that are -# returned. Many of these programs are interationalized, which is +# returned. Many of these programs are internationalized, which is # great for end users, but means that the strings that devstack # functions depend upon might not be there in other locales. We thus # need to pin the world to an english basis during the runs. From 421753f71d1e2e88b55dcbe58672395afbeced87 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 20 Dec 2017 08:31:15 -0600 Subject: [PATCH 0767/1936] Switch to consolidated fetch-subunit-output role fetch-testr-output and fetch-stestr-output are being merged. Change-Id: I00d448c4e6b98a1f504b048c74eff4e110c0b511 Depends-On: I833320cf9a932d8e119645eb798ce0c93d854321 --- playbooks/tox/post.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml index d9e299ff4d..7f0cb19824 100644 --- a/playbooks/tox/post.yaml +++ b/playbooks/tox/post.yaml @@ -1,5 +1,4 @@ - hosts: all roles: - fetch-tox-output - - fetch-testr-output - - fetch-stestr-output + - fetch-subunit-output From 6a25fb96c1e2c00ec2ec2c2b456d549c092ae495 Mon Sep 17 00:00:00 2001 From: bhagyashris Date: Thu, 14 Dec 2017 13:23:41 +0530 Subject: [PATCH 0768/1936] Initialize newly added glance-image-import config file This patch creates a new config file glance-image-import.conf at /etc/glance path. Also, each config option is initialized with default values. 
Need these changes to implement specs [1]: [1]: https://blueprints.launchpad.net/glance/+spec/inject-automatic-metadata Related-Change-Id: If14c7dc4f38360006f9cb350fbba54fa2f33be61 Change-Id: I665507db1838a50e344d3be909d7490f1f52040c --- lib/glance | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/glance b/lib/glance index ad286bacb9..95d2450da7 100644 --- a/lib/glance +++ b/lib/glance @@ -56,6 +56,7 @@ GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf +GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False} if is_service_enabled tls-proxy; then @@ -232,6 +233,11 @@ function configure_glance { # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + # Set default configuration options for the glance-image-import + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins [] + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject + cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON From f6d566c21d0b33344e58fcbc9ec388d7503570da Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 22 Dec 2017 11:39:29 -0500 Subject: [PATCH 0769/1936] Configure nova_cell1.conf to use placement for reschedules Nova now calculates alternate hosts in the scheduler and sends those to the cell for reschedules in case a build on a given compute node fails. The cell conductor needs to claim resources against the alternate hosts in Placement during a reschedule, therefore it needs to be configured to talk to the placement service. Part of blueprint return-alternate-hosts Change-Id: Ie599968d9e7537e551fe6d9deb63a91b256b1e11 --- lib/nova | 6 ++++++ lib/placement | 18 ++++++++++-------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/lib/nova b/lib/nova index 50deeeb220..c40c43af6c 100644 --- a/lib/nova +++ b/lib/nova @@ -600,6 +600,12 @@ function create_nova_conf { else rpc_backend_add_vhost $vhost iniset_rpc_backend nova $conf DEFAULT $vhost + # When running in superconductor mode, the cell conductor + # must be configured to talk to the placement service for + # reschedules to work. + if is_service_enabled placement placement-client; then + configure_placement_nova_compute $conf + fi fi # Format logging setup_logging $conf diff --git a/lib/placement b/lib/placement index 1875857552..1d68f8a185 100644 --- a/lib/placement +++ b/lib/placement @@ -103,14 +103,16 @@ function _config_placement_apache_wsgi { } function configure_placement_nova_compute { - iniset $NOVA_CONF placement auth_type "password" - iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI" - iniset $NOVA_CONF placement username placement - iniset $NOVA_CONF placement password "$SERVICE_PASSWORD" - iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF placement os_region_name "$REGION_NAME" + # Use the provided config file path or default to $NOVA_CONF. 
+ local conf=${1:-$NOVA_CONF} + iniset $conf placement auth_type "password" + iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf placement username placement + iniset $conf placement password "$SERVICE_PASSWORD" + iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf placement project_name "$SERVICE_TENANT_NAME" + iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf placement os_region_name "$REGION_NAME" # TODO(cdent): auth_strategy, which is common to see in these # blocks is not currently used here. For the time being the # placement api uses the auth_strategy configuration setting From b4d772b9fdb00fbdec1304ce6fbeafaa77915071 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Sat, 23 Dec 2017 20:17:33 +0100 Subject: [PATCH 0770/1936] devstack job: Add irrelevant-files Do not run devstack and its siblings if only rst files, releasenotes, or files in doc directory change. This is the minimal set of irrelevant files shared by most projects already. Needed-By: Ie8504ba3d5d46f6338a228ed2d248ba6363e37ae Change-Id: Id0095763eb91592c2fd1a913526883987df704bd --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index a699dbaa7d..037e9a9ea8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -80,6 +80,14 @@ pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml + irrelevant-files: + # Documentation related + - ^.*\.rst$ + - ^api-ref/.*$ + - ^doc/.*$ + - ^releasenotes/.*$ + # Translations + - ^.*/locale/.*po$ - job: name: devstack-multinode From f8dc558486fdb484b31338aa30933e33c061638c Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sun, 24 Dec 2017 08:17:01 +0900 Subject: [PATCH 0771/1936] Drop django_openstack_auth related code In Queens cycle, the code of django_openstack_auth was merged into the horizon repository. The master branch of django_openstack_auth will be retired. (horizon blueprint merge-openstack-auth) This commit drops django_openstack_auth related code from DevStack. _prepare_message_catalog_compilation in lib/horizon was used only in install_django_openstack_auth, so it is dropped too. Change-Id: If9467c520a1e07d1968b29e485df0097330356bc --- lib/horizon | 24 ------------------------ stack.sh | 2 -- stackrc | 4 ---- tests/test_libs_from_pypi.sh | 2 +- 4 files changed, 1 insertion(+), 31 deletions(-) diff --git a/lib/horizon b/lib/horizon index 3d2f68d09d..fab41bbeca 100644 --- a/lib/horizon +++ b/lib/horizon @@ -26,9 +26,6 @@ set +o xtrace # Defaults # -------- -# Set up default directories -GITDIR["django_openstack_auth"]=$DEST/django_openstack_auth - HORIZON_DIR=$DEST/horizon # local_settings.py is used to customize Dashboard settings. @@ -159,20 +156,6 @@ function init_horizon { } -# install_django_openstack_auth() - Collect source and prepare -function install_django_openstack_auth { - if use_library_from_git "django_openstack_auth"; then - local dir=${GITDIR["django_openstack_auth"]} - git_clone_by_name "django_openstack_auth" - # Compile message catalogs before installation - _prepare_message_catalog_compilation - (cd $dir; $PYTHON setup.py compile_catalog) - setup_dev_lib "django_openstack_auth" - fi - # if we aren't using this library from git, then we just let it - # get dragged in by the horizon setup. 
-} - # install_horizon() - Collect source and prepare function install_horizon { # Apache installation, because we mark it NOPRIME @@ -191,13 +174,6 @@ function stop_horizon { stop_apache_server } -# NOTE: It can be moved to common functions, but it is only used by compilation -# of django_openstack_auth catalogs at the moment. -function _prepare_message_catalog_compilation { - pip_install_gr Babel -} - - # Restore xtrace $_XTRACE_HORIZON diff --git a/stack.sh b/stack.sh index a125d4a0d7..db7261a380 100755 --- a/stack.sh +++ b/stack.sh @@ -894,8 +894,6 @@ if is_service_enabled placement placement-client; then fi if is_service_enabled horizon; then - # django openstack_auth - install_django_openstack_auth # dashboard stack_install_service horizon fi diff --git a/stackrc b/stackrc index 286a04d3fe..7d5c75df41 100644 --- a/stackrc +++ b/stackrc @@ -500,10 +500,6 @@ GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master} GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master} -# django openstack_auth library -GITREPO["django_openstack_auth"]=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git} -GITBRANCH["django_openstack_auth"]=${HORIZONAUTH_BRANCH:-master} - # keystone middleware GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 0bd8d49357..a544b56577 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -35,7 +35,7 @@ ALL_LIBS+=" python-glanceclient python-ironicclient" ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" -ALL_LIBS+=" oslo.serialization django_openstack_auth" +ALL_LIBS+=" oslo.serialization" ALL_LIBS+=" python-openstackclient osc-lib osc-placement" ALL_LIBS+=" os-client-config oslo.rootwrap" ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" From 7b1b6f3446965dda570806dfab06538e6796012e Mon Sep 17 00:00:00 2001 From: Takashi NATSUME Date: Wed, 20 Dec 2017 11:38:23 +0900 Subject: [PATCH 0772/1936] Fix that DISTRO is not set in unstack.sh Change-Id: I36cab41fdb767e8e31073a4fa1f929090f702910 Closes-Bug: #1738938 --- unstack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unstack.sh b/unstack.sh index 5d3672e25d..ccea0ef585 100755 --- a/unstack.sh +++ b/unstack.sh @@ -45,6 +45,10 @@ fi # Configure Projects # ================== +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` and ``DISTRO`` +GetDistro + # Plugin Phase 0: override_defaults - allow plugins to override # defaults before other services are run run_phase override_defaults @@ -83,10 +87,6 @@ fi load_plugin_settings -# Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` -GetOSVersion - set -o xtrace # Run extras From b3ed561c7af82fabc4a46ff0affcb71737b23a3d Mon Sep 17 00:00:00 2001 From: Toshiaki Takahashi Date: Thu, 4 Jan 2018 02:15:08 +0900 Subject: [PATCH 0773/1936] Fix "Centos" to official notation "CentOS". 
Change-Id: I2f76fe3a19cd2ebf004d0a7c14852f20f3dadd9e --- doc/source/overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/overview.rst b/doc/source/overview.rst index c07a8e6d67..814a2b148b 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -24,7 +24,7 @@ release.* - Ubuntu: current LTS release plus current development release - Fedora: current release plus previous release -- RHEL/Centos: current major release +- RHEL/CentOS: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly From 2700bf88a549088285bef41fe86ac323965c8727 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 4 Jan 2018 10:16:16 -0800 Subject: [PATCH 0774/1936] Gzip openstack service logs These files are quite large and disk space is limited so make sure we compress the log files before copying them to storage. Additionally os-loganalyze will only operate on gzipped log files so this should fix os-loganalyze with tempest-full job's logs. This is mostly a check to confirm everything works as expected but we probably want to move the gzip step into the log publication roles so that all log files end up compressed. Change-Id: Ie87962428e0ca755c211cc5e664a14a9f2a79ac4 --- roles/export-devstack-journal/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index 3efa5755b2..6e760c1f6f 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -15,7 +15,7 @@ name="" for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') - journalctl -o short-precise --unit $u | tee {{ stage_dir }}/logs/$name.txt > /dev/null + journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz done # Export the journal in export format to make it downloadable @@ -34,4 +34,4 @@ -t sudo \ --no-pager \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | tee {{ stage_dir }}/logs/syslog.txt > /dev/null + | gzip - > {{ stage_dir }}/logs/syslog.txt.gz From c396355befb37d31ee8118fb5994aed63b139843 Mon Sep 17 00:00:00 2001 From: Hunt Xu Date: Mon, 8 Jan 2018 16:11:33 +0800 Subject: [PATCH 0775/1936] Use "ip neigh" instead of "arp -n" in worlddump.py Worlddumping on a system without net-tools package gets an error showing "arp: not found". As iproute2 can also show arp tables, we use it instead. 
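For reference, the iproute2 equivalent of the dropped net-tools call reports the same
neighbour/ARP table:

    # old (requires the net-tools package)
    arp -n
    # new (iproute2, effectively always installed)
    ip neigh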
Change-Id: I0cd83e6d14959dc5a1147c487b11f27fb92aa20a --- tools/worlddump.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 6fff1497a7..750608210a 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -164,8 +164,7 @@ def network_dump(): _header("Network Dump") _dump_cmd("brctl show") - _dump_cmd("arp -n") - ip_cmds = ["addr", "link", "route"] + ip_cmds = ["neigh", "addr", "link", "route"] for cmd in ip_cmds + ['netns']: _dump_cmd("ip %s" % cmd) for netns_ in _netns_list(): From fabaa991ca28a181909b420701bbd8e195177814 Mon Sep 17 00:00:00 2001 From: Shachar Snapiri Date: Wed, 6 Dec 2017 14:45:06 +0200 Subject: [PATCH 0776/1936] Added the ability to specify checksum for etcd It is possibe for the user to override te etcd version, thus download a different etcd file, but the checksum is constant, so the checksum verification will fail in that case. Added the ability to specify a different checksum, so the user would be able to specify the new version checksum Change-Id: I85af3af841ae957964f18d4e37a86ab0703882bc Closes-Bug: #1736718 --- stackrc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackrc b/stackrc index 286a04d3fe..d344981c75 100644 --- a/stackrc +++ b/stackrc @@ -720,11 +720,11 @@ EXTRA_CACHE_URLS="" # etcd3 defaults ETCD_VERSION=${ETCD_VERSION:-v3.1.10} -ETCD_SHA256_AMD64="2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be" +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be"} # NOTE(sdague): etcd v3.1.10 doesn't have anything for these architectures, though 3.2.x does. -ETCD_SHA256_ARM64="" -ETCD_SHA256_PPC64="" -ETCD_SHA256_S390X="" +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-""} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-""} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" From fe4c3cfcf54f863ec10f970805528f9d3d0cc750 Mon Sep 17 00:00:00 2001 From: Shachar Snapiri Date: Tue, 19 Sep 2017 09:52:00 +0300 Subject: [PATCH 0777/1936] Modified the subnet-range parameter to be optional The subnet-range parameter is only sent now if a valid value exists so the command will not fail Change-Id: I5296f5b59bc6d3d3db90a685a8678db9a156eece Closes-Bug: #1718111 --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 98315b7dd3..41a467df4c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -188,7 +188,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating 
SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -198,7 +198,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID --subnet-range $fixed_range_v6 | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi From b9891eea1f981fa51682f2c94f8e04f047f3dec4 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Mon, 8 Jan 2018 15:20:36 +0100 Subject: [PATCH 0778/1936] Fix Python3 get_python_exec_prefix on Fedora 27 On Fedora 27, the default Pytho 3 install prefix changed from /usr to /usr/local: https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe Closes-Bug: #1741901 Change-Id: Id40620efdf173189df053b5d380a801092933f83 --- inc/python | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 2e4eff02ea..e074ea498f 100644 --- a/inc/python +++ b/inc/python @@ -49,7 +49,11 @@ function get_python_exec_prefix { fi $xtrace - if is_fedora || is_suse; then + if python3_enabled && [ "$os_VENDOR" = "Fedora" -a $os_RELEASE -gt 26 ]; then + # Default Python 3 install prefix changed to /usr/local in Fedora 27: + # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe + echo "/usr/local/bin" + elif is_fedora || is_suse; then echo "/usr/bin" else echo "/usr/local/bin" From 9aaa529f65011e4bfa649043c35ee808970dc426 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 20 Sep 2017 14:23:05 -0400 Subject: [PATCH 0779/1936] Add DVR support to lib/neutron Added NEUTRON_DISTRIBUTED_ROUTING to more easily control DVR configuration. If set to True, DVR will be enabled and the default agent mode will be set to 'dvr_snat' since that works with all types of routers by default. Advanced users can override that by setting NEUTRON_DVR_MODE, for example in multi-node configurations where different agent modes are desired. This should bring lib/neutron inline with lib/neutron-legacy in supporting all the different DVR modes. 
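For a single-node setup the only knob a user needs is the new flag; a compute-only
host in a multi-node deployment would additionally override the agent mode, e.g. in
local.conf:

    NEUTRON_DISTRIBUTED_ROUTING=True
    # optional, multi-node compute hosts only; the default is dvr_snat
    NEUTRON_DVR_MODE=dvr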
Change-Id: I9f25921eefc5b935aad3bb1edc5e41ee0ce43a84 --- lib/neutron | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c5839f5c3e..d33b49e890 100644 --- a/lib/neutron +++ b/lib/neutron @@ -32,6 +32,17 @@ NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} +NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +# Default is 'dvr_snat' since it can handle both DVR and legacy routers +NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat} + NEUTRON_BIN_DIR=$(get_python_exec_prefix) NEUTRON_DHCP_BINARY="neutron-dhcp-agent" @@ -173,6 +184,7 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT policy_file $policy_file iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True + iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken @@ -181,7 +193,15 @@ function configure_neutron_new { # Configure VXLAN # TODO(sc68cal) not hardcode? iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge + + local mech_drivers="openvswitch" + if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then + mech_drivers+=",l2population" + else + mech_drivers+=",linuxbridge" + fi + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers + iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then @@ -202,6 +222,11 @@ function configure_neutron_new { else iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables_hybrid iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP + + if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then + iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True + iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True + fi fi if ! running_in_container; then @@ -236,6 +261,10 @@ function configure_neutron_new { else iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" fi + + if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then + iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE + fi fi # Metadata From 9fd38e79701b65d41116a8915e47978fc50176c6 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 11 Dec 2017 12:20:25 +0100 Subject: [PATCH 0780/1936] Add Fedora 27 support Removing the (f23,)f24 support they are EOL. The only non-trivial change is the apache-httpd default worker change, however might not be bad idea to use `event` instead of `worker` in the future, but for now keep it AS-IS and continue to use `worker`. 
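After stack.sh has run, the active MPM can be double-checked with something like:

    httpd -V | grep -i 'server mpm'
    # expected on Fedora/CentOS with this change: Server MPM:     worker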
Change-Id: I96d414a30b58bc4b43da45066fdf310a6a830079 Closes-Bug: #1740194 --- files/rpms/cinder | 4 ++-- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/apache | 3 ++- stack.sh | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 3bc4e7ae72..e6addc62aa 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f24,f25,f26 NOPRIME -targetcli # dist:rhel7,f24,f25,f26 NOPRIME +scsi-target-utils # not:rhel7,f25,f26,f27 NOPRIME +targetcli # dist:rhel7,f25,f26,f27 NOPRIME diff --git a/files/rpms/general b/files/rpms/general index f3f870823c..878b935a1b 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f23,f24,f25,f26 +iptables-services # NOPRIME f25,f26,f27 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25,f26 +java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index 64ed480632..9fb7282df5 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f23,f24,f25,f26 +kernel-modules # dist:f25,f26,f27 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index 2e09cec28f..be0db140e3 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f23,f24,f25,f26 +rsync-daemon # dist:f25,f26,f27 sqlite xfsprogs xinetd diff --git a/lib/apache b/lib/apache index 3af341174e..84cec73234 100644 --- a/lib/apache +++ b/lib/apache @@ -133,8 +133,9 @@ function install_apache_wsgi { sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi # For consistency with Ubuntu, switch to the worker mpm, as - # the default is prefork + # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf + sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf elif is_suse; then install_package apache2 apache2-mod_wsgi diff --git a/stack.sh b/stack.sh index 1d0381483a..32eb43f292 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f25|f26|f27|opensuse-42.2|opensuse-42.3|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 23e87ef6188514b17435897fac067568f0f71ac6 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Sun, 10 Dec 2017 05:11:53 +0300 Subject: [PATCH 0781/1936] Remove deprecated 'deactivate_image' feature flag setting 'deactivate_image' feature flag was added long back during kilo cycle. Tempest is going to remove this feature flag. 
Depends-On: I843d4c64f24407d9d217005d5ea59d50d7ad62e7 Change-Id: I1ae8efc0e62acc5e05c1c00dc8970b74d8b16da0 --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bdbaaa5678..35dbb7b6a1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -310,7 +310,6 @@ function configure_tempest { fi # Image Features - iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True if [ "$GLANCE_V1_ENABLED" != "True" ]; then iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False fi From 4eb998fd5a494db19b40a0ede71dde73e2d1f5ec Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 13 Jan 2018 06:17:40 +0000 Subject: [PATCH 0782/1936] Updated from generate-devstack-plugins-list Change-Id: I6b1214c9b080fbad43609548bf89be50c8191d87 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 907671adc8..31812aeefb 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -167,6 +167,7 @@ storlets `git://git.openstack.org/openstack/storle tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ +telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin `__ tricircle `git://git.openstack.org/openstack/tricircle `__ trio2o `git://git.openstack.org/openstack/trio2o `__ trove `git://git.openstack.org/openstack/trove `__ @@ -174,6 +175,7 @@ trove-dashboard `git://git.openstack.org/openstack/trove- valet `git://git.openstack.org/openstack/valet `__ vitrage `git://git.openstack.org/openstack/vitrage `__ vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard `__ +vitrage-tempest-plugin `git://git.openstack.org/openstack/vitrage-tempest-plugin `__ vmware-nsx `git://git.openstack.org/openstack/vmware-nsx `__ vmware-vspc `git://git.openstack.org/openstack/vmware-vspc `__ watcher `git://git.openstack.org/openstack/watcher `__ From dc04b5aa24411b4081f0ad08021e0dc694c982e8 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 4 Dec 2017 11:32:36 +1100 Subject: [PATCH 0783/1936] CentOS: Fix EPEL mirroring and RDO install on CI nodes CentOS tests have reverted to using upstream for EPEL rather than local mirrors, introducing some unnecessary instability. The root of the problem is that /etc/nodepool/provider disappeared with zuulv3, so we now always re-install the EPEL repo and overwrite the local EPEL .repos files that were made during test setup and point to local mirrors. The other change is that we stopped installing the RDO repositories on the testing nodes too. That we were incorrectly taking this path and reinstalling EPEL has hidden the removal of these packages from the base image in the test, since it ends up installing them too. Split the install into two parts -- epel and RDO. Check for /etc/ci/mirror_info.sh (the sourcable mirror script provided by base test setup) and if so, just enable EPEL so we get the CI-mirror version correctly. Install the RDO repositories (if not already installed) unconditionally. 
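Condensed, the repo handling on CentOS/RHEL now amounts to roughly:

    if [[ -f /etc/ci/mirror_info.sh ]]; then
        # CI node: the pre-installed EPEL repo files already point at the local mirror
        SKIP_EPEL_INSTALL=True
        sudo yum-config-manager --enable epel
    fi
    [[ ${SKIP_EPEL_INSTALL} != True ]] && _install_epel
    _install_rdo   # always, for peripheral packages such as OVS or newer kvm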
Change-Id: Iccb045a6695deb10da4d68a5694e1fa45ccbb810 --- stack.sh | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/stack.sh b/stack.sh index 32eb43f292..e57ab7b6fc 100755 --- a/stack.sh +++ b/stack.sh @@ -282,7 +282,7 @@ fi # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. -function _install_epel_and_rdo { +function _install_epel { # NOTE: We always remove and install latest -- some environments # use snapshot images, and if EPEL version updates they break # unless we update them to latest version. @@ -313,12 +313,27 @@ EOF yum_install epel-release || \ die $LINENO "Error installing EPEL repo, cannot continue" sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo +} - # ... and also optional to be enabled - sudo yum-config-manager --enable rhel-7-server-optional-rpms +function _install_rdo { + # There are multiple options for this, including using CloudSIG + # repositories (centos-release-*), trunk versions, etc. Since + # we're not interested in the actual openstack distributions + # (since we're using git to run!) but only peripherial packages + # like kvm or ovs, this has been reliable. + + # TODO(ianw): figure out how to best mirror -- probably use infra + # mirror RDO reverse proxy. We could either have test + # infrastructure set it up disabled like EPEL, or fiddle it here. + # Per the point above, it's a bunch of repos so starts getting a + # little messy... + if ! is_package_installed rdo-release ; then + yum_install https://rdoproject.org/repos/rdo-release.rpm + fi - # install the lastest RDO - is_package_installed rdo-release || yum_install https://rdoproject.org/repos/rdo-release.rpm + # Also enable optional for RHEL7 proper. Note this is a silent + # no-op on other platforms. + sudo yum-config-manager --enable rhel-7-server-optional-rpms if is_oraclelinux; then sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56 @@ -362,20 +377,22 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -# If we have /etc/nodepool/provider assume we're on a OpenStack CI -# node, where EPEL is already pointing at our internal mirror and RDO -# is pre-installed. -if [[ -f /etc/nodepool/provider ]]; then - SKIP_EPEL_INSTALL=True - if is_fedora; then - # However, EPEL is not enabled by default. +if [[ $DISTRO == "rhel7" ]]; then + # If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI + # node, where EPEL is installed (but disabled) and already + # pointing at our internal mirror + if [[ -f /etc/ci/mirror_info.sh ]]; then + SKIP_EPEL_INSTALL=True sudo yum-config-manager --enable epel fi -fi -if is_fedora && [[ $DISTRO == "rhel7" ]] && \ - [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel_and_rdo + if [[ ${SKIP_EPEL_INSTALL} != True ]]; then + _install_epel + fi + # Along with EPEL, CentOS (and a-likes) require some packages only + # available in RDO repositories (e.g. OVS, or later versions of + # kvm) to run. + _install_rdo fi # Ensure python is installed From 6bcd8cad16c39dd8596d654eb4587e848d0db1fe Mon Sep 17 00:00:00 2001 From: Peter Penchev Date: Fri, 12 Jan 2018 23:49:04 +0200 Subject: [PATCH 0784/1936] Bump the Cinder LVM backing file size to 24Gb. This follows a change made to devstack-gate in commit 841ebc3 to allow tempest to succeed even if it happens to run several volume tests in parallel. 
Right now it's possible for a tempest-full test (run without devstack-gate) to fail with an "Insufficient free virtual space" error in the cinder-scheduler log. Suggested by: Clark Boylan Closes-Bug: 1743597 Change-Id: I16ccb9976d1bc7c9f56a6a4d73e35042a5867ef9 --- doc/source/configuration.rst | 2 +- stackrc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index d932d8cd86..49cad05554 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -667,7 +667,7 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_GROUP_NAME="stack-volumes" VOLUME_NAME_PREFIX="volume-" - VOLUME_BACKING_FILE_SIZE=10250M + VOLUME_BACKING_FILE_SIZE=24G Keystone diff --git a/stackrc b/stackrc index 286a04d3fe..d7ebdd87d4 100644 --- a/stackrc +++ b/stackrc @@ -762,8 +762,8 @@ for image_url in ${IMAGE_URLS//,/ }; do fi done -# 10Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} +# 24Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G} # Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} From 789301240adc7fa91cd34405bf87e15c3b21bc51 Mon Sep 17 00:00:00 2001 From: chengebj5238 Date: Thu, 18 Jan 2018 15:53:27 +0800 Subject: [PATCH 0785/1936] URL changes are modified to be correct Change-Id: Ied5ee7da0a37405f130f7cecff48919f16435952 --- doc/source/configuration.rst | 2 +- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- doc/source/guides/neutron.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index d932d8cd86..919a053317 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -643,7 +643,7 @@ the instructions in ``./tools/xen/README.md``. Cells ~~~~~ -`Cells `__ is +`Cells `__ is an alternative scaling option. To setup a cells environment add the following to your ``localrc`` section: diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 3592844efb..7dee520a23 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -2,7 +2,7 @@ Configure Load-Balancer Version 2 ================================= Starting in the OpenStack Liberty release, the -`neutron LBaaS v2 API `_ +`neutron LBaaS v2 API `_ is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference driver is based on Octavia. diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 092809a1cf..1b8dccd7f3 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -396,7 +396,7 @@ controller node. In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a publicly routed IPv4 subnet. In this specific instance we are using -the special TEST-NET-3 subnet defined in `RFC 5737 `_, +the special TEST-NET-3 subnet defined in `RFC 5737 `_, which is used for documentation. 
In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE would be a public IP address range that you or your organization has allocated to you, so that you could access your instances from the From ffd0047afa78bfd4d6576afe1d4b8c79435b158c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 18 Jan 2018 15:12:29 +0000 Subject: [PATCH 0786/1936] functions-common: Don't reguess Python versions We have already established the correct version of Python to use during installation, either automatically or through user-provided information (USE_PYTHON3, PYTHON3_VERSION). Don't do it again. Change-Id: I7bdf2be9a885994bf2c437dd104048a1ff2f6666 Closes-Bug: #1744096 --- functions-common | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/functions-common b/functions-common index df295a3395..78710deeb0 100644 --- a/functions-common +++ b/functions-common @@ -2301,12 +2301,7 @@ function install_oscwrap { function cleanup_oscwrap { local total=0 - if python3_enabled ; then - local python=python3 - else - local python=python - fi - total=$(cat $OSCWRAP_TIMER_FILE | $python -c "import sys; print(sum(int(l) for l in sys.stdin))") + total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))") _TIME_TOTAL["osc"]=$total rm $OSCWRAP_TIMER_FILE } From bb7d2f233b92a3f9ee17ad0702fecb68bbf1e712 Mon Sep 17 00:00:00 2001 From: Tim Swanson Date: Sat, 16 Dec 2017 17:14:10 -0500 Subject: [PATCH 0787/1936] Allow public router external net to use a non-flat provider network. Allow users to auto-create a neutron non-flat providernet public network and use it for external router interfaces. By default, keep the existing flat network type behavior. Change-Id: I64f71b0c9fcac97b9b84b7d30ee61659b2a690f1 --- lib/neutron_plugins/services/l3 | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 41a467df4c..9be32b79aa 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -39,9 +39,9 @@ Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} -# Use flat providernet for public network +# Use providernet for public network # -# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network +# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network # for external interface of neutron l3-agent. In that case, # PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value # used for the network. In case of ofagent, you should add the @@ -59,6 +59,10 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # Q_USE_PROVIDERNET_FOR_PUBLIC=True # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex +# +# The provider-network-type defaults to flat, however, the values +# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could +# be set to specify the parameters for an alternate network type. Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} @@ -240,7 +244,7 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. 
Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) else EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) fi From ffe691ecd5f77795666baa9317910de8d6b858e1 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 12 Jan 2016 17:58:44 -0800 Subject: [PATCH 0788/1936] tempest: set compute-feature-enabled.volume_multiattach This adds the ENABLE_VOLUME_MULTIATTACH flag and if True configures Tempest to run volume multiattach tests. Note that due to https://bugzilla.redhat.com/show_bug.cgi?id=1378242 we can't run multiattach tests with the Pike UCA packages since those include qemu 2.10 and libvirt 3.6, and the valid versions for multiattach support with libvirt is qemu<2.10 or libvirt>=3.10. Depends-On: I80c20914c03d7371e798ca3567c37307a0d54aaa Depends-On: I158c6f20e3e6a24bd2e5299abbeb3fc5208e5885 Part of nova blueprint multi-attach-volume Change-Id: I46b7eabf6a28f230666f6933a087f73cb4408348 --- lib/tempest | 4 ++++ stackrc | 12 +++++++++++- tools/fixup_stuff.sh | 7 ++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 35dbb7b6a1..3b39dae422 100644 --- a/lib/tempest +++ b/lib/tempest @@ -382,6 +382,10 @@ function configure_tempest { fi fi + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True + fi + if is_service_enabled n-novnc; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi diff --git a/stackrc b/stackrc index 286a04d3fe..ba218b9a38 100644 --- a/stackrc +++ b/stackrc @@ -601,6 +601,11 @@ NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6} SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} SPICE_BRANCH=${SPICE_BRANCH:-master} +# Global flag used to configure Tempest and potentially other services if +# volume multiattach is supported. In Queens, only the libvirt compute driver +# and lvm volume driver support multiattach, and qemu must be less than 2.10 +# or libvirt must be greater than or equal to 3.10. +ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can @@ -612,7 +617,12 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then + # If ENABLE_VOLUME_MULTIATTACH is True, the Ubuntu Cloud Archive can't + # be used until it provides libvirt>=3.10, and with older versions of + # Ubuntu the group is "libvirtd". + # TODO(mriedem): Remove the ENABLE_VOLUME_MULTIATTACH check when + # UCA has libvirt>=3.10. 
+ if [[ "$os_VENDOR" =~ (Debian|Ubuntu) && "${ENABLE_VOLUME_MULTIATTACH}" == "False" ]]; then # The groups change with newer libvirt. Older Ubuntu used # 'libvirtd', but now uses libvirt like Debian. Do a quick check # to see if libvirtd group already exists to handle grenade's case. diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index efe0125741..f78f05f2f8 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -75,7 +75,12 @@ fi # Make it possible to switch this based on an environment variable as # libvirt 2.5.0 doesn't handle nested virtualization quite well and this # is required for the trove development environment. -if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; then +# The Pike UCA has qemu 2.10 but libvirt 3.6, therefore if +# ENABLE_VOLUME_MULTIATTACH is True, we can't use the Pike UCA +# because multiattach won't work with those package versions. +# We can remove this check when the UCA has libvirt>=3.10. +if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" && \ + "${ENABLE_VOLUME_MULTIATTACH}" == "False" ]]; then # This pulls in apt-add-repository install_package "software-properties-common" # Use UCA for newer libvirt. Should give us libvirt 2.5.0. From 0b45e2f8e8d120d9103e735c580e6f7396df3e29 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jan 2018 17:41:04 -0600 Subject: [PATCH 0789/1936] Remove hack for ubuntu cloud archive There is a hack here to set up ubuntu cloud archive, pinning it to mirror.dfw.rax.openstack.org. The mirror-info role seems to be doing this correctly now though, so let's remove the hack and let things work normally. Change-Id: I283cb3452245b64e9492806f06404b484f21c358 --- playbooks/pre.yaml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index d61fd45de0..6681fb20a5 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -18,15 +18,3 @@ - setup-devstack-cache - start-fresh-logging - write-devstack-local-conf - # TODO(jeblair): remove when configure-mirrors is fixed - tasks: - - name: Hack mirror_info - shell: - _raw_params: | - mkdir /etc/ci - cat << "EOF" > /etc/ci/mirror_info.sh - export NODEPOOL_UCA_MIRROR=http://mirror.dfw.rax.openstack.org/ubuntu-cloud-archive - EOF - args: - executable: /bin/bash - become: true From 64039ef3008ccacbacad5c45dbc1881af4e868fa Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Tue, 23 Jan 2018 12:06:57 +0200 Subject: [PATCH 0790/1936] Increse api_max_retries and api_retry_interval for ironic There is no way to upgrade ironic before nova because of grenade design. In multinode job we do not restart nova as we test partial upgrade of ironic there. On slow nodes upgrading ironic takes time and nova looses ironic connectivity This patch increases api_retry_interval and api_max_retries to make sure we have a time to upgrade ironic before nova compute stuck. 
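With the values used below this gives nova-compute a tolerance window of roughly
api_max_retries * api_retry_interval = 300 * 5 s = 1500 s (25 minutes) before it
gives up on the ironic API.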
Change-Id: I3b1429d6561431a82edda04a0e574cac38771837 --- lib/nova_plugins/hypervisor-ironic | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index ee1a0e0a5e..c91f70b9bb 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -58,6 +58,9 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic project_name demo + + iniset $NOVA_CONF ironic api_max_retries 300 + iniset $NOVA_CONF ironic api_retry_interval 5 } # install_nova_hypervisor() - Install external components From 8dd918c59e630cc9e7be87b8eb35aa12079e3693 Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Tue, 24 Oct 2017 09:34:03 -0400 Subject: [PATCH 0791/1936] Update nova api_class setting to backend Castellan switches the `api_class` config option to `backend`. The change is still backwards compatible with the old `api_class` setting, but cinder already updated to use the new option (see I5e46c738531d5d56777e91a00f4cee9531356f2e) and it is better to use the new setting. Change-Id: Ib609c82e7076d19676baaf4f08abd79ea11db0e3 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index c40c43af6c..db88474f55 100644 --- a/lib/nova +++ b/lib/nova @@ -424,7 +424,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $NOVA_CONF key_manager api_class nova.keymgr.conf_key_mgr.ConfKeyManager + iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager if is_fedora || is_suse; then # nova defaults to /usr/local/bin, but fedora and suse pip like to From 842d54a2997adaf65369e56942e1d4f2b27aeb77 Mon Sep 17 00:00:00 2001 From: Ryota MIBU Date: Mon, 25 Dec 2017 16:28:50 +0900 Subject: [PATCH 0792/1936] use openstack command instead of nova command In function 'get_instance_ip', the 'nova' client command is used to get instance information in order to retrieve the IP address of the instance. There is no need to use the nova command, since the 'openstack' client already supports such basic operations. Moreover, the 'openstack' client has an option to get the value of a specified column, which gives a more accurate way of retrieving the IP address. This patch replaces the nova command in 'get_instance_ip' with the 'openstack' command. Note, this nova command is the only one in the devstack tree.
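Illustrative aside, not part of the patch that follows: a rough sketch of the parsing described above, with an invented network name and invented addresses:

    # 'openstack server show -c addresses -f value <vm_id>' prints a string like
    # "private=10.0.0.5, 2001:db8::5"; the sed expression then extracts the
    # first IPv4 address for the requested network name:
    addresses="private=10.0.0.5, 2001:db8::5"
    echo $addresses | sed -n "s/^.*private=\([0-9\.]*\).*$/\1/p"   # prints 10.0.0.5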
Change-Id: Iee0b81a994a4da5b3f4572c2e8eb30514cd43f89 Signed-off-by: Ryota MIBU --- functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 20b83b3cd0..959133ce5c 100644 --- a/functions +++ b/functions @@ -503,13 +503,13 @@ function ping_check { function get_instance_ip { local vm_id=$1 local network_name=$2 - local nova_result + local addresses local ip - nova_result="$(nova show $vm_id)" - ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) + addresses=$(openstack server show -c addresses -f value "$vm_id") + ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p") if [[ $ip = "" ]];then - echo "$nova_result" + echo "addresses of server $vm_id : $addresses" die $LINENO "[Fail] Couldn't get ipaddress of VM" fi echo $ip From 7f3248229b170fe954af9a67a578bf6cd34d2b23 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 26 Jan 2018 06:19:49 +0000 Subject: [PATCH 0793/1936] Updated from generate-devstack-plugins-list Change-Id: I0648e5cd08bb88d5972726241da3d396ad1d0129 --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 31812aeefb..1dab9ce9f5 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -33,7 +33,6 @@ blazar `git://git.openstack.org/openstack/blazar broadview-collector `git://git.openstack.org/openstack/broadview-collector `__ ceilometer `git://git.openstack.org/openstack/ceilometer `__ ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm `__ -cerberus `git://git.openstack.org/openstack/cerberus `__ cloudkitty `git://git.openstack.org/openstack/cloudkitty `__ collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin `__ congress `git://git.openstack.org/openstack/congress `__ @@ -95,6 +94,7 @@ monasca-api `git://git.openstack.org/openstack/monasc monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ monasca-events-api `git://git.openstack.org/openstack/monasca-events-api `__ monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ +monasca-tempest-plugin `git://git.openstack.org/openstack/monasca-tempest-plugin `__ monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ murano `git://git.openstack.org/openstack/murano `__ networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ From 744a829ce0b0a61a9b3be5afa8dfb4a258c69f11 Mon Sep 17 00:00:00 2001 From: shutingm Date: Sun, 28 Jan 2018 23:06:40 -0800 Subject: [PATCH 0794/1936] Add the dependent package for python-pcre installation This patch add libpcre3-dev and pcre-devel package for python-pcre installation. 
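Illustrative aside, not part of the patch: python-pcre builds a C extension against libpcre, so a quick, hedged way to confirm the headers landed on the two package families is:

    # illustrative checks only
    dpkg -s libpcre3-dev >/dev/null 2>&1 && echo "PCRE headers present (deb)"
    rpm -q pcre-devel >/dev/null 2>&1 && echo "PCRE headers present (rpm)"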
Closes-Bug: #1745606 Change-Id: I59fc688519341c90dc33b79d536f0625a6c4dd17 --- files/debs/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/debs/general b/files/debs/general index 8e0018d284..df872a0a6c 100644 --- a/files/debs/general +++ b/files/debs/general @@ -15,6 +15,7 @@ libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 libmysqlclient-dev # MySQL-python +libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libsystemd-dev # for systemd-python diff --git a/files/rpms/general b/files/rpms/general index 878b935a1b..5d9a4ad5a5 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -22,6 +22,7 @@ net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed +pcre-devel # for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc From 540141cc1c32dbd451c0cc7805eda84c2e248a8f Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Tue, 30 Jan 2018 03:18:17 +0000 Subject: [PATCH 0795/1936] Remove libpcre3-dev&pcre-devel from horizon prerequisite As libpcre3-dev&pcre-devel are now added as general prerequisite, remove it from horizon prerequisite. Change-Id: I872aec210028373c39baee0ab846469fd9920de9 --- files/debs/horizon | 1 - files/rpms/horizon | 1 - 2 files changed, 2 deletions(-) diff --git a/files/debs/horizon b/files/debs/horizon index 1f45b54f7c..48332893b1 100644 --- a/files/debs/horizon +++ b/files/debs/horizon @@ -1,3 +1,2 @@ apache2 # NOPRIME libapache2-mod-wsgi # NOPRIME -libpcre3-dev # pyScss diff --git a/files/rpms/horizon b/files/rpms/horizon index aeb2cb5c96..fa5601a95f 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,5 +1,4 @@ Django httpd # NOPRIME mod_wsgi # NOPRIME -pcre-devel # pyScss pyxattr From a904caded4269039a087292a8b7b95eb2de21142 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 22 Jan 2018 16:18:55 -0600 Subject: [PATCH 0796/1936] Move zuul_copy_output to be a job variable The zuul_copy_output variable is designed to be able to be used as a zuul job variable so that zuul dictionary merging will work. However, it's currently being set in the playbook rather than as a job variable, so it's not possible to supplement it in a child job. Move it to be a job variable. Also remove the wrapping {} as they should not be needed to make zuul_copy_output a dictionary. 
Change-Id: I78c7fed47c2ab868384c74dbff7904d33d510dd9 --- .zuul.yaml | 34 ++++++++++++++++++++++++++++++++++ playbooks/post.yaml | 34 ---------------------------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 037e9a9ea8..0de71f7c5f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -77,6 +77,40 @@ devstack_services: horizon: false tempest: false + zuul_copy_output: + '{{ devstack_conf_dir }}/local.conf': 'logs' + '{{ devstack_conf_dir }}/localrc': 'logs' + '{{ devstack_conf_dir }}/.stackenv': 'logs' + '{{ devstack_log_dir }}/dstat-csv.log': 'logs' + '{{ devstack_log_dir }}/devstacklog.txt': 'logs' + '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs' + '{{ devstack_full_log}}': 'logs' + '{{ stage_dir }}/verify_tempest_conf.log': 'logs' + '{{ stage_dir }}/apache': 'logs' + '{{ stage_dir }}/apache_config': 'logs' + '{{ stage_dir }}/etc': 'logs' + '/var/log/rabbitmq': 'logs' + '/var/log/postgresql': 'logs' + '/var/log/mysql.err': 'logs' + '/var/log/mysql.log': 'logs' + '/var/log/libvirt': 'logs' + '/etc/sudoers': 'logs' + '/etc/sudoers.d': 'logs' + '{{ stage_dir }}/iptables.txt': 'logs' + '{{ stage_dir }}/df.txt': 'logs' + '{{ stage_dir }}/pip2-freeze.txt': 'logs' + '{{ stage_dir }}/pip3-freeze.txt': 'logs' + '{{ stage_dir }}/dpkg-l.txt': 'logs' + '{{ stage_dir }}/rpm-qa.txt': 'logs' + '{{ stage_dir }}/core': 'logs' + '{{ stage_dir }}/listen53.txt': 'logs' + '{{ stage_dir }}/deprecations.log': 'logs' + '/var/log/ceph': 'logs' + '/var/log/openvswitch': 'logs' + '/var/log/glusterfs': 'logs' + '/etc/glusterfs/glusterd.vol': 'logs' + '/etc/resolv.conf': 'logs' + '/var/log/unbound.log': 'logs' pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 0c5e83b6a0..d8ee99e783 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -24,40 +24,6 @@ # capture-system-logs should be the last role before stage-output - capture-system-logs - role: stage-output - zuul_copy_output: - { '{{ devstack_conf_dir }}/local.conf': 'logs', - '{{ devstack_conf_dir }}/localrc': 'logs', - '{{ devstack_conf_dir }}/.stackenv': 'logs' , - '{{ devstack_log_dir }}/dstat-csv.log': 'logs', - '{{ devstack_log_dir }}/devstacklog.txt': 'logs', - '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs', - '{{ devstack_full_log}}': 'logs', - '{{ stage_dir }}/verify_tempest_conf.log': 'logs', - '{{ stage_dir }}/apache': 'logs', - '{{ stage_dir }}/apache_config': 'logs', - '{{ stage_dir }}/etc': 'logs', - '/var/log/rabbitmq': 'logs', - '/var/log/postgresql': 'logs', - '/var/log/mysql.err': 'logs', - '/var/log/mysql.log': 'logs', - '/var/log/libvirt': 'logs', - '/etc/sudoers': 'logs', - '/etc/sudoers.d': 'logs', - '{{ stage_dir }}/iptables.txt': 'logs', - '{{ stage_dir }}/df.txt': 'logs', - '{{ stage_dir }}/pip2-freeze.txt': 'logs', - '{{ stage_dir }}/pip3-freeze.txt': 'logs', - '{{ stage_dir }}/dpkg-l.txt': 'logs', - '{{ stage_dir }}/rpm-qa.txt': 'logs', - '{{ stage_dir }}/core': 'logs', - '{{ stage_dir }}/listen53.txt': 'logs', - '{{ stage_dir }}/deprecations.log': 'logs', - '/var/log/ceph': 'logs', - '/var/log/openvswitch': 'logs', - '/var/log/glusterfs': 'logs', - '/etc/glusterfs/glusterd.vol': 'logs', - '/etc/resolv.conf': 'logs', - '/var/log/unbound.log': 'logs' } extensions_to_txt: - conf - log From bf1e6e7a34cfdbb859aa072666b7018ef7387e9e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 1 Feb 2018 06:18:22 +0000 Subject: [PATCH 0797/1936] Updated from 
generate-devstack-plugins-list Change-Id: Idda642196d42d6ae930f4978c88c1b53124b7ead --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 1dab9ce9f5..d830507f07 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -78,6 +78,7 @@ keystone `git://git.openstack.org/openstack/keysto kingbird `git://git.openstack.org/openstack/kingbird `__ kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ +kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin `__ magnum `git://git.openstack.org/openstack/magnum `__ magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ manila `git://git.openstack.org/openstack/manila `__ @@ -130,6 +131,7 @@ networking-spp `git://git.openstack.org/openstack/networ networking-vpp `git://git.openstack.org/openstack/networking-vpp `__ networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ neutron `git://git.openstack.org/openstack/neutron `__ +neutron-classifier `git://git.openstack.org/openstack/neutron-classifier `__ neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas `__ neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard `__ From 854cb67636c478143ed0071d44a84447a72e87a7 Mon Sep 17 00:00:00 2001 From: Peter Penchev Date: Thu, 14 Dec 2017 21:45:52 +0000 Subject: [PATCH 0798/1936] Allow a non-127.0.0.1 definition for the hostname. In some environments it might be useful to resolve the hostname to an IP address that is reachable by other hosts in the local network. If some of the configuration scripts have taken care of putting a proper line in /etc/hosts, do not try to override it by appending a 127.0.0.1 one. Change-Id: I7ae20a66c473b0c683803cc44654cd95fcce3639 Closes-Bug: 1746751 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 32eb43f292..266ae7c11a 100755 --- a/stack.sh +++ b/stack.sh @@ -353,7 +353,7 @@ safe_chmod 0755 $DATA_DIR # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then +if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi From 6e316daf45032ce9a43e0c995ced2682c950cc3b Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Wed, 24 Jan 2018 16:52:00 -0800 Subject: [PATCH 0799/1936] Zuul: Remove project name Zuul no longer requires the project-name for in-repo configuration. Omitting it makes forking or renaming projects easier. 
Change-Id: I32868cec22149ec1c18fe2737a65e88d32bff531 --- .zuul.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 037e9a9ea8..0595b98400 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -46,7 +46,7 @@ - zuul: openstack-infra/openstack-zuul-jobs timeout: 7200 vars: - test_matrix_configs: ['neutron', 'tlsproxy'] + test_matrix_configs: [neutron, tlsproxy] devstack_localrc: DATABASE_PASSWORD: secretdatabase RABBIT_PASSWORD: secretrabbit @@ -154,7 +154,6 @@ run: playbooks/tox/run.yaml - project: - name: openstack-dev/devstack check: jobs: - devstack From 0f39756f6158bf5cf9a7bfc7eda655b883002722 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Fri, 2 Feb 2018 17:03:24 +0000 Subject: [PATCH 0800/1936] Make the apache-logs-conf role less verbose The role is logging a lot of useless data with all the stat info from various config and log files. Remove verbosity using no_log. Change-Id: I72c721573ffc4a14adc3e2b29285c1071b7ec4f7 --- roles/apache-logs-conf/tasks/main.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index 7fd490ea4c..60b4fbfead 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,12 +64,14 @@ path: "{{ apache_config_paths[ansible_os_family] }}" file_type: any register: apache_configs + no_log: true - name: Dereference configurations stat: path: "{{ item.path }}" with_items: "{{ apache_configs.files }}" register: apache_configs_deref + no_log: true - name: Link configurations file: @@ -78,3 +80,4 @@ state: hard with_items: "{{ apache_configs_deref.results }}" when: item.stat.isreg or item.stat.islnk + no_log: true From 63171eeae238c09cbda8c37fc55af0cd43edd82c Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Wed, 31 Jan 2018 21:01:36 +0000 Subject: [PATCH 0801/1936] Move extensions_to_txt to the job defintion Move extensions_to_txt to the job defintion so that it may be extended by descendant jobs. Depends-on: https://review.openstack.org/540485/ Change-Id: I6e9009faa1451698ed781ce1ffdd9f22c97daa93 --- .zuul.yaml | 6 ++++++ playbooks/post.yaml | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0de71f7c5f..8fe4535814 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -111,6 +111,12 @@ '/etc/glusterfs/glusterd.vol': 'logs' '/etc/resolv.conf': 'logs' '/var/log/unbound.log': 'logs' + extensions_to_txt: + conf: True + log: True + localrc: True + stackenv: True + summary: True pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml diff --git a/playbooks/post.yaml b/playbooks/post.yaml index d8ee99e783..9e66f20e9e 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -24,12 +24,6 @@ # capture-system-logs should be the last role before stage-output - capture-system-logs - role: stage-output - extensions_to_txt: - - conf - - log - - localrc - - stackenv - - summary # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job # starts pulling logs for us from {{ ansible_user_dir }}/logs. 
# Meanwhile we already store things in ansible_user_dir and use From da6de10f5dda20fc023a8215b13b873a068b6a37 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 22 Jan 2018 11:42:01 +0000 Subject: [PATCH 0802/1936] fixed_key: By default use a hardcoded fixed_key across devstack envs This change mimics how fixed_key would actually be deployed in a real world environment, with a single key shared across Nova and Cinder across all hosts. Change-Id: I50a48e2da57a1cc1ecd250150ea6e9c3745baaca --- stack.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index 32eb43f292..f72347557a 100755 --- a/stack.sh +++ b/stack.sh @@ -1241,16 +1241,17 @@ if is_service_enabled g-reg; then done fi -# Create a randomized default value for the key manager's fixed_key -# NOTE(lyarwood): This is currently set to 36 as a workaround to the following -# libvirt bug that incorrectly pads passphrases that are a multiple of 16 bytes -# in length. -# Unable to use LUKS passphrase that is exactly 16 bytes long -# https://bugzilla.redhat.com/show_bug.cgi?id=1447297 +# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack +# deployments. This ensures the keys match across nova and cinder across all +# hosts. +FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} if is_service_enabled nova; then - key=$(generate_hex_string 36) - iniset $NOVA_CONF key_manager fixed_key "$key" - iniset $NOVA_CPU_CONF key_manager fixed_key "$key" + iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" + iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" +fi + +if is_service_enabled cinder; then + iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi # Launch the nova-api and wait for it to answer before continuing From 838b833b638ee13599707d8a0be899815f201927 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Sun, 4 Feb 2018 17:55:54 +0100 Subject: [PATCH 0803/1936] Add Zuul v3 native unit-tests Add new job to replace legacy-devstack-unit-tests. Change-Id: I4fe59b1954514a7146a4412e3103a0a05a9250f2 --- .zuul.yaml | 11 +++++++++++ playbooks/unit-tests/pre.yaml | 13 +++++++++++++ playbooks/unit-tests/run.yaml | 12 ++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 playbooks/unit-tests/pre.yaml create mode 100644 playbooks/unit-tests/run.yaml diff --git a/.zuul.yaml b/.zuul.yaml index f03d30a497..802bb79802 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -187,11 +187,22 @@ - playbooks/tox/pre.yaml run: playbooks/tox/run.yaml +- job: + name: devstack-unit-tests + description: | + Runs unit tests on devstack project. + + It runs ``run_tests.sh``. 
+ pre-run: playbooks/unit-tests/pre.yaml + run: playbooks/unit-tests/run.yaml + - project: check: jobs: - devstack - devstack-multinode + - devstack-unit-tests gate: jobs: - devstack + - devstack-unit-tests diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml new file mode 100644 index 0000000000..cfa1676378 --- /dev/null +++ b/playbooks/unit-tests/pre.yaml @@ -0,0 +1,13 @@ +- hosts: all + + tasks: + + - name: Install prerequisites + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc + ./tools/install_prereqs.sh diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml new file mode 100644 index 0000000000..181521f072 --- /dev/null +++ b/playbooks/unit-tests/run.yaml @@ -0,0 +1,12 @@ +- hosts: all + + tasks: + + - name: Run run_tests.sh + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + ./run_tests.sh From b355a9489f5da99daa5b4eb827353630a37a8138 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 5 Feb 2018 10:54:48 +1100 Subject: [PATCH 0804/1936] Add pcre-devel for opensuse As a follow-on for I59fc688519341c90dc33b79d536f0625a6c4dd17 add to suse as well. Change-Id: I72a3cf33cb98ee2eca7f027c9e28f6fbf1404002 --- files/rpms-suse/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 0c1a2819b1..0b69cb1c01 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -19,6 +19,7 @@ make net-tools openssh openssl +pcre-devel # python-pcre postgresql-devel # psycopg2 psmisc python-cmd2 # dist:opensuse-12.3 From 81fac768b5a414c7ddd347e799fb49c0ccbc1d0a Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 19 Jan 2018 10:14:45 -0800 Subject: [PATCH 0805/1936] Remove unneed devstack early log file In the run devstack role we specify a path to devstack_early_log then hand it to stack.sh as a parameter which stack.sh does nothing with. While looking at a fix for this it was pointed out that these early logs make it into the job's output log now so we don't need a special file for them. Rather than handle this as a special case just let the job-output.txt log file pick up the logs for us which allows us to remove this unneeded feature. Change-Id: I9bedbe91c60257d94173b1c70676dd6c2b49dc91 --- roles/run-devstack/README.rst | 6 ------ roles/run-devstack/defaults/main.yaml | 1 - roles/run-devstack/tasks/main.yaml | 2 +- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst index e53f060602..d77eb15e99 100644 --- a/roles/run-devstack/README.rst +++ b/roles/run-devstack/README.rst @@ -6,9 +6,3 @@ Run devstack :default: /opt/stack The devstack base directory. - -.. zuul:rolevar:: devstack_early_log - :default: /opt/stack/log/devstack-early.txt - - The full devstack log that includes the whatever stack.sh logs before - the LOGFILE variable in local.conf is honoured. 
diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml index dc4528f692..fea05c8146 100644 --- a/roles/run-devstack/defaults/main.yaml +++ b/roles/run-devstack/defaults/main.yaml @@ -1,2 +1 @@ devstack_base_dir: /opt/stack -devstack_early_log: /opt/stack/logs/devstack-early.txt diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml index f53212904a..64f769c0d0 100644 --- a/roles/run-devstack/tasks/main.yaml +++ b/roles/run-devstack/tasks/main.yaml @@ -1,5 +1,5 @@ - name: Run devstack - shell: ./stack.sh 2>&1 {{ devstack_early_log }} + shell: ./stack.sh 2>&1 args: chdir: "{{devstack_base_dir}}/devstack" become: true From ac475bbb2a906f481b953494ba28690a716a3554 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 7 Feb 2018 18:35:40 +0000 Subject: [PATCH 0806/1936] Correct configuration setting using NUMBER_FAKE_NOVA_COMPUTE In Change-Id Ia3843818014f7c6c7526ef3aa9676bbddb8a85ca the 'host' setting used for each of the fake compute hosts was accidentally named 'nhost' ('\nhost' was edited poorly), so the setting doesn't actually do anything: you create multiple nova-compute processes they think they are all on the same host and only one hypervisor and resource provider is created. With the correction in place, the wait_for_compute function needs to be updated to be aware of the fact that the hostnames on the compute services will have a numeric prefix when the fake virt driver is used. Change-Id: I5e8430d170c0b1c4f195ebe510aff8be59e4a3bc --- functions | 7 ++++++- lib/nova | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 959133ce5c..eabe249894 100644 --- a/functions +++ b/functions @@ -446,7 +446,12 @@ function wait_for_compute { ID="" while [[ "\$ID" == "" ]]; do sleep 1 - ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname` --service nova-compute -c ID -f value) + if [[ "$VIRT_DRIVER" = 'fake' ]]; then + # When using the fake driver the compute hostnames have a suffix of 1 to NUMBER_FAKE_NOVA_COMPUTE + ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname`1 --service nova-compute -c ID -f value) + else + ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname` --service nova-compute -c ID -f value) + fi done EOF time_stop "wait_for_service" diff --git a/lib/nova b/lib/nova index fea2b8509a..a6ad907cf4 100644 --- a/lib/nova +++ b/lib/nova @@ -877,7 +877,7 @@ function start_nova_compute { # creating or modifying real configurations. Each fake # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" - iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" + iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else From 34b4ff0bfd0d6fe5a769bb60cdaeba9886ee9317 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 8 Feb 2018 06:15:13 +0000 Subject: [PATCH 0807/1936] Updated from generate-devstack-plugins-list Change-Id: I681735a1c29e4b41eb9fa71c2838df1a820f5f75 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index d830507f07..ba747203c6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -169,6 +169,7 @@ storlets `git://git.openstack.org/openstack/storle tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ +tatu `git://git.openstack.org/openstack/tatu `__ telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin `__ tricircle `git://git.openstack.org/openstack/tricircle `__ trio2o `git://git.openstack.org/openstack/trio2o `__ From 21e3be8a5e2768f35246e2c8ac03e09a51070225 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 6 Feb 2018 17:19:38 -0500 Subject: [PATCH 0808/1936] Add the nova-next job to the experimental queue The nova-next job is being moved from openstack-zuul-jobs to the nova repo and so we'll remove it's usage from project-config, therefore we need to define it's usage for devstack here. The related project-config change is: I36d96f89b3e5323746fcbcef5cc7e4d0384a184d Depends-On: I24a5f73c29094a23e2fdef8ee8b43601300af593 Change-Id: I28971dc7e5cb0b5cf9698e5251a7bb099e63f3db --- .zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8bc5103588..a90df2f06a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -212,3 +212,6 @@ jobs: - devstack - devstack-unit-tests + experimental: + jobs: + - nova-next From a4922067009b4e293811ec6aa976ef7f72363069 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 15 Feb 2018 07:45:10 -0600 Subject: [PATCH 0809/1936] Centralize setting default branch for stable branching One of the steps when we create a new stable branch is to branch devstack, then update the default branch for most repos to use the new stable branch for each repo. This requires making multiple updates throughout stackrc, and to further complicate things, there are some repo branch variables for branchless repos that should not be updated along with the others. This can be error prone if not fully aware of these exceptions. To simplify this process a little, this patch adds two common variables - one that can be set to the new stable branch name for all of the repos that should be branched, and one that can be used for all of the branchless repos to make it explicit that those values should be left alone. The cycle-trailing repos have until two weeks after final release to branch, so also adding another variable for those to make it easy to update them at a later time, separately from the other repos. 
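Illustrative aside, not part of the patch that follows: with these variables in place, cutting a stable branch becomes a small edit to the shared defaults instead of touching every per-repo line; the branch name below is invented:

    # hypothetical stackrc edit at stable-branch time
    TARGET_BRANCH=stable/queens        # normally-branched repos follow this
    TRAILING_TARGET_BRANCH=master      # cycle-trailing repos branch ~2 weeks later
    BRANCHLESS_TARGET_BRANCH=master    # e.g. tempest never gets a stable branch
    # a line such as CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH} then resolves
    # to stable/queens unless CINDER_BRANCH is explicitly overridden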
Change-Id: I82aa19e739eeda3721bac1cb5153ad0bf2d1125a --- stackrc | 154 ++++++++++++++++++++++++--------------------- tests/test_refs.sh | 4 +- 2 files changed, 85 insertions(+), 73 deletions(-) diff --git a/stackrc b/stackrc index c243d271b8..b7105d368e 100644 --- a/stackrc +++ b/stackrc @@ -13,6 +13,18 @@ RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Source required DevStack functions and globals source $RC_DIR/functions +# Set the target branch. This is used so that stable branching +# does not need to update each repo below. +TARGET_BRANCH=master + +# Cycle trailing projects need to branch later than the others. +TRAILING_TARGET_BRANCH=master + +# And some repos do not create stable branches, so this is used +# to make it explicit and avoid accidentally setting to a stable +# branch. +BRANCHLESS_TARGET_BRANCH=master + # Destination path for installation DEST=/opt/stack @@ -256,35 +268,35 @@ DEVSTACK_SERIES="queens" # block storage service CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} -CINDER_BRANCH=${CINDER_BRANCH:-master} +CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH} # image catalog service GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} -GLANCE_BRANCH=${GLANCE_BRANCH:-master} +GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH} # django powered web control panel for openstack HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} -HORIZON_BRANCH=${HORIZON_BRANCH:-master} +HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH} # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} -KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH} # neutron service NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} -NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} +NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH} # neutron fwaas service NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git} -NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master} +NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH} # compute service NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} -NOVA_BRANCH=${NOVA_BRANCH:-master} +NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} # object storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} -SWIFT_BRANCH=${SWIFT_BRANCH:-master} +SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH} ############## # @@ -294,11 +306,11 @@ SWIFT_BRANCH=${SWIFT_BRANCH:-master} # consolidated openstack requirements REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} -REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} -TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} ############## @@ -310,56 +322,56 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} # volume client GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} -GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} +GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH} # os-brick client for local volume attachement GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} -GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master} 
+GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH} # python barbican client library GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git} -GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-master} +GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH} GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient # python glance client library GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} -GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master} +GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH} # ironic client GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} -GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master} +GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH} # ironic plugin is out of tree, but nova uses it. set GITDIR here. GITDIR["python-ironicclient"]=$DEST/python-ironicclient # the base authentication plugins that clients use to authenticate GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} -GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master} +GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH} # python keystone client library to nova that horizon uses GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} -GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master} +GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH} # neutron client GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} -GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-master} +GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH} # python client library to nova that horizon (and others) use GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} -GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master} +GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH} # python swift client library GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} -GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master} +GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH} # consolidated openstack python client GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} -GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master} +GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH} # this doesn't exist in a lib file, so set it here GITDIR["python-openstackclient"]=$DEST/python-openstackclient # placement-api CLI GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git} -GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-master} +GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH} ################### @@ -371,119 +383,119 @@ GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-master} # castellan key manager interface GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git} -GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-master} +GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH} # cliff command line framework GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} 
-GITBRANCH["cliff"]=${CLIFF_BRANCH:-master} +GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH} # async framework/helpers GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git} -GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master} +GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} # debtcollector deprecation framework/helpers GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} -GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master} +GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} -GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master} +GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} # oslo.cache GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git} -GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master} +GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH} # oslo.concurrency GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} -GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master} +GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH} # oslo.config GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} -GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-master} +GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH} # oslo.context GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git} -GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-master} +GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH} # oslo.db GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git} -GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-master} +GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} # oslo.i18n GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} -GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-master} +GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} # oslo.log GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} -GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-master} +GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} # oslo.messaging GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} -GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-master} +GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH} # oslo.middleware GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git} -GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master} +GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH} # oslo.policy GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} -GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master} +GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH} # oslo.privsep GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git} -GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master} +GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH} # oslo.reports GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} -GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master} +GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH} # oslo.rootwrap GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} -GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master} +GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH} # oslo.serialization 
GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git} -GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master} +GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH} # oslo.service GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git} -GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master} +GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH} # oslo.utils GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} -GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master} +GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH} # oslo.versionedobjects GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git} -GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master} +GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH} # oslo.vmware GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} -GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master} +GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH} # osprofiler GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git} -GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-master} +GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH} # pycadf auditing library GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} -GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master} +GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH} # stevedore plugin manager GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} -GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-master} +GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH} # taskflow plugin manager GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} -GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-master} +GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH} # tooz plugin manager GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git} -GITBRANCH["tooz"]=${TOOZ_BRANCH:-master} +GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} # pbr drives the setuptools configs GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} -GITBRANCH["pbr"]=${PBR_BRANCH:-master} +GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH} ################## @@ -494,65 +506,65 @@ GITBRANCH["pbr"]=${PBR_BRANCH:-master} # cursive library GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git} -GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master} +GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH} # glance store library GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} -GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master} +GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH} # keystone middleware GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} -GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master} +GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH} # s3 support for swift SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git} -SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} +SWIFT3_BRANCH=${SWIFT3_BRANCH:-$TARGET_BRANCH} # ceilometer middleware GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} -GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} 
+GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # os-brick library to manage local volume attaches GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} -GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} +GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH} # os-client-config to manage clouds.yaml and friends GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git} -GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-master} +GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH} GITDIR["os-client-config"]=$DEST/os-client-config # os-vif library to communicate between Neutron to Nova GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git} -GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master} +GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH} # osc-lib OpenStackClient common lib GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} -GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master} +GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH} # python-openstacksdk OpenStack Python SDK GITREPO["python-openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/python-openstacksdk.git} -GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-master} +GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} -GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} +GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH} # this doesn't exist in a lib file, so set it here GITDIR["ironic-lib"]=$DEST/ironic-lib # diskimage-builder tool GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} -GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-master} +GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$TARGET_BRANCH} GITDIR["diskimage-builder"]=$DEST/diskimage-builder # neutron-lib library containing neutron stable non-REST interfaces GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git} -GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master} +GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib # os-traits library for resource provider traits in the placement service GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} -GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-master} +GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} ################## # @@ -562,19 +574,19 @@ GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-master} # run-parts script required by os-refresh-config DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-master} +DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH} # os-apply-config configuration template tool OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-master} +OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH} # os-collect-config configuration agent OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-master} +OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH} # os-refresh-config configuration run-parts tool ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-master} +ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH} ################# @@ -587,7 +599,7 @@ 
ORC_BRANCH=${ORC_BRANCH:-master} # ironic python agent IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} -IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master} +IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} @@ -595,7 +607,7 @@ NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} -SPICE_BRANCH=${SPICE_BRANCH:-master} +SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH} # Global flag used to configure Tempest and potentially other services if # volume multiattach is supported. In Queens, only the libvirt compute driver diff --git a/tests/test_refs.sh b/tests/test_refs.sh index 65848cdc72..0f9aa4a5ca 100755 --- a/tests/test_refs.sh +++ b/tests/test_refs.sh @@ -15,10 +15,10 @@ echo "Ensuring we don't have crazy refs" -REFS=`grep BRANCH stackrc | grep -v -- '-master' | grep -v 'NOVNC_BRANCH'` +REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'` rc=$? if [[ $rc -eq 0 ]]; then - echo "Branch defaults must be master. Found:" + echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:" echo $REFS exit 1 fi From 22d70554acf711558c1a5018f9162eb5f1f4086c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 15 Feb 2018 16:41:19 -0600 Subject: [PATCH 0810/1936] Add /volume to block-storage endpoint The block-storage endpoint was added to the catalog, but in the suburl case it was not added with the /volume suburl. This leads to find it and attempting to use it but not being able to because it's mis-formed. Needed-By: https://review.openstack.org/545117 Change-Id: I84721c8ae637417e4b01be9e546ff77c250fc149 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 96a7d5bb8e..75486adda5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -372,7 +372,7 @@ function create_cinder_accounts { get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/" get_or_create_endpoint \ "volume" \ From 4d835e33b6e901ef87023ce9b7d71bc21170a9e3 Mon Sep 17 00:00:00 2001 From: IWAMOTO Toshihiro Date: Mon, 5 Feb 2018 16:57:41 +0900 Subject: [PATCH 0811/1936] Break up fixup_stuff Neutron functional tests want to use ubuntu cloud archive but it's not possible to source the fixup_stuff.sh from a neutron CI setup script. Break it up so that only the UCA portion can be executed from neutron. Change-Id: Ie18833bfa30f1789e63cbe9c86f5ece3453f43fb --- stack.sh | 1 + tools/fixup_stuff.sh | 114 +++++++++++++++++++++++++------------------ 2 files changed, 68 insertions(+), 47 deletions(-) diff --git a/stack.sh b/stack.sh index 9b496c0e20..81d2ed2bcc 100755 --- a/stack.sh +++ b/stack.sh @@ -773,6 +773,7 @@ fi # Do the ugly hacks for broken packages and distros source $TOP_DIR/tools/fixup_stuff.sh +fixup_all if [[ "$USE_SYSTEMD" == "True" ]]; then pip_install_gr systemd-python diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f78f05f2f8..90b2c8bf1f 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -45,27 +45,29 @@ fi # where Keystone will try and bind to the port and the port will already be # in use as an ephemeral port by another process. 
This places an explicit # exception into the Kernel for the Keystone AUTH ports. -keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - -# Only do the reserved ports when available, on some system (like containers) -# where it's not exposed we are almost pretty sure these ports would be -# exclusive for our DevStack. -if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} +function fixup_keystone { + keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} + + # Only do the reserved ports when available, on some system (like containers) + # where it's not exposed we are almost pretty sure these ports would be + # exclusive for our DevStack. + if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then + # Get any currently reserved ports, strip off leading whitespace + reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') + + if [[ -z "${reserved_ports}" ]]; then + # If there are no currently reserved ports, reserve the keystone ports + sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} + else + # If there are currently reserved ports, keep those and also reserve the + # Keystone specific ports. Duplicate reservations are merged into a single + # reservation (or range) automatically by the kernel. + sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} + fi else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} + echo_summary "WARNING: unable to reserve keystone ports" fi -else - echo_summary "WARNING: unable to reserve keystone ports" -fi +} # Ubuntu Cloud Archive #--------------------- @@ -79,8 +81,12 @@ fi # ENABLE_VOLUME_MULTIATTACH is True, we can't use the Pike UCA # because multiattach won't work with those package versions. # We can remove this check when the UCA has libvirt>=3.10. -if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" && \ - "${ENABLE_VOLUME_MULTIATTACH}" == "False" ]]; then +function fixup_uca { + if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" || \ + "${ENABLE_VOLUME_MULTIATTACH}" == "True" ]]; then + return + fi + # This pulls in apt-add-repository install_package "software-properties-common" # Use UCA for newer libvirt. Should give us libvirt 2.5.0. @@ -104,8 +110,7 @@ if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" && \ # Force update our APT repos, since we added UCA above. 
REPOS_UPDATED=False apt_get_update -fi - +} # Python Packages # --------------- @@ -120,27 +125,32 @@ function get_package_path { # Pre-install affected packages so we can fix the permissions # These can go away once we are confident that pip 1.4.1+ is available everywhere -# Fix prettytable 0.7.2 permissions -# Don't specify --upgrade so we use the existing package if present -pip_install 'prettytable>=0.7' -PACKAGE_DIR=$(get_package_path prettytable) -# Only fix version 0.7.2 -dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) -if [[ -d $dir ]]; then - sudo chmod +r $dir/* -fi +function fixup_python_packages { + # Fix prettytable 0.7.2 permissions + # Don't specify --upgrade so we use the existing package if present + pip_install 'prettytable>=0.7' + PACKAGE_DIR=$(get_package_path prettytable) + # Only fix version 0.7.2 + dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) + if [[ -d $dir ]]; then + sudo chmod +r $dir/* + fi -# Fix httplib2 0.8 permissions -# Don't specify --upgrade so we use the existing package if present -pip_install httplib2 -PACKAGE_DIR=$(get_package_path httplib2) -# Only fix version 0.8 -dir=$(echo $PACKAGE_DIR-0.8*) -if [[ -d $dir ]]; then - sudo chmod +r $dir/* -fi + # Fix httplib2 0.8 permissions + # Don't specify --upgrade so we use the existing package if present + pip_install httplib2 + PACKAGE_DIR=$(get_package_path httplib2) + # Only fix version 0.8 + dir=$(echo $PACKAGE_DIR-0.8*) + if [[ -d $dir ]]; then + sudo chmod +r $dir/* + fi +} -if is_fedora; then +function fixup_fedora { + if ! is_fedora; then + return + fi # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then @@ -198,7 +208,7 @@ if is_fedora; then pip_install --upgrade --force-reinstall requests fi fi -fi +} # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has # connection issues under proxy so re-install the latest version using @@ -222,7 +232,17 @@ fi # install.d/pip-and-virtualenv-source-install/04-install-pip # [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823 -if [[ ! -f /etc/ci/mirror_info.sh ]]; then - install_package python-virtualenv - pip_install -U --force-reinstall virtualenv -fi +function fixup_virtualenv { + if [[ ! -f /etc/ci/mirror_info.sh ]]; then + install_package python-virtualenv + pip_install -U --force-reinstall virtualenv + fi +} + +function fixup_all { + fixup_keystone + fixup_uca + fixup_python_packages + fixup_fedora + fixup_virtualenv +} From 8f7216290aebdd07f2277a92ae1baab5418c087c Mon Sep 17 00:00:00 2001 From: Josh Date: Thu, 1 Feb 2018 09:45:47 +0200 Subject: [PATCH 0812/1936] Restore NEUTRON_CREATE_INITIAL_NETWORKS flag Somehow this feature was lost in the transition from q-svc to neutron-api. This patch does not modify the default behavior but allows specifying the flag to false to prevent devstack from creating the public and private networks. 
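Illustrative aside, not part of the patch that follows: a minimal, hypothetical local.conf fragment using the restored flag might be:

    # local.conf excerpt; devstack reads these entries as plain shell variables
    [[local|localrc]]
    NEUTRON_CREATE_INITIAL_NETWORKS=False   # skip creating the public/private networks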
Change-Id: I952672496d007552c0c4d83db0d0df9be50326fc Signed-off-by: Josh --- lib/neutron | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c5839f5c3e..08347926f7 100644 --- a/lib/neutron +++ b/lib/neutron @@ -42,6 +42,7 @@ NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ +NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} @@ -438,7 +439,7 @@ function start_neutron_new { if is_service_enabled neutron-l3; then run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" fi - if is_service_enabled neutron-api; then + if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then # XXX(sc68cal) - Here's where plugins can wire up their own networks instead # of the code in lib/neutron_plugins/services/l3 if type -p neutron_plugin_create_initial_networks > /dev/null; then From a3844240ab80626adbfdfda59df0fa8133340a50 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sun, 18 Feb 2018 17:14:48 -0500 Subject: [PATCH 0813/1936] Set [scheduler]workers=$API_WORKERS Since blueprint placement-claims in Pike, the Nova FilterScheduler uses the placement service to make resource allocation 'claims' before sending the build request to the chosen compute host to perform the legacy style resource claim. This allows us to safely scale out the number of scheduler workers when using the FilterScheduler. The [scheduler]workers option defaults to ncpu if using the FilterScheduler (which is the default scheduler driver) so to avoid out of memory issues, we need to set $API_WORKERS scheduler workers if using the FilterScheduler in devstack. 
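Illustrative aside, not part of the patch that follows: after stacking, the scheduler worker count should match the devstack-wide API worker count instead of one worker per CPU. A hedged way to check, assuming devstack's iniget helper and the default config path, is:

    # hypothetical check; paths are assumptions
    source /opt/stack/devstack/inc/ini-config
    iniget /etc/nova/nova.conf scheduler workers   # expected to equal $API_WORKERS
    # API_WORKERS itself can still be pinned in local.conf (e.g. API_WORKERS=2)
    # to keep memory usage down on small CI nodes.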
Depends-On: Ifdcd363d7bc22e73d76d69777483e5aaff4036e3 Change-Id: Ieae234eb5388560b3f66bf60c156a91a8e831bc4 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index fea2b8509a..1257967c30 100644 --- a/lib/nova +++ b/lib/nova @@ -413,6 +413,9 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" iniset $NOVA_CONF scheduler driver "$SCHEDULER" iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS" + if [[ $SCHEDULER == "filter_scheduler" ]]; then + iniset $NOVA_CONF scheduler workers "$API_WORKERS" + fi iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" From fddf3430d8b3bb6bc60c6c69c344e7ae437ee894 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Mon, 19 Feb 2018 18:34:43 +0000 Subject: [PATCH 0814/1936] Render devstack ansible roles via zuul-sphinx Change-Id: Ie0db5bcfdacc2543488704fe4428b68910bd6bf1 --- doc/requirements.txt | 1 + doc/source/conf.py | 2 +- doc/source/index.rst | 2 ++ doc/source/roles.rst | 4 ++++ 4 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 doc/source/roles.rst diff --git a/doc/requirements.txt b/doc/requirements.txt index e140bc0689..f65e9dfece 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -8,3 +8,4 @@ nwdiag blockdiag sphinxcontrib-blockdiag sphinxcontrib-nwdiag +zuul-sphinx>=0.2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py index 780237fe58..e9708fae1d 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -26,7 +26,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] +extensions = [ 'sphinx.ext.autodoc', 'zuul_sphinx', 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] # openstackdocstheme options repository_name = 'openstack-dev/devstack' diff --git a/doc/source/index.rst b/doc/source/index.rst index 47087c5a0b..9254c23cae 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -139,6 +139,8 @@ up to date to the latest devstack). Enable :doc:`devstack plugins ` to support additional services, features, and configuration not present in base devstack. +Use devstack in your CI with :doc:`Ansible roles ` for Zuul V3. + Get :doc:`the big picture ` of what we are trying to do with devstack, and help us by :doc:`contributing to the project `. diff --git a/doc/source/roles.rst b/doc/source/roles.rst new file mode 100644 index 0000000000..5baa1e4e82 --- /dev/null +++ b/doc/source/roles.rst @@ -0,0 +1,4 @@ +Roles +===== + +.. 
zuul:autoroles:: From 4d55aa734de178e610e0bb8a3e3f55b82c5f2366 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 20 Feb 2018 06:10:39 +0000 Subject: [PATCH 0815/1936] Updated from generate-devstack-plugins-list Change-Id: I7c5b33448df43c77fd188a3c44099d9c932d3173 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ba747203c6..591e226645 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -157,6 +157,7 @@ picasso `git://git.openstack.org/openstack/picass python-openstacksdk `git://git.openstack.org/openstack/python-openstacksdk `__ qinling `git://git.openstack.org/openstack/qinling `__ rally `git://git.openstack.org/openstack/rally `__ +rally-openstack `git://git.openstack.org/openstack/rally-openstack `__ sahara `git://git.openstack.org/openstack/sahara `__ sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ scalpels `git://git.openstack.org/openstack/scalpels `__ From 2e9e90b9a862fee3a47ad211b712171497f13e97 Mon Sep 17 00:00:00 2001 From: Daniel Mellado Date: Tue, 20 Feb 2018 12:17:55 +0100 Subject: [PATCH 0816/1936] Fix RedHat entry in apache_config_path With CentOS 7, ansible is expecting to have RedHat as an attribute for the dict so Discover configurations task fails with an undefined variable error. Closes-Bug: #1750573 Change-Id: I5bf9c4057ca9f75d730add9e429d0ef050c6d900 --- roles/apache-logs-conf/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index 60b4fbfead..339ee358be 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -57,7 +57,7 @@ apache_config_paths: 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' - 'Redhat': '/etc/httpd/conf.d/' + 'RedHat': '/etc/httpd/conf.d/' - name: Discover configurations find: From c9c39a4b043af60d06e71976dab218115ffdf524 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Wed, 21 Feb 2018 14:23:10 -0500 Subject: [PATCH 0817/1936] Make the apache-logs-conf role even less verbose Round 2 to add no_log to more things. Specific looping over stats, it is pretty noise in logs and doesn't seem to add any value. 
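The mechanism is plain Ansible no_log; as a minimal sketch (hypothetical task and variable names, not copied verbatim from the role), the same switch applied to a looped stat looks like:

    - name: Stat a list of files
      stat:
        path: "{{ item.path }}"
      with_items: "{{ found_logs.files }}"
      register: deref_logs
      no_log: true

The registered results stay available to later tasks; they just stop being echoed item by item into the console log.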
Change-Id: I580171e0061fa331f3ed510713f1ac7a1a6cb5ea Signed-off-by: Paul Belanger --- roles/apache-logs-conf/tasks/main.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index 339ee358be..bcc1353a87 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -10,11 +10,13 @@ path: "/var/log/apache2" file_type: any register: debian_suse_apache_logs + - name: Dereference files stat: path: "{{ item.path }}" with_items: "{{ debian_suse_apache_logs.files }}" register: debian_suse_apache_deref_logs + - name: Create hard links file: src: "{{ item.stat.lnk_source | default(item.stat.path) }}" @@ -24,6 +26,7 @@ when: - item.stat.isreg or item.stat.islnk when: ansible_os_family in ('Debian', 'Suse') + no_log: true - name: Link apache logs on RedHat block: @@ -32,11 +35,13 @@ path: "/var/log/httpd" file_type: any register: redhat_apache_logs + - name: Dereference files stat: path: "{{ item.path }}" with_items: "{{ redhat_apache_logs.files }}" register: redhat_apache_deref_logs + - name: Create hard links file: src: "{{ item.stat.lnk_source | default(item.stat.path) }}" @@ -46,6 +51,7 @@ when: - item.stat.isreg or item.stat.islnk when: ansible_os_family == 'Redhat' + no_log: true - name: Ensure {{ stage_dir }}/apache_config apache_config exists file: From d3106720d22eed7861e14b746e36715adc2933ee Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Wed, 21 Feb 2018 16:06:49 -0500 Subject: [PATCH 0818/1936] Suppress more stats tasks By default stat is pretty verbose, we can set no_log: true to avoid adding this info to our logs. Change-Id: Ia18ebfe179443382cc670ffc4363ab037c43bb85 Signed-off-by: Paul Belanger --- roles/devstack-project-conf/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml index 9c6e06bea9..917cdbc370 100644 --- a/roles/devstack-project-conf/tasks/main.yaml +++ b/roles/devstack-project-conf/tasks/main.yaml @@ -8,6 +8,7 @@ path: "/etc/{{ item.value.short_name }}" with_dict: "{{ zuul.projects }}" register: project_configs + no_log: true - name: Copy configuration files command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }} From a9e946471ef029755d7dd22f70a20273578c9813 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 6 Feb 2018 08:03:03 +1100 Subject: [PATCH 0819/1936] Role apache-logs-conf: fix redhat matching Ansible complains: The task includes an option with an undefined variable. 
The error was: 'dict object' has no attribute 'RedHat' which is just a mismatch on the "Redhat" string Change-Id: I447038256561740c224c68388fa5b6a068cc8fed --- roles/apache-logs-conf/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bcc1353a87..bd64574c9b 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -50,7 +50,7 @@ with_items: "{{ redhat_apache_deref_logs.results }}" when: - item.stat.isreg or item.stat.islnk - when: ansible_os_family == 'Redhat' + when: ansible_os_family == 'RedHat' no_log: true - name: Ensure {{ stage_dir }}/apache_config apache_config exists From e033e1b80f70c6e31364218b69f9a4fe5c8135a1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 16 Jun 2017 12:56:53 +1000 Subject: [PATCH 0820/1936] py3 changes for outfilter.py I started running this with dib where we have pure python3 environments and it failed. You can't have unbuffered text i/o in python3 for ... reasons? [1] Changing the file to binary mode works around this. Python3 opens sys.stdin in text mode, so we need to manually convert the unicode strings to bytes before we write them to the binary file. [1] http://bugs.python.org/issue17404 Change-Id: Iebb26f0d3c2347d262cbc10dfd0912840cd05878 --- tools/outfilter.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index f82939be1d..296cf8dfe5 100755 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -50,15 +50,13 @@ def main(): opts = get_options() outfile = None if opts.outfile: - outfile = open(opts.outfile, 'a', 0) + # note, binary mode so we can do unbuffered output. + outfile = open(opts.outfile, 'ab', 0) # Otherwise fileinput reprocess args as files sys.argv = [] - while True: - line = sys.stdin.readline() - if not line: - return 0 + for line in iter(sys.stdin.readline, ''): # put skip lines here if skip_line(line): continue @@ -75,8 +73,16 @@ def main(): if opts.verbose: sys.stdout.write(line) sys.stdout.flush() + if outfile: - outfile.write(line) + # We've opened outfile as a binary file to get the + # non-buffered behaviour. on python3, sys.stdin was + # opened with the system encoding and made the line into + # utf-8, so write the logfile out in utf-8 bytes. + if sys.version_info < (3,): + outfile.write(line) + else: + outfile.write(line.encode('utf-8')) outfile.flush() From 83ecb97fec7efde09f543fb1f5ef65bd6d4f1011 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 6 Feb 2018 10:03:34 +1100 Subject: [PATCH 0821/1936] Add VERBOSE_NO_TIMESTAMP flag As described in the documentation, this flag is intended for the case where the console output is being captured by a tool that appends its own timestamps. In the gate this is the job-output.txt. We want the console output as people like to watch that scrolling by as part of the live console log. Although this gets saved to job-output.txt, we still want to keep logging to the individual log files even though it's technically a duplicate -- in the multinode case the job-output.txt gets interleaved by all the running nodes; it's much easier to just look at the individual log files. 
Also, people are used to it where it is :) Change-Id: I3486636f1c76139581f6cd9668426f507b7c621d --- .zuul.yaml | 1 + doc/source/configuration.rst | 12 ++++++++++++ stack.sh | 7 ++++++- tools/outfilter.py | 27 ++++++++++++++++++++------- 4 files changed, 39 insertions(+), 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a90df2f06a..cc29466f35 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -66,6 +66,7 @@ LOGFILE: /opt/stack/logs/devstacklog.txt LOG_COLOR: false VERBOSE: true + VERBOSE_NO_TIMESTAMP: true NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it. diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 49cad05554..1d02395058 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -286,6 +286,18 @@ see what is going on. This can be disabled with:: LOG_COLOR=False +When using the logfile, by default logs are sent to the console and +the file. You can set ``VERBOSE`` to ``false`` if you only wish the +logs to be sent to the file (this may avoid having double-logging in +some cases where you are capturing the script output and the log +files). If ``VERBOSE`` is ``true`` you can additionally set +``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each +output line sent to the console. This can be useful in some +situations where the console output is being captured by a runner or +framework (e.g. Ansible) that adds its own timestamps. Note that the +log lines sent to the ``LOGFILE`` will still be prefixed with a +timestamp. + Logging the Service Output ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/stack.sh b/stack.sh index 9b496c0e20..1d1f12e116 100755 --- a/stack.sh +++ b/stack.sh @@ -405,6 +405,7 @@ is_package_installed python || install_package python # Set up logging level VERBOSE=$(trueorfalse True VERBOSE) +VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE) # Draw a spinner so the user knows something is happening function spinner { @@ -470,8 +471,12 @@ if [[ -n "$LOGFILE" ]]; then # stdout later. exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then + _of_args="-v" + if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then + _of_args="$_of_args --no-timestamp" + fi # Set fd 1 and 2 to write the log file - exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1 + exec 1> >( $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 # Set fd 6 to summary log file exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) else diff --git a/tools/outfilter.py b/tools/outfilter.py index 296cf8dfe5..cf091247d0 100755 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -36,6 +36,13 @@ def get_options(): parser.add_argument('-o', '--outfile', help='Output file for content', default=None) + # NOTE(ianw): This is intended for the case where your stdout is + # being captured by something like ansible which independently + # logs timestamps on the lines it receives. Note that if using a + # output file, those log lines are still timestamped. 
+ parser.add_argument('-b', '--no-timestamp', action='store_true', + help='Do not prefix stdout with timestamp (bare)', + default=False) parser.add_argument('-v', '--verbose', action='store_true', default=False) return parser.parse_args() @@ -61,17 +68,23 @@ def main(): if skip_line(line): continue - # This prevents us from nesting date lines, because - # we'd like to pull this in directly in Grenade and not double - # up on DevStack lines + # This prevents us from nesting date lines, because we'd like + # to pull this in directly in Grenade and not double up on + # DevStack lines. + # NOTE(ianw): we could actually strip the extra ts in "bare" + # mode (which came after this)? ... as we get more experience + # with zuulv3 native jobs and ansible capture it may become + # clearer what to do if HAS_DATE.search(line) is None: now = datetime.datetime.utcnow() - line = ("%s | %s" % ( + ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) + else: + ts_line = line if opts.verbose: - sys.stdout.write(line) + sys.stdout.write(line if opts.no_timestamp else ts_line) sys.stdout.flush() if outfile: @@ -80,9 +93,9 @@ def main(): # opened with the system encoding and made the line into # utf-8, so write the logfile out in utf-8 bytes. if sys.version_info < (3,): - outfile.write(line) + outfile.write(ts_line) else: - outfile.write(line.encode('utf-8')) + outfile.write(ts_line.encode('utf-8')) outfile.flush() From 2346e9a8b531d621c1adc167d8117474f69638bc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 9 Feb 2018 12:49:22 +1100 Subject: [PATCH 0822/1936] Run swift-container-sync under run_process All the evidence from [1] suggests that on opensuse swift-init is not detaching the daemon process correctly. It's possible there's a pipe still in play that somehow holds our ansible-streamer open. This is a minimal fix to avoid swift-init. Although it's possible in non-default paths to still use swift-init (and hence possibly hit another variant of this issue), after discussions with swift developers it was decided the intersection of tests running under our current ansible, on suse, that would enable these services is sufficiently small that this is the best course for now. [1] https://storyboard.openstack.org/#!/story/2001528 Change-Id: I1b68c08c07cf6653ea58506f738cbe0054b38f3a --- lib/swift | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 1601e2b1f8..1187846dfc 100644 --- a/lib/swift +++ b/lib/swift @@ -827,7 +827,8 @@ function start_swift { else # The container-sync daemon is strictly needed to pass the container # sync Tempest tests. - swift-init --run-dir=${SWIFT_DATA_DIR}/run container-sync start + enable_service s-container-sync + run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf" fi else swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true From 11641ce14ddec3567187099e87f03b148aadc584 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 5 Feb 2018 14:39:18 +1100 Subject: [PATCH 0823/1936] Move platform jobs to zuulv3 native We've called the jobs that don't run on our main Ubuntu targets "platform" jobs; start at moving these jobs to native jobs. 
Depends-On: https://review.openstack.org/541010 Change-Id: Ib64d91206a9ac677f4d77873bc54c6a84702d6c3 --- .zuul.yaml | 58 ++++++++++++++++++++++++++++++ roles/run-devstack/tasks/main.yaml | 5 ++- 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc29466f35..22ba121c2f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,6 +8,36 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-7 + nodes: + - name: controller + label: centos-7 + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-opensuse-423 + nodes: + - name: controller + label: opensuse-423 + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-fedora-27 + nodes: + - name: controller + label: fedora-27 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -140,6 +170,31 @@ # ready yet. Until then this job should stay non-voting. voting: false +# NOTE(ianw) Platform tests have traditionally been non-voting because +# we often have to rush things through devstack to stabilise the gate, +# and these platforms don't have the round-the-clock support to avoid +# becoming blockers in that situation. +- job: + name: devstack-platform-centos-7 + parent: tempest-full + description: Centos 7 platform test + nodeset: devstack-single-node-centos-7 + voting: false + +- job: + name: devstack-platform-opensuse-423 + parent: tempest-full + description: OpenSuSE 43.2 platform test + nodeset: devstack-single-node-opensuse-423 + voting: false + +- job: + name: devstack-platform-fedora-27 + parent: tempest-full + description: Fedora 27 platform test + nodeset: devstack-single-node-fedora-27 + voting: false + - job: name: devstack-tox-base parent: devstack @@ -207,6 +262,9 @@ check: jobs: - devstack + - devstack-platform-centos-7 + - devstack-platform-opensuse-423 + - devstack-platform-fedora-27 - devstack-multinode - devstack-unit-tests gate: diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml index 64f769c0d0..1ff82950e4 100644 --- a/roles/run-devstack/tasks/main.yaml +++ b/roles/run-devstack/tasks/main.yaml @@ -1,5 +1,8 @@ - name: Run devstack - shell: ./stack.sh 2>&1 + shell: + cmd: | + ./stack.sh 2>&1 + echo "*** FINISHED ***" args: chdir: "{{devstack_base_dir}}/devstack" become: true From e619603bdab1eebc05690e42de887614a778022a Mon Sep 17 00:00:00 2001 From: Jianghua Wang Date: Fri, 23 Feb 2018 03:49:18 +0000 Subject: [PATCH 0824/1936] XenAPI: remove xen tools The xen tools have been moved to the project of *os-xenapi* since os-xenapi 0.3.0. We also did some refact work on these tools in os-xenapi. This commit is to remove these tools from devstack. So that os-xenapi will be the single place for xen tools. 
Change-Id: I4fdbe6bce12dfedd0d1e975ab8dd624ee3740c11 --- tools/xen/README.md | 174 +------ tools/xen/build_xva.sh | 191 -------- tools/xen/devstackubuntu_latecommand.sh | 14 - tools/xen/devstackubuntupreseed.cfg | 471 ------------------- tools/xen/functions | 341 -------------- tools/xen/install_os_domU.sh | 418 ---------------- tools/xen/mocks | 92 ---- tools/xen/prepare_guest.sh | 123 ----- tools/xen/prepare_guest_template.sh | 94 ---- tools/xen/scripts/install-os-vpx.sh | 135 ------ tools/xen/scripts/install_ubuntu_template.sh | 84 ---- tools/xen/scripts/manage-vdi | 96 ---- tools/xen/scripts/on_exit.sh | 24 - tools/xen/scripts/uninstall-os-vpx.sh | 88 ---- tools/xen/test_functions.sh | 205 -------- tools/xen/xenrc | 114 ----- 16 files changed, 2 insertions(+), 2662 deletions(-) delete mode 100755 tools/xen/build_xva.sh delete mode 100644 tools/xen/devstackubuntu_latecommand.sh delete mode 100644 tools/xen/devstackubuntupreseed.cfg delete mode 100644 tools/xen/functions delete mode 100755 tools/xen/install_os_domU.sh delete mode 100644 tools/xen/mocks delete mode 100755 tools/xen/prepare_guest.sh delete mode 100755 tools/xen/prepare_guest_template.sh delete mode 100755 tools/xen/scripts/install-os-vpx.sh delete mode 100755 tools/xen/scripts/install_ubuntu_template.sh delete mode 100755 tools/xen/scripts/manage-vdi delete mode 100755 tools/xen/scripts/on_exit.sh delete mode 100755 tools/xen/scripts/uninstall-os-vpx.sh delete mode 100755 tools/xen/test_functions.sh delete mode 100644 tools/xen/xenrc diff --git a/tools/xen/README.md b/tools/xen/README.md index 9559e773d3..22263bb074 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,173 +1,3 @@ -# Getting Started With XenServer and Devstack - -The purpose of the code in this directory it to help developers bootstrap a -XenServer 6.2 (older versions may also work) + OpenStack development -environment. This file gives some pointers on how to get started. - -Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The -OpenStack services are configured to run within a virtual machine (called OS -domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with -the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). - -The provided localrc helps to build a basic environment. - -## Introduction - -### Requirements - - - An internet-enabled network with a DHCP server on it - - XenServer box plugged in to the same network -This network will be used as the OpenStack management network. The VM Network -and the Public Network will not be connected to any physical interfaces, only -new virtual networks will be created by the `install_os_domU.sh` script. - -### Steps to follow - - - Install XenServer - - Download Devstack to XenServer - - Customise `localrc` - - Start `install_os_domU.sh` script - -### Brief explanation - -The `install_os_domU.sh` script will: - - Setup XenAPI plugins - - Create the named networks, if they don't exist - - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse - it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network - interface: - - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to - `MGT_BRIDGE_OR_NET_NAME` - - After the Ubuntu install process finished, the network configuration is - modified to: - - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi - must be accessible through this network. 
- - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` - - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - - Start devstack inside the created OpenStack VM - -## Step 1: Install Xenserver -Install XenServer on a clean box. You can download the latest XenServer for -free from: http://www.xenserver.org/ - -The XenServer IP configuration depends on your local network setup. If you are -using dhcp, make a reservation for XenServer, so its IP address won't change -over time. Make a note of the XenServer's IP address, as it has to be specified -in `localrc`. The other option is to manually specify the IP setup for the -XenServer box. Please make sure, that a gateway and a nameserver is configured, -as `install_os_domU.sh` will connect to github.com to get source-code snapshots. - -## Step 2: Download devstack -On your XenServer host, run the following commands as root: - - wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master - unzip -o master -d ./devstack - cd devstack/*/ - -## Step 3: Configure your localrc inside the devstack directory -Devstack uses a localrc for user-specific configuration. Note that -the `XENAPI_PASSWORD` must be your dom0 root password. -Of course, use real passwords if this machine is exposed. - - cat > ./localrc <$STAGING_DIR/etc/systemd/system/devstack.service << EOF -[Unit] -Description=Install OpenStack by DevStack - -[Service] -Type=oneshot -RemainAfterExit=yes -ExecStartPre=/bin/rm -f /opt/stack/runsh.succeeded -ExecStart=/bin/su -c "/opt/stack/run.sh" stack -StandardOutput=tty -StandardError=tty - -[Install] -WantedBy=multi-user.target - -EOF - -# enable this service -ln -s $STAGING_DIR/etc/systemd/system/devstack.service $STAGING_DIR/etc/systemd/system/multi-user.target.wants/devstack.service - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -HOSTS_FILE_IP=$PUB_IP -if [ $MGT_IP != "dhcp" ]; then - HOSTS_FILE_IP=$MGT_IP -fi -cat <$STAGING_DIR/etc/hosts -$HOSTS_FILE_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -print_interfaces_config > $STAGING_DIR/etc/network/interfaces - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -set -eux -( - flock -n 9 || exit 1 - - sudo chown -R stack /opt/stack - - [ -e /opt/stack/runsh.succeeded ] && rm /opt/stack/runsh.succeeded - echo \$\$ >> /opt/stack/run_sh.pid - - cd /opt/stack/devstack - ./unstack.sh || true - ./stack.sh - - # Got to the end - success - touch /opt/stack/runsh.succeeded - - # Update /etc/issue - ( - echo "OpenStack VM - Installed by DevStack" - IPADDR=$(ip -4 address show eth0 | sed -n 's/.*inet \([0-9\.]\+\).*/\1/p') - echo " Management IP: $IPADDR" - echo -n " Devstack run: " - if [ -e /opt/stack/runsh.succeeded ]; then - echo "SUCCEEDED" - else - echo "FAILED" - fi - echo "" - ) > /opt/stack/issue - sudo cp /opt/stack/issue /etc/issue - - rm /opt/stack/run_sh.pid -) 9> /opt/stack/.runsh_lock -EOF - -chmod 755 $STAGING_DIR/opt/stack/run.sh 
diff --git a/tools/xen/devstackubuntu_latecommand.sh b/tools/xen/devstackubuntu_latecommand.sh deleted file mode 100644 index 2afbe2cdf3..0000000000 --- a/tools/xen/devstackubuntu_latecommand.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -eux - -# Need to set barrier=0 to avoid a Xen bug -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 -sed -i -e 's/errors=/barrier=0,errors=/' /etc/fstab - -# Allow root to login with a password -sed -i -e 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config - -# Install the XenServer tools so IP addresses are reported -wget --no-proxy @XS_TOOLS_URL@ -O /root/tools.deb -dpkg -i /root/tools.deb -rm /root/tools.deb diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg deleted file mode 100644 index 80f334ba8e..0000000000 --- a/tools/xen/devstackubuntupreseed.cfg +++ /dev/null @@ -1,471 +0,0 @@ -### Contents of the preconfiguration file (for squeeze) -### Localization -# Preseeding only locale sets language, country and locale. -d-i debian-installer/locale string en_US - -# The values can also be preseeded individually for greater flexibility. -#d-i debian-installer/language string en -#d-i debian-installer/country string NL -#d-i debian-installer/locale string en_GB.UTF-8 -# Optionally specify additional locales to be generated. -#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8 - -# Keyboard selection. -# Disable automatic (interactive) keymap detection. -d-i console-setup/ask_detect boolean false -#d-i keyboard-configuration/modelcode string pc105 -d-i keyboard-configuration/layoutcode string us -# To select a variant of the selected layout (if you leave this out, the -# basic form of the layout will be used): -#d-i keyboard-configuration/variantcode string dvorak - -### Network configuration -# Disable network configuration entirely. This is useful for cdrom -# installations on non-networked devices where the network questions, -# warning and long timeouts are a nuisance. -#d-i netcfg/enable boolean false - -# netcfg will choose an interface that has link if possible. This makes it -# skip displaying a list if there is more than one interface. -d-i netcfg/choose_interface select auto - -# To pick a particular interface instead: -#d-i netcfg/choose_interface select eth1 - -# If you have a slow dhcp server and the installer times out waiting for -# it, this might be useful. -d-i netcfg/dhcp_timeout string 120 - -# If you prefer to configure the network manually, uncomment this line and -# the static network configuration below. -#d-i netcfg/disable_autoconfig boolean true - -# If you want the preconfiguration file to work on systems both with and -# without a dhcp server, uncomment these lines and the static network -# configuration below. -#d-i netcfg/dhcp_failed note -#d-i netcfg/dhcp_options select Configure network manually - -# Static network configuration. -#d-i netcfg/get_nameservers string 192.168.1.1 -#d-i netcfg/get_ipaddress string 192.168.1.42 -#d-i netcfg/get_netmask string 255.255.255.0 -#d-i netcfg/get_gateway string 192.168.1.1 -#d-i netcfg/confirm_static boolean true - -# Any hostname and domain names assigned from dhcp take precedence over -# values set here. However, setting the values still prevents the questions -# from being shown, even if values come from dhcp. -d-i netcfg/get_hostname string stack -d-i netcfg/get_domain string stackpass - -# Disable that annoying WEP key dialog. 
-d-i netcfg/wireless_wep string -# The wacky dhcp hostname that some ISPs use as a password of sorts. -#d-i netcfg/dhcp_hostname string radish - -# If non-free firmware is needed for the network or other hardware, you can -# configure the installer to always try to load it, without prompting. Or -# change to false to disable asking. -#d-i hw-detect/load_firmware boolean true - -### Network console -# Use the following settings if you wish to make use of the network-console -# component for remote installation over SSH. This only makes sense if you -# intend to perform the remainder of the installation manually. -#d-i anna/choose_modules string network-console -#d-i network-console/password password r00tme -#d-i network-console/password-again password r00tme - -### Mirror settings -# If you select ftp, the mirror/country string does not need to be set. -#d-i mirror/protocol string ftp -d-i mirror/country string manual -d-i mirror/http/hostname string archive.ubuntu.com -d-i mirror/http/directory string /ubuntu -d-i mirror/http/proxy string - -# Alternatively: by default, the installer uses CC.archive.ubuntu.com where -# CC is the ISO-3166-2 code for the selected country. You can preseed this -# so that it does so without asking. -#d-i mirror/http/mirror select CC.archive.ubuntu.com - -# Suite to install. -#d-i mirror/suite string squeeze -# Suite to use for loading installer components (optional). -#d-i mirror/udeb/suite string squeeze -# Components to use for loading installer components (optional). -#d-i mirror/udeb/components multiselect main, restricted - -### Clock and time zone setup -# Controls whether or not the hardware clock is set to UTC. -d-i clock-setup/utc boolean true - -# You may set this to any valid setting for $TZ; see the contents of -# /usr/share/zoneinfo/ for valid values. -d-i time/zone string US/Pacific - -# Controls whether to use NTP to set the clock during the install -d-i clock-setup/ntp boolean true -# NTP server to use. The default is almost always fine here. -d-i clock-setup/ntp-server string 0.us.pool.ntp.org - -### Partitioning -## Partitioning example -# If the system has free space you can choose to only partition that space. -# This is only honoured if partman-auto/method (below) is not set. -# Alternatives: custom, some_device, some_device_crypto, some_device_lvm. -#d-i partman-auto/init_automatically_partition select biggest_free - -# Alternatively, you may specify a disk to partition. If the system has only -# one disk the installer will default to using that, but otherwise the device -# name must be given in traditional, non-devfs format (so e.g. /dev/hda or -# /dev/sda, and not e.g. /dev/discs/disc0/disc). -# For example, to use the first SCSI/SATA hard disk: -#d-i partman-auto/disk string /dev/sda -# In addition, you'll need to specify the method to use. -# The presently available methods are: -# - regular: use the usual partition types for your architecture -# - lvm: use LVM to partition the disk -# - crypto: use LVM within an encrypted partition -d-i partman-auto/method string regular - -# If one of the disks that are going to be automatically partitioned -# contains an old LVM configuration, the user will normally receive a -# warning. This can be preseeded away... -d-i partman-lvm/device_remove_lvm boolean true -# The same applies to pre-existing software RAID array: -d-i partman-md/device_remove_md boolean true -# And the same goes for the confirmation to write the lvm partitions. 
-d-i partman-lvm/confirm boolean true - -# For LVM partitioning, you can select how much of the volume group to use -# for logical volumes. -#d-i partman-auto-lvm/guided_size string max -#d-i partman-auto-lvm/guided_size string 10GB -#d-i partman-auto-lvm/guided_size string 50% - -# You can choose one of the three predefined partitioning recipes: -# - atomic: all files in one partition -# - home: separate /home partition -# - multi: separate /home, /usr, /var, and /tmp partitions -d-i partman-auto/choose_recipe select atomic - -# Or provide a recipe of your own... -# If you have a way to get a recipe file into the d-i environment, you can -# just point at it. -#d-i partman-auto/expert_recipe_file string /hd-media/recipe - -# If not, you can put an entire recipe into the preconfiguration file in one -# (logical) line. This example creates a small /boot partition, suitable -# swap, and uses the rest of the space for the root partition: -#d-i partman-auto/expert_recipe string \ -# boot-root :: \ -# 40 50 100 ext3 \ -# $primary{ } $bootable{ } \ -# method{ format } format{ } \ -# use_filesystem{ } filesystem{ ext3 } \ -# mountpoint{ /boot } \ -# . \ -# 500 10000 1000000000 ext3 \ -# method{ format } format{ } \ -# use_filesystem{ } filesystem{ ext3 } \ -# mountpoint{ / } \ -# . \ -# 64 512 300% linux-swap \ -# method{ swap } format{ } \ -# . - -# If you just want to change the default filesystem from ext3 to something -# else, you can do that without providing a full recipe. -d-i partman/default_filesystem string ext3 - -# The full recipe format is documented in the file partman-auto-recipe.txt -# included in the 'debian-installer' package or available from D-I source -# repository. This also documents how to specify settings such as file -# system labels, volume group names and which physical devices to include -# in a volume group. - -# This makes partman automatically partition without confirmation, provided -# that you told it what to do using one of the methods above. -d-i partman-partitioning/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -## Partitioning using RAID -# The method should be set to "raid". -#d-i partman-auto/method string raid -# Specify the disks to be partitioned. They will all get the same layout, -# so this will only work if the disks are the same size. -#d-i partman-auto/disk string /dev/sda /dev/sdb - -# Next you need to specify the physical partitions that will be used. -#d-i partman-auto/expert_recipe string \ -# multiraid :: \ -# 1000 5000 4000 raid \ -# $primary{ } method{ raid } \ -# . \ -# 64 512 300% raid \ -# method{ raid } \ -# . \ -# 500 10000 1000000000 raid \ -# method{ raid } \ -# . - -# Last you need to specify how the previously defined partitions will be -# used in the RAID setup. Remember to use the correct partition numbers -# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported; -# devices are separated using "#". -# Parameters are: -# \ -# - -#d-i partman-auto-raid/recipe string \ -# 1 2 0 ext3 / \ -# /dev/sda1#/dev/sdb1 \ -# . \ -# 1 2 0 swap - \ -# /dev/sda5#/dev/sdb5 \ -# . \ -# 0 2 0 ext3 /home \ -# /dev/sda6#/dev/sdb6 \ -# . - -# For additional information see the file partman-auto-raid-recipe.txt -# included in the 'debian-installer' package or available from D-I source -# repository. - -# This makes partman automatically partition without confirmation. 
-d-i partman-md/confirm boolean true -d-i partman-partitioning/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -## Controlling how partitions are mounted -# The default is to mount by UUID, but you can also choose "traditional" to -# use traditional device names, or "label" to try filesystem labels before -# falling back to UUIDs. -#d-i partman/mount_style select uuid - -### Base system installation -# Configure APT to not install recommended packages by default. Use of this -# option can result in an incomplete system and should only be used by very -# experienced users. -#d-i base-installer/install-recommends boolean false - -# The kernel image (meta) package to be installed; "none" can be used if no -# kernel is to be installed. -d-i base-installer/kernel/image string linux-virtual - -### Account setup -# Skip creation of a root account (normal user account will be able to -# use sudo). The default is false; preseed this to true if you want to set -# a root password. -d-i passwd/root-login boolean true -# Alternatively, to skip creation of a normal user account. -d-i passwd/make-user boolean false - -# Root password, either in clear text -d-i passwd/root-password password stackpass -d-i passwd/root-password-again password stackpass -# or encrypted using an MD5 hash. -#d-i passwd/root-password-crypted password [MD5 hash] - -# To create a normal user account. -#d-i passwd/user-fullname string Ubuntu User -#d-i passwd/username string ubuntu -# Normal user's password, either in clear text -#d-i passwd/user-password password insecure -#d-i passwd/user-password-again password insecure -# or encrypted using an MD5 hash. -#d-i passwd/user-password-crypted password [MD5 hash] -# Create the first user with the specified UID instead of the default. -#d-i passwd/user-uid string 1010 -# The installer will warn about weak passwords. If you are sure you know -# what you're doing and want to override it, uncomment this. -d-i user-setup/allow-password-weak boolean true - -# The user account will be added to some standard initial groups. To -# override that, use this. -#d-i passwd/user-default-groups string audio cdrom video - -# Set to true if you want to encrypt the first user's home directory. -d-i user-setup/encrypt-home boolean false - -### Apt setup -# You can choose to install restricted and universe software, or to install -# software from the backports repository. -d-i apt-setup/restricted boolean true -d-i apt-setup/universe boolean true -d-i apt-setup/backports boolean true -# Uncomment this if you don't want to use a network mirror. -#d-i apt-setup/use_mirror boolean false -# Select which update services to use; define the mirrors to be used. -# Values shown below are the normal defaults. 
-#d-i apt-setup/services-select multiselect security -#d-i apt-setup/security_host string security.ubuntu.com -#d-i apt-setup/security_path string /ubuntu - -# Additional repositories, local[0-9] available -#d-i apt-setup/local0/repository string \ -# http://local.server/ubuntu squeeze main -#d-i apt-setup/local0/comment string local server -# Enable deb-src lines -#d-i apt-setup/local0/source boolean true -# URL to the public key of the local repository; you must provide a key or -# apt will complain about the unauthenticated repository and so the -# sources.list line will be left commented out -#d-i apt-setup/local0/key string http://local.server/key - -# By default the installer requires that repositories be authenticated -# using a known gpg key. This setting can be used to disable that -# authentication. Warning: Insecure, not recommended. -#d-i debian-installer/allow_unauthenticated boolean true - -### Package selection -#tasksel tasksel/first multiselect ubuntu-desktop -#tasksel tasksel/first multiselect lamp-server, print-server -#tasksel tasksel/first multiselect kubuntu-desktop -tasksel tasksel/first multiselect openssh-server - -# Individual additional packages to install -d-i pkgsel/include string cracklib-runtime curl wget ssh openssh-server tcpdump ethtool git sudo python-netaddr coreutils - -# Whether to upgrade packages after debootstrap. -# Allowed values: none, safe-upgrade, full-upgrade -d-i pkgsel/upgrade select safe-upgrade - -# Language pack selection -#d-i pkgsel/language-packs multiselect de, en, zh - -# Policy for applying updates. May be "none" (no automatic updates), -# "unattended-upgrades" (install security updates automatically), or -# "landscape" (manage system with Landscape). -d-i pkgsel/update-policy select unattended-upgrades - -# Some versions of the installer can report back on what software you have -# installed, and what software you use. The default is not to report back, -# but sending reports helps the project determine what software is most -# popular and include it on CDs. -#popularity-contest popularity-contest/participate boolean false - -# By default, the system's locate database will be updated after the -# installer has finished installing most packages. This may take a while, so -# if you don't want it, you can set this to "false" to turn it off. -d-i pkgsel/updatedb boolean false - -### Boot loader installation -# Grub is the default boot loader (for x86). If you want lilo installed -# instead, uncomment this: -#d-i grub-installer/skip boolean true -# To also skip installing lilo, and install no bootloader, uncomment this -# too: -#d-i lilo-installer/skip boolean true - -# With a few exceptions for unusual partitioning setups, GRUB 2 is now the -# default. If you need GRUB Legacy for some particular reason, then -# uncomment this: -d-i grub-installer/grub2_instead_of_grub_legacy boolean false - -# This is fairly safe to set, it makes grub install automatically to the MBR -# if no other operating system is detected on the machine. -d-i grub-installer/only_debian boolean true - -# This one makes grub-installer install to the MBR if it also finds some other -# OS, which is less safe as it might not be able to boot that other OS. 
-d-i grub-installer/with_other_os boolean true - -# Alternatively, if you want to install to a location other than the mbr, -# uncomment and edit these lines: -#d-i grub-installer/only_debian boolean false -#d-i grub-installer/with_other_os boolean false -#d-i grub-installer/bootdev string (hd0,0) -# To install grub to multiple disks: -#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0) - -# Optional password for grub, either in clear text -#d-i grub-installer/password password r00tme -#d-i grub-installer/password-again password r00tme -# or encrypted using an MD5 hash, see grub-md5-crypt(8). -#d-i grub-installer/password-crypted password [MD5 hash] - -# Use the following option to add additional boot parameters for the -# installed system (if supported by the bootloader installer). -# Note: options passed to the installer will be added automatically. -#d-i debian-installer/add-kernel-opts string nousb - -### Finishing up the installation -# During installations from serial console, the regular virtual consoles -# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next -# line to prevent this. -d-i finish-install/keep-consoles boolean true - -# Avoid that last message about the install being complete. -d-i finish-install/reboot_in_progress note - -# This will prevent the installer from ejecting the CD during the reboot, -# which is useful in some situations. -#d-i cdrom-detect/eject boolean false - -# This is how to make the installer shutdown when finished, but not -# reboot into the installed system. -#d-i debian-installer/exit/halt boolean true -# This will power off the machine instead of just halting it. -#d-i debian-installer/exit/poweroff boolean true - -### X configuration -# X can detect the right driver for some cards, but if you're preseeding, -# you override whatever it chooses. Still, vesa will work most places. -#xserver-xorg xserver-xorg/config/device/driver select vesa - -# A caveat with mouse autodetection is that if it fails, X will retry it -# over and over. So if it's preseeded to be done, there is a possibility of -# an infinite loop if the mouse is not autodetected. -#xserver-xorg xserver-xorg/autodetect_mouse boolean true - -# Monitor autodetection is recommended. -xserver-xorg xserver-xorg/autodetect_monitor boolean true -# Uncomment if you have an LCD display. -#xserver-xorg xserver-xorg/config/monitor/lcd boolean true -# X has three configuration paths for the monitor. Here's how to preseed -# the "medium" path, which is always available. The "simple" path may not -# be available, and the "advanced" path asks too many questions. -xserver-xorg xserver-xorg/config/monitor/selection-method \ - select medium -xserver-xorg xserver-xorg/config/monitor/mode-list \ - select 1024x768 @ 60 Hz - -### Preseeding other packages -# Depending on what software you choose to install, or if things go wrong -# during the installation process, it's possible that other questions may -# be asked. You can preseed those too, of course. To get a list of every -# possible question that could be asked during an install, do an -# installation, and then run these commands: -# debconf-get-selections --installer > file -# debconf-get-selections >> file - - -#### Advanced options -### Running custom commands during the installation -# d-i preseeding is inherently not secure. Nothing in the installer checks -# for attempts at buffer overflows or other exploits of the values of a -# preconfiguration file like this one. Only use preconfiguration files from -# trusted locations! 
To drive that home, and because it's generally useful, -# here's a way to run any shell command you'd like inside the installer, -# automatically. - -# This first command is run as early as possible, just after -# preseeding is read. -#d-i preseed/early_command string anna-install some-udeb -# This command is run immediately before the partitioner starts. It may be -# useful to apply dynamic partitioner preseeding that depends on the state -# of the disks (which may not be visible when preseed/early_command runs). -#d-i partman/early_command \ -# string debconf-set partman-auto/disk "$(list-devices disk | head -n1)" -# This command is run just before the install finishes, but when there is -# still a usable /target directory. You can chroot to /target and use it -# directly, or use the apt-install and in-target commands to easily install -# packages and run commands in the target system. -d-i preseed/late_command string diff --git a/tools/xen/functions b/tools/xen/functions deleted file mode 100644 index bc0c515e01..0000000000 --- a/tools/xen/functions +++ /dev/null @@ -1,341 +0,0 @@ -#!/bin/bash - -function die_with_error { - local err_msg - - err_msg="$1" - - echo "$err_msg" >&2 - exit 1 -} - -function xapi_plugin_location { - for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins" "/usr/lib64/xapi/plugins"; do - if [ -d $PLUGIN_DIR ]; then - echo $PLUGIN_DIR - return 0 - fi - done - return 1 -} - -function create_directory_for_kernels { - if [ -d "/boot/guest" ]; then - echo "INFO: /boot/guest directory already exists, using that" >&2 - else - local local_path - local_path="$(get_local_sr_path)/os-guest-kernels" - mkdir -p $local_path - ln -s $local_path /boot/guest - fi -} - -function create_directory_for_images { - if [ -d "/images" ]; then - echo "INFO: /images directory already exists, using that" >&2 - else - local local_path - local_path="$(get_local_sr_path)/os-images" - mkdir -p $local_path - ln -s $local_path /images - fi -} - -function get_local_sr { - xe pool-list params=default-SR minimal=true -} - -function get_local_sr_path { - pbd_path="/var/run/sr-mount/$(get_local_sr)" - pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "` - if [ -n "$pbd_device_config_path" ]; then - pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true` - pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""` - fi - echo $pbd_path -} - -function find_ip_by_name { - local guest_name="$1" - local interface="$2" - - local period=10 - local max_tries=10 - local i=0 - - while true; do - if [ $i -ge $max_tries ]; then - echo "Timeout: ip address for interface $interface of $guest_name" - exit 11 - fi - - ipaddress=$(xe vm-list --minimal \ - name-label=$guest_name \ - params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") - - if [ -z "$ipaddress" ]; then - sleep $period - i=$((i+1)) - else - echo $ipaddress - break - fi - done -} - -function _vm_uuid { - local vm_name_label - - vm_name_label="$1" - - xe vm-list name-label="$vm_name_label" --minimal -} - -function _create_new_network { - local name_label - name_label=$1 - - xe network-create name-label="$name_label" -} - -function _multiple_networks_with_name { - local name_label - name_label=$1 - - # A comma indicates multiple matches - xe network-list name-label="$name_label" --minimal | grep -q "," -} - -function _network_exists { - local name_label - name_label=$1 - - ! 
[ -z "$(xe network-list name-label="$name_label" --minimal)" ] -} - -function _bridge_exists { - local bridge - bridge=$1 - - ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ] -} - -function _network_uuid { - local bridge_or_net_name - bridge_or_net_name=$1 - - if _bridge_exists "$bridge_or_net_name"; then - xe network-list bridge="$bridge_or_net_name" --minimal - else - xe network-list name-label="$bridge_or_net_name" --minimal - fi -} - -function add_interface { - local vm_name_label - local bridge_or_network_name - - vm_name_label="$1" - bridge_or_network_name="$2" - device_number="$3" - - local vm - local net - - vm=$(_vm_uuid "$vm_name_label") - net=$(_network_uuid "$bridge_or_network_name") - xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number -} - -function setup_network { - local bridge_or_net_name - bridge_or_net_name=$1 - - if ! _bridge_exists "$bridge_or_net_name"; then - if _network_exists "$bridge_or_net_name"; then - if _multiple_networks_with_name "$bridge_or_net_name"; then - cat >&2 << EOF -ERROR: Multiple networks found matching name-label to "$bridge_or_net_name" -please review your XenServer network configuration / localrc file. -EOF - exit 1 - fi - else - _create_new_network "$bridge_or_net_name" - fi - fi -} - -function bridge_for { - local bridge_or_net_name - bridge_or_net_name=$1 - - if _bridge_exists "$bridge_or_net_name"; then - echo "$bridge_or_net_name" - else - xe network-list name-label="$bridge_or_net_name" params=bridge --minimal - fi -} - -function xenapi_ip_on { - local bridge_or_net_name - bridge_or_net_name=$1 - - ip -4 addr show $(bridge_for "$bridge_or_net_name") |\ - awk '/inet/{split($2, ip, "/"); print ip[1];}' -} - -function xenapi_is_listening_on { - local bridge_or_net_name - bridge_or_net_name=$1 - - ! 
[ -z $(xenapi_ip_on "$bridge_or_net_name") ] -} - -function parameter_is_specified { - local parameter_name - parameter_name=$1 - - compgen -v | grep "$parameter_name" -} - -function append_kernel_cmdline { - local vm_name_label - local kernel_args - - vm_name_label="$1" - kernel_args="$2" - - local vm - local pv_args - - vm=$(_vm_uuid "$vm_name_label") - pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm) - xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm -} - -function destroy_all_vifs_of { - local vm_name_label - - vm_name_label="$1" - - local vm - - vm=$(_vm_uuid "$vm_name_label") - IFS=, - for vif in $(xe vif-list vm-uuid=$vm --minimal); do - xe vif-destroy uuid="$vif" - done - unset IFS -} - -function have_multiple_hosts { - xe host-list --minimal | grep -q "," -} - -function attach_network { - local bridge_or_net_name - - bridge_or_net_name="$1" - - local net - local host - - net=$(_network_uuid "$bridge_or_net_name") - host=$(xe host-list --minimal) - - xe network-attach uuid=$net host-uuid=$host -} - -function set_vm_memory { - local vm_name_label - local memory - - vm_name_label="$1" - memory="$2" - - local vm - - vm=$(_vm_uuid "$vm_name_label") - - xe vm-memory-limits-set \ - static-min=${memory}MiB \ - static-max=${memory}MiB \ - dynamic-min=${memory}MiB \ - dynamic-max=${memory}MiB \ - uuid=$vm -} - -function max_vcpus { - local vm_name_label - - vm_name_label="$1" - - local vm - local host - local cpu_count - - host=$(xe host-list --minimal) - vm=$(_vm_uuid "$vm_name_label") - - cpu_count=$(xe host-param-get \ - param-name=cpu_info \ - uuid=$host | - sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g') - - if [ -z "$cpu_count" ]; then - # get dom0's vcpu count - cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l) - fi - - # Assert cpu_count is not empty - [ -n "$cpu_count" ] - - # Assert ithas a numeric nonzero value - expr "$cpu_count" + 0 - - # 8 VCPUs should be enough for devstack VM; avoid using too - # many VCPUs: - # 1. too many VCPUs may trigger a kernel bug which result VM - # not able to boot: - # https://kernel.googlesource.com/pub/scm/linux/kernel/git/wsa/linux/+/e2e004acc7cbe3c531e752a270a74e95cde3ea48 - # 2. The remaining CPUs can be used for other purpose: - # e.g. boot test VMs. - MAX_VCPUS=8 - if [ $cpu_count -ge $MAX_VCPUS ]; then - cpu_count=$MAX_VCPUS - fi - - xe vm-param-set uuid=$vm VCPUs-max=$cpu_count - xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count -} - -function get_domid { - local vm_name_label - - vm_name_label="$1" - - xe vm-list name-label="$vm_name_label" params=dom-id minimal=true -} - -function install_conntrack_tools { - local xs_host - local xs_ver_major - local centos_ver - local conntrack_conf - xs_host=$(xe host-list --minimal) - xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1) - if [ $xs_ver_major -gt 6 ]; then - # Only support conntrack-tools in Dom0 with XS7.0 and above - if [ ! 
-f /usr/sbin/conntrackd ]; then - sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo - centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'/' -f 1 | cut -d'-' -f 1) - yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools - # Backup conntrackd.conf after install conntrack-tools, use the one with statistic mode - mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back - conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats) - cp $conntrack_conf /etc/conntrackd/conntrackd.conf - fi - service conntrackd restart - fi -} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh deleted file mode 100755 index f4ca71a906..0000000000 --- a/tools/xen/install_os_domU.sh +++ /dev/null @@ -1,418 +0,0 @@ -#!/bin/bash - -# This script must be run on a XenServer or XCP machine -# -# It creates a DomU VM that runs OpenStack services -# -# For more details see: README.md - -set -o errexit -set -o nounset -set -o xtrace - -export LC_ALL=C - -# This directory -THIS_DIR=$(cd $(dirname "$0") && pwd) - -# Include onexit commands -. $THIS_DIR/scripts/on_exit.sh - -# xapi functions -. $THIS_DIR/functions - -# -# Get Settings -# -TOP_DIR=$(cd $THIS_DIR/../../ && pwd) -source $TOP_DIR/inc/meta-config -rm -f $TOP_DIR/.localrc.auto -extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto - -# Source params - override xenrc params in your localrc to suit your taste -source $THIS_DIR/xenrc - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -# -# Prepare Dom0 -# including installing XenAPI plugins -# - -cd $THIS_DIR - -# Die if multiple hosts listed -if have_multiple_hosts; then - cat >&2 << EOF -ERROR: multiple hosts found. This might mean that the XenServer is a member -of a pool - Exiting. -EOF - exit 1 -fi - -# -# Configure Networking -# - -MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true` -MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true` - -setup_network "$VM_BRIDGE_OR_NET_NAME" -setup_network "$MGT_BRIDGE_OR_NET_NAME" -setup_network "$PUB_BRIDGE_OR_NET_NAME" - -if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then - if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then - cat >&2 << EOF -ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file, and either no network -found on XenServer by searching for networks by that value as name-label or -bridge name or the network found does not match the network specified by -VM_BRIDGE_OR_NET_NAME. Please check your localrc file. -EOF - exit 1 - fi -fi - -if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then - cat >&2 << EOF -ERROR: XenAPI does not have an assigned IP address on the management network. -please review your XenServer network configuration / localrc file. -EOF - exit 1 -fi - -HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") - -# Set up ip forwarding, but skip on xcp-xapi -if [ -a /etc/sysconfig/network ]; then - if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network - fi -fi -# Also, enable ip forwarding in rc.local, since the above trick isn't working -if ! 
grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then - echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local -fi -# Enable ip forwarding at runtime as well -echo 1 > /proc/sys/net/ipv4/ip_forward - - -# -# Shutdown previous runs -# - -DO_SHUTDOWN=${DO_SHUTDOWN:-1} -CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} -if [ "$DO_SHUTDOWN" = "1" ]; then - # Shutdown all domU's that created previously - clean_templates_arg="" - if $CLEAN_TEMPLATES; then - clean_templates_arg="--remove-templates" - fi - ./scripts/uninstall-os-vpx.sh $clean_templates_arg - - # Destroy any instances that were launched - for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do - echo "Shutting down nova instance $uuid" - xe vm-uninstall uuid=$uuid force=true - done - - # Destroy orphaned vdis - for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do - xe vdi-destroy uuid=$uuid - done -fi - - -# -# Create Ubuntu VM template -# and/or create VM from template -# - -GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -TNAME="jeos_template_for_devstack" -SNAME_TEMPLATE="jeos_snapshot_for_devstack" -SNAME_FIRST_BOOT="before_first_boot" - -function wait_for_VM_to_halt { - set +x - echo "Waiting for the VM to halt. Progress in-VM can be checked with XenCenter or xl console:" - mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') - domid=$(get_domid "$GUEST_NAME") - echo "ssh root@$mgmt_ip \"xl console $domid\"" - while true; do - state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) - if [ -n "$state" ]; then - break - else - echo -n "." - sleep 20 - fi - done - set -x -} - -templateuuid=$(xe template-list name-label="$TNAME") -if [ -z "$templateuuid" ]; then - # - # Install Ubuntu over network - # - UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} - - # always update the preseed file, incase we have a newer one - PRESEED_URL=${PRESEED_URL:-""} - if [ -z "$PRESEED_URL" ]; then - PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg" - - HTTP_SERVER_LOCATION="/opt/xensource/www" - if [ ! -e $HTTP_SERVER_LOCATION ]; then - HTTP_SERVER_LOCATION="/var/www/html" - mkdir -p $HTTP_SERVER_LOCATION - fi - - # Copy the tools DEB to the XS web server - XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb" - ISO_DIR="/opt/xensource/packages/iso" - if [ -e "$ISO_DIR" ]; then - TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1) - TMP_DIR=/tmp/temp.$RANDOM - mkdir -p $TMP_DIR - mount -o loop $TOOLS_ISO $TMP_DIR - # the target deb package maybe *amd64.deb or *all.deb, - # so use *amd64.deb by default. If it doesn't exist, - # then use *all.deb. 
- DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb) - cp $DEB_FILE $HTTP_SERVER_LOCATION - umount $TMP_DIR - rmdir $TMP_DIR - XS_TOOLS_URL=${HOST_IP}/$(basename $DEB_FILE) - fi - - cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION - cp -f $THIS_DIR/devstackubuntu_latecommand.sh $HTTP_SERVER_LOCATION/latecommand.sh - - sed \ - -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \ - -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \ - -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \ - -e "s,\(d-i passwd/root-password password\).*,\1 $GUEST_PASSWORD,g" \ - -e "s,\(d-i passwd/root-password-again password\).*,\1 $GUEST_PASSWORD,g" \ - -e "s,\(d-i preseed/late_command string\).*,\1 in-target mkdir -p /tmp; in-target wget --no-proxy ${HOST_IP}/latecommand.sh -O /root/latecommand.sh; in-target bash /root/latecommand.sh,g" \ - -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" - - sed \ - -e "s,@XS_TOOLS_URL@,$XS_TOOLS_URL,g" \ - -i "${HTTP_SERVER_LOCATION}/latecommand.sh" - fi - - # Update the template - $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL - - # create a new VM from the given template with eth0 attached to the given - # network - $THIS_DIR/scripts/install-os-vpx.sh \ - -t "$UBUNTU_INST_TEMPLATE_NAME" \ - -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ - -l "$GUEST_NAME" - - set_vm_memory "$GUEST_NAME" "1024" - - xe vm-start vm="$GUEST_NAME" - - # wait for install to finish - wait_for_VM_to_halt - - # set VM to restart after a reboot - vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") - xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" - - # Make template from VM - snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE") - xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" -else - # - # Template already installed, create VM from template - # - vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") -fi - -if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then - echo "User requested to quit after JEOS installation" - exit 0 -fi - -# -# Prepare VM for DevStack -# -xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" - -# Install XenServer tools, and other such things -$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" - -# Set virtual machine parameters -set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" - -# Max out VCPU count for better performance -max_vcpus "$GUEST_NAME" - -# Wipe out all network cards -destroy_all_vifs_of "$GUEST_NAME" - -# Add only one interface to prepare the guest template -add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0" - -# start the VM to run the prepare steps -xe vm-start vm="$GUEST_NAME" - -# Wait for prep script to finish and shutdown system -wait_for_VM_to_halt - -## Setup network cards -# Wipe out all -destroy_all_vifs_of "$GUEST_NAME" -# Tenant network -add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR" -# Management network -add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR" -# Public network -add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR" - -# -# Inject DevStack inside VM disk -# -$THIS_DIR/build_xva.sh "$GUEST_NAME" - -FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}" -append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" - -# Add a separate xvdb, if it was requested -if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then - vm=$(xe vm-list name-label="$GUEST_NAME" --minimal) - - # 
Add a new disk - localsr=$(get_local_sr) - extra_vdi=$(xe vdi-create \ - name-label=xvdb-added-by-devstack \ - virtual-size="${XEN_XVDB_SIZE_GB}GiB" \ - sr-uuid=$localsr type=user) - xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1 -fi - -# create a snapshot before the first boot -# to allow a quick re-run with the same settings -xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" - -# -# Run DevStack VM -# -xe vm-start vm="$GUEST_NAME" - -function ssh_no_check { - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" -} - -# Get hold of the Management IP of OpenStack VM -OS_VM_MANAGEMENT_ADDRESS=$MGT_IP -if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then - OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) -fi - -# Get hold of the Service IP of OpenStack VM -if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then - OS_VM_SERVICES_ADDRESS=$MGT_IP - if [ $MGT_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) - fi -else - OS_VM_SERVICES_ADDRESS=$PUB_IP - if [ $PUB_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR) - fi -fi - -# Create an ssh-keypair, and set it up for dom0 user -rm -f /root/dom0key /root/dom0key.pub -ssh-keygen -f /root/dom0key -P "" -C "dom0" -DOMID=$(get_domid "$GUEST_NAME") - -xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" -xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID - -function run_on_appliance { - ssh \ - -i /root/dom0key \ - -o UserKnownHostsFile=/dev/null \ - -o StrictHostKeyChecking=no \ - -o BatchMode=yes \ - "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@" -} - -# Wait until we can log in to the appliance -while ! run_on_appliance true; do - sleep 1 -done - -# Remove authenticated_keys updater cronjob -echo "" | run_on_appliance crontab - - -# Generate a passwordless ssh key for domzero user -echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance - -# Authenticate that user to dom0 -run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys - -# If we have copied our ssh credentials, use ssh to monitor while the installation runs -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -COPYENV=${COPYENV:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then - set +x - - echo "VM Launched - Waiting for run.sh" - while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e /opt/stack/run_sh.pid"; do - sleep 10 - done - echo -n "devstack service is running, waiting for stack.sh to start logging..." - - pid=`ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "cat /opt/stack/run_sh.pid"` - if [ -n "$SCREEN_LOGDIR" ]; then - while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e ${SCREEN_LOGDIR}/stack.log"; do - sleep 10 - done - - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "tail --pid $pid -n +1 -f ${SCREEN_LOGDIR}/stack.log" - else - echo -n "SCREEN_LOGDIR not set; just waiting for process $pid to finish" - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "wait $pid" - fi - - set -x - # Fail if devstack did not succeed - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /opt/stack/runsh.succeeded' - - set +x - echo "################################################################################" - echo "" - echo "All Finished!" 
- echo "You can visit the OpenStack Dashboard" - echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." -else - set +x - echo "################################################################################" - echo "" - echo "All Finished!" - echo "Now, you can monitor the progress of the stack.sh installation by " - echo "looking at the console of your domU / checking the log files." - echo "" - echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" - echo "and then do: 'sudo systemctl status devstack' to check if devstack is still running." - echo "Check that /opt/stack/runsh.succeeded exists" - echo "" - echo "When devstack completes, you can visit the OpenStack Dashboard" - echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." -fi diff --git a/tools/xen/mocks b/tools/xen/mocks deleted file mode 100644 index 3b9b05c747..0000000000 --- a/tools/xen/mocks +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -test ! -e "$LIST_OF_ACTIONS" && { - echo "Mocking is not set up properly." - echo "LIST_OF_ACTIONS should point to an existing file." - exit 1 -} - -test ! -e "$LIST_OF_DIRECTORIES" && { - echo "Mocking is not set up properly." - echo "LIST_OF_DIRECTORIES should point to an existing file." - exit 1 -} - -test ! -e "$XE_RESPONSE" && { - echo "Mocking is not set up properly." - echo "XE_RESPONSE should point to an existing file." - exit 1 -} - -test ! -e "$XE_CALLS" && { - echo "Mocking is not set up properly." - echo "XE_CALLS should point to an existing file." - exit 1 -} - -function mktemp { - if test "${1:-}" = "-d"; - then - echo "tempdir" - else - echo "tempfile" - fi -} - -function wget { - if [[ $@ =~ "failurl" ]]; then - return 1 - fi - echo "wget $@" >> $LIST_OF_ACTIONS -} - -function mkdir { - if test "${1:-}" = "-p"; - then - echo "$2" >> $LIST_OF_DIRECTORIES - fi -} - -function unzip { - echo "Random rubbish from unzip" - echo "unzip $@" >> $LIST_OF_ACTIONS -} - -function rm { - echo "rm $@" >> $LIST_OF_ACTIONS -} - -function ln { - echo "ln $@" >> $LIST_OF_ACTIONS -} - -function [ { - if test "${1:-}" = "-d"; - then - echo "[ $@" >> $LIST_OF_ACTIONS - for directory in $(cat $LIST_OF_DIRECTORIES) - do - if test "$directory" = "$2" - then - return 0 - fi - done - return 1 - fi - echo "Mock test does not implement the requested function: ${1:-}" - exit 1 -} - -function die_with_error { - echo "$1" >> $DEAD_MESSAGES -} - -function xe { - cat $XE_RESPONSE - { - for i in $(seq "$#") - do - eval "echo \"\$$i\"" - done - } >> $XE_CALLS -} diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh deleted file mode 100755 index 6de1afc199..0000000000 --- a/tools/xen/prepare_guest.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -# This script is run on an Ubuntu VM. -# This script is inserted into the VM by prepare_guest_template.sh -# and is run when that VM boots. 
-# It customizes a fresh Ubuntu install, so it is ready -# to run stack.sh -# -# This includes installing the XenServer tools, -# creating the user called "stack", -# and shuts down the VM to signal the script has completed - -set -o errexit -set -o nounset -set -o xtrace - -# Configurable nuggets -GUEST_PASSWORD="$1" -STACK_USER="$2" -DOMZERO_USER="$3" - - -function setup_domzero_user { - local username - - username="$1" - - local key_updater_script - local sudoers_file - key_updater_script="/home/$username/update_authorized_keys.sh" - sudoers_file="/etc/sudoers.d/allow_$username" - - # Create user - adduser --disabled-password --quiet "$username" --gecos "$username" - - # Give passwordless sudo - cat > $sudoers_file << EOF - $username ALL = NOPASSWD: ALL -EOF - chmod 0440 $sudoers_file - - # A script to populate this user's authenticated_keys from xenstore - cat > $key_updater_script << EOF -#!/bin/bash -set -eux - -DOMID=\$(sudo xenstore-read domid) -sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username -sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value -cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys -EOF - - # Give the key updater to the user - chown $username:$username $key_updater_script - chmod 0700 $key_updater_script - - # Setup the .ssh folder - mkdir -p /home/$username/.ssh - chown $username:$username /home/$username/.ssh - chmod 0700 /home/$username/.ssh - touch /home/$username/.ssh/authorized_keys - chown $username:$username /home/$username/.ssh/authorized_keys - chmod 0600 /home/$username/.ssh/authorized_keys - - # Setup the key updater as a cron job - crontab -u $username - << EOF -* * * * * $key_updater_script -EOF - -} - -# Make a small cracklib dictionary, so that passwd still works, but we don't -# have the big dictionary. -mkdir -p /usr/share/cracklib -echo a | cracklib-packer - -# Make /etc/shadow, and set the root password -pwconv -echo "root:$GUEST_PASSWORD" | chpasswd - -# Put the VPX into UTC. -rm -f /etc/localtime - -# Add stack user -groupadd libvirtd -useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd -echo $STACK_USER:$GUEST_PASSWORD | chpasswd -echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - -setup_domzero_user "$DOMZERO_USER" - -# Add an udev rule, so that new block devices could be written by stack user -cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF -KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" -EOF - -# Give ownership of /opt/stack to stack user -chown -R $STACK_USER /opt/stack - -function setup_vimrc { - if [ ! 
-e $1 ]; then - # Simple but usable vimrc - cat > $1 <$STAGING_DIR/etc/rc.local -#!/bin/sh -e -bash /opt/stack/prepare_guest.sh \\ - "$GUEST_PASSWORD" "$STACK_USER" "$DOMZERO_USER" \\ - > /opt/stack/prepare_guest.log 2>&1 -EOF - -# Update ubuntu repositories -cat > $STAGING_DIR/etc/apt/sources.list << EOF -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse - -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse -EOF - -rm -f $STAGING_DIR/etc/apt/apt.conf -if [ -n "$UBUNTU_INST_HTTP_PROXY" ]; then - cat > $STAGING_DIR/etc/apt/apt.conf << EOF -Acquire::http::Proxy "$UBUNTU_INST_HTTP_PROXY"; -EOF -fi diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh deleted file mode 100755 index 66f7ef4763..0000000000 --- a/tools/xen/scripts/install-os-vpx.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -set -eux - -BRIDGE= -NAME_LABEL= -TEMPLATE_NAME= - -usage() -{ -cat << EOF - - Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] - - Install a VM from a template - - OPTIONS: - - -h Shows this message. - -t template VM template to use - -l name Specifies the name label for the VM. - -n bridge The bridge/network to use for eth0. Defaults to xenbr0 -EOF -} - -get_params() -{ - while getopts "hbn:r:l:t:" OPTION; do - case $OPTION in - h) usage - exit 1 - ;; - n) - BRIDGE=$OPTARG - ;; - l) - NAME_LABEL=$OPTARG - ;; - t) - TEMPLATE_NAME=$OPTARG - ;; - ?) - usage - exit - ;; - esac - done - if [[ -z $BRIDGE ]]; then - BRIDGE=xenbr0 - fi - - if [[ -z $TEMPLATE_NAME ]]; then - echo "Please specify a template name" >&2 - exit 1 - fi - - if [[ -z $NAME_LABEL ]]; then - echo "Please specify a name-label for the new VM" >&2 - exit 1 - fi -} - - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - - -find_network() -{ - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ]; then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" -} - - -create_vif() -{ - local v="$1" - echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid - out_network_uuid=$(find_network "$BRIDGE") - xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" -} - - - -# Make the VM auto-start on server boot. -set_auto_start() -{ - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true -} - - -destroy_vifs() -{ - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v"); do - xe vif-destroy uuid="$vif" - done - unset IFS -} - - -get_params "$@" - -vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL") -destroy_vifs "$vm_uuid" -set_auto_start "$vm_uuid" -create_vif "$vm_uuid" -xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh deleted file mode 100755 index 6ea364255e..0000000000 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash -# -# This creates an Ubuntu Server 32bit or 64bit template -# on Xenserver 5.6.x, 6.0.x and 6.1.x -# The template does a net install only -# -# Based on a script by: David Markey -# - -set -o errexit -set -o nounset -set -o xtrace - -# This directory -BASE_DIR=$(cd $(dirname "$0") && pwd) - -# For default setings see xenrc -source $BASE_DIR/../xenrc - -# Get the params -preseed_url=$1 - -# Delete template or skip template creation as required -previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \ - params=uuid --minimal) -if [ -n "$previous_template" ]; then - if $CLEAN_TEMPLATES; then - xe template-param-clear param-name=other-config uuid=$previous_template - xe template-uninstall template-uuid=$previous_template force=true - else - echo "Template $UBUNTU_INST_TEMPLATE_NAME already present" - exit 0 - fi -fi - -# Get built-in template -builtin_name="Debian Squeeze 6.0 (32-bit)" -builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal) -if [[ -z $builtin_uuid ]]; then - echo "Can't find the Debian Squeeze 32bit template on your XenServer." 
- exit 1 -fi - -# Clone built-in template to create new template -new_uuid=$(xe vm-clone uuid=$builtin_uuid \ - new-name-label="$UBUNTU_INST_TEMPLATE_NAME") -disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) - -# Some of these settings can be found in example preseed files -# however these need to be answered before the netinstall -# is ready to fetch the preseed file, and as such must be here -# to get a fully automated install -pvargs="quiet console=hvc0 partman/default_filesystem=ext3 \ -console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ -keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ -netcfg/choose_interface=eth0 \ -netcfg/get_hostname=os netcfg/get_domain=os auto \ -url=${preseed_url}" - -if [ "$UBUNTU_INST_IP" != "dhcp" ]; then - netcfgargs="netcfg/disable_autoconfig=true \ -netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ -netcfg/get_ipaddress=${UBUNTU_INST_IP} \ -netcfg/get_netmask=${UBUNTU_INST_NETMASK} \ -netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \ -netcfg/confirm_static=true" - pvargs="${pvargs} ${netcfgargs}" -fi - -xe template-param-set uuid=$new_uuid \ - other-config:install-methods=http \ - other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \ - PV-args="$pvargs" \ - other-config:debian-release="$UBUNTU_INST_RELEASE" \ - other-config:default_template=true \ - other-config:disks='' \ - other-config:install-arch="$UBUNTU_INST_ARCH" - -if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then - xe template-param-set uuid=$new_uuid \ - other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY" -fi - -echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi deleted file mode 100755 index 909ce328b0..0000000000 --- a/tools/xen/scripts/manage-vdi +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -set -eux - -action="$1" -vm="$2" -device="${3-0}" -part="${4-}" - -function xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -function run_udev_settle() { - which_udev=$(which udevsettle) || true - if [ -n "$which_udev" ]; then - udevsettle - else - udevadm settle - fi -} - -vm_uuid=$(xe_min vm-list name-label="$vm") -vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ - userdevice="$device") - -dom0_uuid=$(xe_min vm-list is-control-domain=true) - -function get_mount_device() { - vbd_uuid=$1 - - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then - DEBIAN_FRONTEND=noninteractive \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ - install kpartx &> /dev/null || true - mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") - if [ -z "$mapping" ]; then - echo "Failed to find mapping" - exit -1 - fi - - local device="/dev/mapper/${mapping}" - for (( i = 0; i < 5; i++ )) ; do - if [ -b $device ] ; then - echo $device - return - fi - sleep 1 - done - echo "ERROR: timed out waiting for dev-mapper" - exit 1 - else - echo "/dev/$dev$part" - fi -} - -function clean_dev_mappings() { - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then - kpartx -dv "/dev/$dev" - fi -} - -function open_vdi() { - vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ - device=autodetect) - mp=$(mktemp -d) - xe vbd-plug uuid="$vbd_uuid" - - run_udev_settle - - mount_device=$(get_mount_device "$vbd_uuid") - mount "$mount_device" "$mp" - echo "Your vdi is mounted at 
$mp" -} - -function close_vdi() { - vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") - mount_device=$(get_mount_device "$vbd_uuid") - run_udev_settle - umount "$mount_device" - - clean_dev_mappings - - xe vbd-unplug uuid=$vbd_uuid - xe vbd-destroy uuid=$vbd_uuid -} - -if [ "$action" == "open" ]; then - open_vdi -elif [ "$action" == "close" ]; then - close_vdi -fi diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh deleted file mode 100755 index 2846dc42d0..0000000000 --- a/tools/xen/scripts/on_exit.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -if [ -z "${on_exit_hooks:-}" ]; then - on_exit_hooks=() -fi - -on_exit() -{ - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do - eval "${on_exit_hooks[$i]}" - done -} - -add_on_exit() -{ - local n=${#on_exit_hooks[*]} - on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]]; then - trap on_exit EXIT - fi -} diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh deleted file mode 100755 index 96dad7e852..0000000000 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -set -ex - -# By default, don't remove the templates -REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} -if [ "$1" = "--remove-templates" ]; then - REMOVE_TEMPLATES=true -fi - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -destroy_vdi() -{ - local vbd_uuid="$1" - local type - type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev - dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid - vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - - if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then - xe vdi-destroy uuid=$vdi_uuid - fi -} - -uninstall() -{ - local vm_uuid="$1" - local power_state - power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - - if [ "$power_state" != "halted" ]; then - xe vm-shutdown vm=$vm_uuid force=true - fi - - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done - - xe vm-uninstall vm=$vm_uuid force=true >/dev/null -} - -uninstall_template() -{ - local vm_uuid="$1" - - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done - - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null -} - -# remove the VMs and their disks -for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall "$u" -done - -# remove the templates -if [ "$REMOVE_TEMPLATES" == "true" ]; then - for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall_template "$u" - done -fi diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh deleted file mode 100755 index 324e6a1a1e..0000000000 --- a/tools/xen/test_functions.sh +++ /dev/null @@ -1,205 +0,0 @@ -#!/bin/bash - -# Tests for functions. -# -# The tests are sourcing the mocks file to mock out various functions. The -# mocking-out always happens in a sub-shell, thus it does not have impact on -# the functions defined here. - -# To run the tests, please run: -# -# ./test_functions.sh run_tests -# -# To only print out the discovered test functions, run: -# -# ./test_functions.sh - -. functions - -# Setup -function before_each_test { - LIST_OF_DIRECTORIES=$(mktemp) - truncate -s 0 $LIST_OF_DIRECTORIES - - LIST_OF_ACTIONS=$(mktemp) - truncate -s 0 $LIST_OF_ACTIONS - - XE_RESPONSE=$(mktemp) - truncate -s 0 $XE_RESPONSE - - XE_CALLS=$(mktemp) - truncate -s 0 $XE_CALLS - - DEAD_MESSAGES=$(mktemp) - truncate -s 0 $DEAD_MESSAGES -} - -# Teardown -function after_each_test { - rm -f $LIST_OF_DIRECTORIES - rm -f $LIST_OF_ACTIONS - rm -f $XE_RESPONSE - rm -f $XE_CALLS -} - -# Helpers -function setup_xe_response { - echo "$1" > $XE_RESPONSE -} - -function given_directory_exists { - echo "$1" >> $LIST_OF_DIRECTORIES -} - -function assert_directory_exists { - grep "$1" $LIST_OF_DIRECTORIES -} - -function assert_previous_command_failed { - [ "$?" != "0" ] || exit 1 -} - -function assert_xe_min { - grep -qe "^--minimal\$" $XE_CALLS -} - -function assert_xe_param { - grep -qe "^$1\$" $XE_CALLS -} - -function assert_died_with { - diff -u <(echo "$1") $DEAD_MESSAGES -} - -function mock_out { - local FNNAME="$1" - local OUTPUT="$2" - - . <(cat << EOF -function $FNNAME { - echo "$OUTPUT" -} -EOF -) -} - -function assert_symlink { - grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS -} - -# Tests -function test_plugin_directory_on_xenserver { - given_directory_exists "/etc/xapi.d/plugins/" - - PLUGDIR=$(. 
mocks && xapi_plugin_location) - - [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ] -} - -function test_plugin_directory_on_xcp { - given_directory_exists "/usr/lib/xcp/plugins/" - - PLUGDIR=$(. mocks && xapi_plugin_location) - - [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ] -} - -function test_no_plugin_directory_found { - set +e - - local IGNORE - IGNORE=$(. mocks && xapi_plugin_location) - - assert_previous_command_failed - - grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS - grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS -} - -function test_create_directory_for_kernels { - ( - . mocks - mock_out get_local_sr_path /var/run/sr-mount/uuid1 - create_directory_for_kernels - ) - - assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels" - assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels" -} - -function test_create_directory_for_kernels_existing_dir { - ( - . mocks - given_directory_exists "/boot/guest" - create_directory_for_kernels - ) - - diff -u $LIST_OF_ACTIONS - << EOF -[ -d /boot/guest ] -EOF -} - -function test_create_directory_for_images { - ( - . mocks - mock_out get_local_sr_path /var/run/sr-mount/uuid1 - create_directory_for_images - ) - - assert_directory_exists "/var/run/sr-mount/uuid1/os-images" - assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images" -} - -function test_create_directory_for_images_existing_dir { - ( - . mocks - given_directory_exists "/images" - create_directory_for_images - ) - - diff -u $LIST_OF_ACTIONS - << EOF -[ -d /images ] -EOF -} - -function test_get_local_sr { - setup_xe_response "uuid123" - - local RESULT - RESULT=$(. mocks && get_local_sr) - - [ "$RESULT" == "uuid123" ] - - assert_xe_param "pool-list" params=default-SR minimal=true -} - -function test_get_local_sr_path { - local RESULT - RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path) - - [ "/var/run/sr-mount/uuid1" == "$RESULT" ] -} - -# Test runner -[ "$1" = "" ] && { - grep -e "^function *test_" $0 | cut -d" " -f2 -} - -[ "$1" = "run_tests" ] && { - for testname in $($0); do - echo "$testname" - before_each_test - ( - set -eux - $testname - ) - if [ "$?" != "0" ]; then - echo "FAIL" - exit 1 - else - echo "PASS" - fi - - after_each_test - done -} diff --git a/tools/xen/xenrc b/tools/xen/xenrc deleted file mode 100644 index 169e0427a8..0000000000 --- a/tools/xen/xenrc +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/bash - -# -# XenServer specific defaults for the /tools/xen/ scripts -# Similar to stackrc, you can override these in your localrc -# - -# Name of this guest -GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} - -# Template cleanup -CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} - -# Size of image -VDI_MB=${VDI_MB:-5000} - -# Devstack now contains many components. 4GB ram is not enough to prevent -# swapping and memory fragmentation - the latter of which can cause failures -# such as blkfront failing to plug a VBD and lead to random test fails. -# -# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs -OSDOMU_MEM_MB=6144 -OSDOMU_VDI_GB=8 - -# Network mapping. Specify bridge names or network names. Network names may -# differ across localised versions of XenServer. If a given bridge/network -# was not found, a new network will be created with the specified name. - -# Get the management network from the XS installation -VM_BRIDGE_OR_NET_NAME="OpenStack VM Network" -PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network" - -# VM Password -GUEST_PASSWORD=${GUEST_PASSWORD:-secret} - -# Extracted variables for OpenStack VM network device numbers. 
-# Make sure they form a continuous sequence starting from 0 -MGT_DEV_NR=0 -VM_DEV_NR=1 -PUB_DEV_NR=2 - -# Host Interface, i.e. the interface on the nova vm you want to expose the -# services on. Usually the device connected to the management network or the -# one connected to the public network is used. -HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"} - -# -# Our nova host's network info -# - -# Management network -MGT_IP=${MGT_IP:-dhcp} -MGT_NETMASK=${MGT_NETMASK:-ignored} - -# VM Network -VM_IP=${VM_IP:-10.255.255.255} -VM_NETMASK=${VM_NETMASK:-255.255.255.0} - -# Public network -# Aligned with stack.sh - see FLOATING_RANGE -PUB_IP=${PUB_IP:-172.24.4.10} -PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} - -# Ubuntu install settings -UBUNTU_INST_RELEASE="xenial" -UBUNTU_INST_TEMPLATE_NAME="Ubuntu 16.04 (64-bit) for DevStack" -# For 12.04 use "precise" and update template name -# However, for 12.04, you should be using -# XenServer 6.1 and later or XCP 1.6 or later -# 11.10 is only really supported with XenServer 6.0.2 and later -UBUNTU_INST_ARCH="amd64" -UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com" -UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" -UBUNTU_INST_HTTP_PROXY="" -UBUNTU_INST_LOCALE="en_US" -UBUNTU_INST_KEYBOARD="us" -# network configuration for ubuntu netinstall -UBUNTU_INST_IP="dhcp" -UBUNTU_INST_NAMESERVERS="" -UBUNTU_INST_NETMASK="" -UBUNTU_INST_GATEWAY="" - -# Create a separate xvdb. Tis could be used as a backing device for cinder -# volumes. Specify -# XEN_XVDB_SIZE_GB=10 -# VOLUME_BACKING_DEVICE=/dev/xvdb -# in your localrc to avoid kernel lockups: -# https://bugs.launchpad.net/cinder/+bug/1023755 -# -# Set the size to 0 to avoid creation of additional disk. -XEN_XVDB_SIZE_GB=0 - -STACK_USER=stack -DOMZERO_USER=domzero - -RC_DIR="../.." - -restore_nounset=$(set +o | grep nounset) -set +u - -## Note that the lines below are coming from stackrc to support -## new-style config files -source $RC_DIR/functions-common - -# allow local overrides of env variables, including repo config -if [[ -f $RC_DIR/localrc ]]; then - # Old-style user-supplied config - source $RC_DIR/localrc -elif [[ -f $RC_DIR/.localrc.auto ]]; then - # New-style user-supplied config extracted from local.conf - source $RC_DIR/.localrc.auto -fi - -$restore_nounset From 730ce45466a903c639554b2e70f6e40a02e9e29d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 23 Feb 2018 13:56:48 +0000 Subject: [PATCH 0825/1936] Create NOVA_READY_TIMEOUT in is_nova_ready function The function was introduced in [0] using a hardcoded timeout of 60 seconds which turns out to be too small on slow machines. Create a new global variable NOVA_READY_TIMEOUT instead so that users can override the timeout if necessary. [0] I32eb59b9d6c225a3e93992be3a3b9f4b251d7189 Co-Authored-By: Mohammed Naser Change-Id: I0cd7f193589a1a0776ae76dc30cecefe7ba9e5db --- lib/nova | 2 +- stackrc | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8691c8eb33..d89d64e72f 100644 --- a/lib/nova +++ b/lib/nova @@ -1000,7 +1000,7 @@ function is_nova_ready { # because of the dom0/domU split. Just ignore for now. 
return fi - wait_for_compute 60 + wait_for_compute $NOVA_READY_TIMEOUT } function start_nova { diff --git a/stackrc b/stackrc index b7105d368e..aebf152bad 100644 --- a/stackrc +++ b/stackrc @@ -813,6 +813,9 @@ fi # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Timeout for compute node registration in Nova +NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} + # Service graceful shutdown timeout SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} From 7880904d15c1dbbea5c26ece1a30eb0850bf10ab Mon Sep 17 00:00:00 2001 From: Jacky Hu Date: Mon, 26 Feb 2018 18:36:59 +0800 Subject: [PATCH 0826/1936] Run local script after nova cellsv2 is configured If user try to create a server in local script before nova cells is configured, it will run into the following error: Host 'x' is not mapped to any cell. Change-Id: I4fe76865fd6e16d5beb5ed9e5d6a9f3542e990a5 --- stack.sh | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index 1d1f12e116..1803169e2d 100755 --- a/stack.sh +++ b/stack.sh @@ -1385,15 +1385,6 @@ run_phase stack extra merge_config_group $TOP_DIR/local.conf post-extra -# Run local script -# ---------------- - -# Run ``local.sh`` if it exists to perform user-managed tasks -if [[ -x $TOP_DIR/local.sh ]]; then - echo "Running user script $TOP_DIR/local.sh" - $TOP_DIR/local.sh -fi - # Sanity checks # ============= @@ -1428,6 +1419,15 @@ if is_service_enabled n-api; then fi fi +# Run local script +# ---------------- + +# Run ``local.sh`` if it exists to perform user-managed tasks +if [[ -x $TOP_DIR/local.sh ]]; then + echo "Running user script $TOP_DIR/local.sh" + $TOP_DIR/local.sh +fi + # Bash completion # =============== From 7b1d5b64ac53e4fea9c2bdb0a6e44fc90fbac067 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Mon, 26 Feb 2018 22:23:02 +0000 Subject: [PATCH 0827/1936] Define devstack-base abstract job Define an abstract job devstack base that does not require any project apart from devstack. This job defines basic devstack_localrc settings that are common to any devstack job (mostly to work with infra) and devstack_services to emit "disable_all_services" so to cancel any devstack default. The variables are defined as global ones as well as host-vars for the controller and group-vars for peer nodes, so that any descendent job may extend them, thanks for Zuul dict merging. Change-Id: I2cdb723f6ee209683044fecec59ff7b510a2752b --- .zuul.yaml | 96 +++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 74 insertions(+), 22 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc29466f35..0ae1187527 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -26,43 +26,43 @@ - name: subnode nodes: - compute1 + - name: switch + nodes: + - controller + - name: peers + nodes: + - compute1 - job: - name: devstack + name: devstack-base parent: multinode - description: Base devstack job - nodeset: openstack-single-node + abstract: true + description: | + Base abstract Devstack job. + + Defines plays and base variables, but it does not include any project + and it does not run any service by default. This is a common base for + all single Devstack jobs, single or multinode. + Variables are defined in job.vars, which is what is then used by single + node jobs and by multi node jobs for the controller, as well as in + job.group-vars.peers, which is what is used by multi node jobs for peer + nodes (everything but the controller). 
required-projects: - openstack-dev/devstack - - openstack/cinder - - openstack/glance - - openstack/keystone - - openstack/neutron - - openstack/nova - - openstack/requirements - - openstack/swift roles: - zuul: openstack-infra/devstack-gate - zuul: openstack-infra/openstack-zuul-jobs - timeout: 7200 vars: - test_matrix_configs: [neutron, tlsproxy] devstack_localrc: DATABASE_PASSWORD: secretdatabase RABBIT_PASSWORD: secretrabbit ADMIN_PASSWORD: secretadmin SERVICE_PASSWORD: secretservice NETWORK_GATEWAY: 10.1.0.1 - Q_USE_DEBUG_COMMAND: true FIXED_RANGE: 10.1.0.0/20 IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 FLOATING_RANGE: 172.24.5.0/24 PUBLIC_NETWORK_GATEWAY: 172.24.5.1 - FLOATING_HOST_PREFIX: 172.24.4 - FLOATING_HOST_MASK: 23 - SWIFT_REPLICAS: 1 - SWIFT_START_ALL_SERVICES: false - SWIFT_HASH: 1234123412341234 LOGFILE: /opt/stack/logs/devstacklog.txt LOG_COLOR: false VERBOSE: true @@ -76,8 +76,7 @@ # from the location below for all the CI jobs. ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: - horizon: false - tempest: false + base: false zuul_copy_output: '{{ devstack_conf_dir }}/local.conf': 'logs' '{{ devstack_conf_dir }}/localrc': 'logs' @@ -117,7 +116,28 @@ log: True localrc: True stackenv: True - summary: True + group-vars: + peers: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: false + VERBOSE: true + VERBOSE_NO_TIMESTAMP: true + NOVNC_FROM_PACKAGE: true + ERROR_ON_CLONE: true + LIBVIRT_TYPE: qemu + ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ + devstack_services: + base: false pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml @@ -130,9 +150,41 @@ # Translations - ^.*/locale/.*po$ +- job: + name: devstack + parent: devstack-base + description: | + Single node devstack job for integration gate. + nodeset: openstack-single-node + required-projects: + - openstack/cinder + - openstack/glance + - openstack/keystone + - openstack/neutron + - openstack/nova + - openstack/requirements + - openstack/swift + timeout: 7200 + vars: + test_matrix_configs: [neutron, tlsproxy] + devstack_localrc: + # Common OpenStack services settings + SWIFT_REPLICAS: 1 + SWIFT_START_ALL_SERVICES: false + SWIFT_HASH: 1234123412341234 + CINDER_PERIODIC_INTERVAL: 10 + DEBUG_LIBVIRT_COREDUMPS: True + NOVA_VNC_ENABLED: true + VNCSERVER_LISTEN: 0.0.0.0 + VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + devstack_services: + base: true + horizon: false + tempest: false + - job: name: devstack-multinode - parent: devstack + parent: devstack-base description: Base devstack multinode job nodeset: openstack-two-node # NOTE(andreaf) The multinode job is useful to see the setup of different From b59c93cc44bc9ff0e041ff24805880294117f745 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Mon, 26 Feb 2018 16:36:54 +0000 Subject: [PATCH 0828/1936] Actually generate pip freeze outputs the shell script used is actually being run thru 'sh', not bash, which does not understand "[[" test operators. Explicitly run this script with /bin/bash instead. 
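For illustration only (a sketch, not part of the patch): on distributions where
/bin/sh is dash, the "[[" keyword simply does not exist, which is why the script
has to be run with an explicit bash executable:

    sh -c '[[ -n "x" ]] && echo ok'     # dash: "[[: not found"
    bash -c '[[ -n "x" ]] && echo ok'   # prints "ok"
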
Change-Id: I551d2631bcb6aef49550d69b3830ffcb509abfb7 --- roles/capture-system-logs/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index cd8f4f0eab..de4f8eda08 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -1,6 +1,7 @@ # TODO(andreaf) Make this into proper Ansible - name: Stage various logs and reports shell: + executable: /bin/bash cmd: | sudo iptables-save > {{ stage_dir }}/iptables.txt df -h > {{ stage_dir }}/df.txt From ca61966f47af9070a7c6ed6fef3f2d630c43c919 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Wed, 28 Feb 2018 14:46:05 +0100 Subject: [PATCH 0829/1936] neutron: Do no longer set "url" in nova.conf Since[1], "url" in the [neutron] section in nova.conf should no longer be set. [1] https://github.com/openstack/nova/commit/6cde77ebbab85bc8ccd2ab7ad977b1d4af4a13fa Depends-On: https://review.openstack.org/548572 Related-Bug: #1752289 Change-Id: Ied6c155da9d51a25ba7a524e69d018d39ed3442c --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index c5839f5c3e..72c5872ccd 100644 --- a/lib/neutron +++ b/lib/neutron @@ -306,7 +306,6 @@ function configure_neutron_nova_new { iniset $NOVA_CONF neutron project_domain_name "Default" iniset $NOVA_CONF neutron auth_strategy $NEUTRON_AUTH_STRATEGY iniset $NOVA_CONF neutron region_name "$REGION_NAME" - iniset $NOVA_CONF neutron url $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver diff --git a/lib/neutron-legacy b/lib/neutron-legacy index bb76c5f9ce..5f6cee73fb 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -376,7 +376,6 @@ function create_nova_conf_neutron { iniset $NOVA_CONF neutron project_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $NOVA_CONF neutron region_name "$REGION_NAME" - iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver From f63ddd609f8ba0c6000d7d07074dc64232830351 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Sun, 25 Feb 2018 14:48:05 +0000 Subject: [PATCH 0830/1936] Replace depricated iscsi_helper with target_helper Cinder change I5231f8fe3399deb9c57e6efb121d0d008dc9c7f4 replaces iscsi_helper with more general one. 
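As a rough sketch of the effect (the backend section name and helper value shown
here are the usual devstack defaults, lioadm on Fedora and tgtadm elsewhere),
cinder.conf ends up carrying the renamed option:

    [DEFAULT]
    target_helper = tgtadm

    [lvmdriver-1]
    target_helper = tgtadm
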
Change-Id: I49fe0365b170e5a5b0449d80003bcf970e4c191d --- lib/cinder | 4 ++-- lib/cinder_backends/fake_gate | 2 +- lib/cinder_backends/lvm | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index 75486adda5..a1d68712b6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -101,7 +101,7 @@ CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} if is_fedora; then CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then - die "lioadm is the only valid Cinder iscsi_helper config on this platform" + die "lioadm is the only valid Cinder target_helper config on this platform" fi else CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} @@ -230,7 +230,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate index 6b1f848790..3ffd9a6785 100644 --- a/lib/cinder_backends/fake_gate +++ b/lib/cinder_backends/fake_gate @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 03e188029f..497081c9e4 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" } From 3360ffb5f27cef3f22da944e8a1314678a4bf970 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 2 Mar 2018 21:08:34 +0100 Subject: [PATCH 0831/1936] devstack master is rocky now See #snowpenstack! Change-Id: I2df485c2acf1bec0d0f7ddc6538bff2c2ec141e3 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index aebf152bad..4f627ccbd7 100644 --- a/stackrc +++ b/stackrc @@ -258,7 +258,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="queens" +DEVSTACK_SERIES="rocky" ############## # From 6bab8321b44db260f9e001d8276a660d1ff56ed4 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 2 Mar 2018 21:13:12 +0100 Subject: [PATCH 0832/1936] Remove references to hardcoded file writing in /tmp /tmp is a world writeable directory, so using hardcoded filenames in there is just a bad coding style (susceptible to symlink attacks). Avoid using it to not give a bad precedent. 
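The replacement pattern, sketched below, is to stream the content straight to its
destination with sudo tee instead of staging it under a predictable name in /tmp
and moving it into place:

    # before: fixed, world-guessable temp path
    cat > /tmp/90-stack-m.conf <<EOF
    ...
    EOF
    sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d

    # after: no temporary file at all
    cat <<EOF | sudo tee /etc/rsyslog.d/90-stack-m.conf >/dev/null
    ...
    EOF
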
Change-Id: Ia66763a0e4714f2226e98dbd85600b2035bd5088 --- functions | 4 +--- stack.sh | 6 ++---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/functions b/functions index 3fcc38dc4b..24994c0470 100644 --- a/functions +++ b/functions @@ -810,13 +810,11 @@ function get_random_port { # # Write out various useful state information to /etc/devstack-version function write_devstack_version { - cat - > /tmp/devstack-version </dev/null DevStack Version: ${DEVSTACK_SERIES} Change: $(git log --format="%H %s %ci" -1) OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME} EOF - sudo install -m 644 /tmp/devstack-version /etc/devstack-version - rm /tmp/devstack-version } # Restore xtrace diff --git a/stack.sh b/stack.sh index 1d1f12e116..c02b663646 100755 --- a/stack.sh +++ b/stack.sh @@ -962,17 +962,15 @@ fi if [[ $SYSLOG != "False" ]]; then if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then # Configure the master host to receive - cat </tmp/90-stack-m.conf + cat </dev/null \$ModLoad imrelp \$InputRELPServerRun $SYSLOG_PORT EOF - sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d else # Set rsyslog to send to remote host - cat </tmp/90-stack-s.conf + cat </dev/null *.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT EOF - sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi RSYSLOGCONF="/etc/rsyslog.conf" From d9c1275c5df55e822a7df6880a9a1430ab4f24a0 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Wed, 21 Feb 2018 14:35:58 +0000 Subject: [PATCH 0833/1936] Define devstack services without the test-matrix Emit a disable_all_services and define which service we run in the job directly. This drops the dependency from the test matrix, from devstack default list of services and it makes it easier for jobs to add/remove services based on the list in the base job. Change-Id: Ib1debefd541b933dbfc54d484c263cc0ed60423d --- .zuul.yaml | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 76 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0ae1187527..2deb769d30 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -76,6 +76,8 @@ # from the location below for all the CI jobs. ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: + # Ignore base set of services setup by test-matrix. + # Ignore any default set by devstack. Emit a "disable_all_services". base: false zuul_copy_output: '{{ devstack_conf_dir }}/local.conf': 'logs' @@ -154,7 +156,7 @@ name: devstack parent: devstack-base description: | - Single node devstack job for integration gate. + Single or multi node devstack job for integration gate. nodeset: openstack-single-node required-projects: - openstack/cinder @@ -166,7 +168,6 @@ - openstack/swift timeout: 7200 vars: - test_matrix_configs: [neutron, tlsproxy] devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -178,9 +179,81 @@ VNCSERVER_LISTEN: 0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" devstack_services: - base: true + # Core services enabled for this branch. + # This list replaces the test-matrix. 
+ # Shared services + dstat: true + etcd3: true + mysql: true + peakmem_tracker: true + rabbit: true + tls-proxy: true + # Keystone services + key: true + # Glance services + g-api: true + g-reg: true + # Nova services + n-api: true + n-api-meta: true + n-cauth: true + n-cond: true + n-cpu: true + n-novnc: true + n-obj: true + n-sch: true + placement-api: true + # Neutron services + neutron-api: true + neutron-agent: true + neutron-dhcp: true + neutron-l3: true + neutron-metadata-agent: true + neutron-metering: true + # Swift services + s-account: true + s-container: true + s-object: true + s-proxy: true + # Cinder services + c-api: true + c-bak: true + c-sch: true + c-vol: true + cinder: true + # Services we don't need. + # This section is not really needed, it's for readability. horizon: false tempest: false + # Test matrix emits ceilometer but ceilomenter is not installed in the + # integrated gate, so specifying the services has not effect. + # ceilometer-*: false + group-vars: + subnode: + devstack_services: + # Core services enabled for this branch. + # This list replaces the test-matrix. + # Shared services + dstat: true + peakmem_tracker: true + tls-proxy: true + # Nova services + n-cpu: true + placement-client: true + # Neutron services + neutron-agent: true + # Cinder services + c-bak: true + c-vol: true + # Services we don't run at all on subnode. + # This section is not really needed, it's for readability. + # keystone: false + # s-*: false + horizon: false + tempest: false + # Test matrix emits ceilometer but ceilomenter is not installed in the + # integrated gate, so specifying the services has not effect. + # ceilometer-*: false - job: name: devstack-multinode From acca80414ff3db373138ed5e7124ced7306c28df Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Sat, 3 Mar 2018 22:16:50 +0000 Subject: [PATCH 0834/1936] Ensure that stack home is owned by stack The role that sets up the user and its home folder must ensure that the home folder is owned by stack as well. 
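In plain shell terms the role now guarantees something equivalent to the
following (illustrative only; the stack home directory is typically /opt/stack):

    sudo install -d -m 0755 -o stack -g stack /opt/stack
    sudo install -d -m 0755 -o stack -g stack /opt/stack/.cache
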
Change-Id: I2e72d7b9d68a2a14f8a148ef82cbb3f569bd1cea --- roles/setup-stack-user/tasks/main.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml index 8384515ebe..0fc7c2d78b 100644 --- a/roles/setup-stack-user/tasks/main.yaml +++ b/roles/setup-stack-user/tasks/main.yaml @@ -21,10 +21,12 @@ group: stack become: yes -- name: Set stack user home directory permissions +- name: Set stack user home directory permissions and ownership file: path: '{{ devstack_stack_home_dir }}' mode: 0755 + owner: stack + group: stack become: yes - name: Copy 50_stack_sh file to /etc/sudoers.d @@ -36,7 +38,7 @@ group: root become: yes -- name: Create new/.cache folder within BASE +- name: Create .cache folder within BASE file: path: '{{ devstack_stack_home_dir }}/.cache' state: directory From 2a2349be5f82455c462b294dfada76820d4d2aa4 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Wed, 14 Feb 2018 19:24:04 +0000 Subject: [PATCH 0835/1936] Stage .localrc.auto Change-Id: I9beb713a0b037e0229b13fd13d7d4de156ed0b59 Depends-on: https://review.openstack.org/544606 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index dc3a42939e..2176a4c2a4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -110,6 +110,7 @@ zuul_copy_output: '{{ devstack_conf_dir }}/local.conf': 'logs' '{{ devstack_conf_dir }}/localrc': 'logs' + '{{ devstack_conf_dir }}/.localrc.auto': 'logs' '{{ devstack_conf_dir }}/.stackenv': 'logs' '{{ devstack_log_dir }}/dstat-csv.log': 'logs' '{{ devstack_log_dir }}/devstacklog.txt': 'logs' @@ -146,6 +147,7 @@ log: True localrc: True stackenv: True + auto: True group-vars: peers: devstack_localrc: From 1610cc22199f84395c87f4d37d079e8398934378 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 6 Mar 2018 11:40:46 +1100 Subject: [PATCH 0836/1936] Add a note on experimental jobs With I4161e1f1c8d47070dd35fad38b00715438d94eb2 and I37fe007dc6f387d43cbaf55771027718005ac40d we have removed most of the legacy experimental jobs. Add a short note so we don't go back to the Hotel California model of "you can check in but never check out". With zuulv3 it is possible to have self-testing changes for testing indiviual jobs. Experimental should be for things with wider application to run against all changes. Change-Id: Ibfb902b17a8f7d5355689f2a584c061c001df0d8 --- .zuul.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index dc3a42939e..2a2648e4ec 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -323,6 +323,12 @@ jobs: - devstack - devstack-unit-tests + # Please add a note on each job and conditions for the job not + # being experimental any more, so we can keep this list somewhat + # pruned. + # + # * nova-next: maintained by nova for unreleased/undefaulted + # things like cellsv2 and placement-api experimental: jobs: - nova-next From 36c3aaa07e638af0b581efa28828e04146ae1364 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 5 Mar 2018 10:03:27 -0500 Subject: [PATCH 0837/1936] Add nova-cells-v1 to experimental queue jobs The legacy-tempest-dsvm-cells job is being moved into the nova repo and renamed to nova-cells-v1. This change adds the new job name to the in-tree definition of the experimental queue jobs that run on devstack changes. A project-config change will depend on this patch to undefine the legacy job name from being used on devstack changes. 
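For reference (assuming the usual OpenStack Zuul/Gerrit setup), jobs in the
experimental pipeline are not run automatically; a reviewer requests them by
leaving a bare comment on the change:

    check experimental
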
Depends-On: https://review.openstack.org/549780 Change-Id: I22fa1411809c46ffc423e0dd1cde0d8f40362635 --- .zuul.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 25bd757ab3..49bf08b091 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -329,8 +329,15 @@ # being experimental any more, so we can keep this list somewhat # pruned. # + # * nova-cells-v1: maintained by nova for cells v1 (nova-cells service); + # nova gates on this job, it's in experimental for testing cells v1 + # changes to devstack w/o gating on it for all devstack changes. # * nova-next: maintained by nova for unreleased/undefaulted # things like cellsv2 and placement-api experimental: jobs: + - nova-cells-v1: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - nova-next From 21221d1ad1462cdcaed4d052c3324ae384b407d4 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 6 Mar 2018 10:08:36 -0500 Subject: [PATCH 0838/1936] Configure nova to auto-calculate compute RPC upgrade levels The openstack-ansible team found a regression in Queens when setting the compute RPC upgrade_levels to 'auto' on a fresh install before any computes had started up. The dependent change fixes the issue in nova but for future proofing against this sort of issue again, we can set the compute RPC upgrade levels in devstack to 'auto' for fresh installs as well. Note that grenade already sets 'auto' for compute upgrade levels, which is why we didn't catch this in grenade testing with the compute RPC 5.0 version bump that caused the issue. Depends-On: https://review.openstack.org/549737/ Change-Id: I07f34dbc09b6108ba8f5b2a83a28c75eb42be495 Related-Bug: #1753443 --- lib/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/nova b/lib/nova index 580f87f277..3b50950bb0 100644 --- a/lib/nova +++ b/lib/nova @@ -506,6 +506,8 @@ function create_nova_conf { # Format logging setup_logging $NOVA_CONF + iniset $NOVA_CONF upgrade_levels compute "auto" + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}" From 5e714d33081455af4bd7a45bb9e60a905ad6f2e7 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Sat, 17 Feb 2018 22:21:26 +0000 Subject: [PATCH 0839/1936] Fix multinode mode for devstack Extend the devstack job so that it can support both single and multinode cases. Multinode mode require extra settings in devstack configuration, some of which as subnode specific, some controller specific. Also keep a simple devstack-multinode job defined for now so we can run a multinode job in devstack gate, until the full tempest multinode job is ready to match the old gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv. Fixing multinode also requires sharing the CA configuration between controller and peers, overlay network configuration for communication between virtual machines and running discover_hosts for nova after the subnode has been setup. The extra orchestration required for multinode is encoded in a dedicated role to allow for jobs in other repos to re-use it. 
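As an illustration of the intended re-use (the job name and the service
override below are invented for this example), a consuming repository can
get a multinode setup simply by switching the nodeset of a child job and,
if needed, tuning the subnode group:

    - job:
        name: my-project-multinode       # hypothetical consumer job
        parent: devstack
        nodeset: openstack-two-node
        group-vars:
          subnode:
            devstack_services:
              c-bak: false               # example: drop a service on subnodes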
Change-Id: I2dcbd9bdb401860820e655d97aa3c4775af2827f --- .zuul.yaml | 44 +++++++++++++---- playbooks/devstack.yaml | 2 +- playbooks/pre.yaml | 30 ++++++++---- roles/orchestrate-devstack/README.rst | 24 ++++++++++ roles/orchestrate-devstack/defaults/main.yaml | 1 + roles/orchestrate-devstack/tasks/main.yaml | 38 +++++++++++++++ roles/sync-devstack-data/README.rst | 12 +++++ roles/sync-devstack-data/defaults/main.yaml | 1 + roles/sync-devstack-data/tasks/main.yaml | 48 +++++++++++++++++++ 9 files changed, 179 insertions(+), 21 deletions(-) create mode 100644 roles/orchestrate-devstack/README.rst create mode 100644 roles/orchestrate-devstack/defaults/main.yaml create mode 100644 roles/orchestrate-devstack/tasks/main.yaml create mode 100644 roles/sync-devstack-data/README.rst create mode 100644 roles/sync-devstack-data/defaults/main.yaml create mode 100644 roles/sync-devstack-data/tasks/main.yaml diff --git a/.zuul.yaml b/.zuul.yaml index 2deb769d30..2ef23c89ff 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -16,19 +16,24 @@ - name: compute1 label: ubuntu-xenial groups: + # Node where tests are executed and test results collected - name: tempest nodes: - controller + # Nodes running the compute service - name: compute nodes: - controller - compute1 + # Nodes that are not the controller - name: subnode nodes: - compute1 + # Switch node for multinode networking setup - name: switch nodes: - controller + # Peer nodes for multinode networking setup - name: peers nodes: - compute1 @@ -45,7 +50,7 @@ all single Devstack jobs, single or multinode. Variables are defined in job.vars, which is what is then used by single node jobs and by multi node jobs for the controller, as well as in - job.group-vars.peers, which is what is used by multi node jobs for peer + job.group-vars.peers, which is what is used by multi node jobs for subnode nodes (everything but the controller). required-projects: - openstack-dev/devstack @@ -76,7 +81,6 @@ # from the location below for all the CI jobs. ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: - # Ignore base set of services setup by test-matrix. # Ignore any default set by devstack. Emit a "disable_all_services". base: false zuul_copy_output: @@ -119,7 +123,7 @@ localrc: True stackenv: True group-vars: - peers: + subnode: devstack_localrc: DATABASE_PASSWORD: secretdatabase RABBIT_PASSWORD: secretrabbit @@ -156,7 +160,9 @@ name: devstack parent: devstack-base description: | - Single or multi node devstack job for integration gate. + Base devstack job for integration gate. + + This base job can be used for single node and multinode devstack jobs. nodeset: openstack-single-node required-projects: - openstack/cinder @@ -178,6 +184,15 @@ NOVA_VNC_ENABLED: true VNCSERVER_LISTEN: 0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + # Multinode specific settings + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + devstack_localconf: + post-config: + $NEUTRON_CONF: + DEFAULT: + global_physnet_mtu: "{{ external_bridge_mtu }}" devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. @@ -254,16 +269,25 @@ # Test matrix emits ceilometer but ceilomenter is not installed in the # integrated gate, so specifying the services has not effect. 
# ceilometer-*: false + devstack_localrc: + # Multinode specific settings + HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + # Subnode specific settings + DATABASE_TYPE: mysql + GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" + Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - job: name: devstack-multinode - parent: devstack-base - description: Base devstack multinode job + parent: devstack nodeset: openstack-two-node - # NOTE(andreaf) The multinode job is useful to see the setup of different - # services on different nodes, however the subnode configuration is not - # ready yet. Until then this job should stay non-voting. - voting: false + description: | + Simple multinode test to verify multinode functionality on devstack side. + This is not meant to be used as a parent job. - job: name: devstack-tox-base diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml index ede8382632..93d19f1c7a 100644 --- a/playbooks/devstack.yaml +++ b/playbooks/devstack.yaml @@ -1,3 +1,3 @@ - hosts: all roles: - - run-devstack + - orchestrate-devstack diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index 6681fb20a5..4689a6354f 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -1,15 +1,25 @@ -- hosts: controller - roles: - - role: test-matrix - test_matrix_role: primary - -- hosts: subnode - roles: - - role: test-matrix - test_matrix_role: subnode - - hosts: all + pre_tasks: + - name: Gather minimum local MTU + set_fact: + local_mtu: > + {% set mtus = [] -%} + {% for interface in ansible_interfaces -%} + {% set interface_variable = 'ansible_' + interface -%} + {% if interface_variable in hostvars[inventory_hostname] -%} + {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%} + {% endif -%} + {% endfor -%} + {{- mtus|min -}} + - name: Calculate external_bridge_mtu + # 50 bytes is overhead for vxlan (which is greater than GRE + # allowing us to use either overlay option with this MTU. + # TODO(andreaf) This should work, but it may have to be reconcilied with + # the MTU setting used by the multinode setup roles in multinode pre.yaml + set_fact: + external_bridge_mtu: "{{ local_mtu | int - 50 }}" roles: + - test-matrix - configure-swap - setup-stack-user - setup-tempest-user diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst new file mode 100644 index 0000000000..7803ee4d74 --- /dev/null +++ b/roles/orchestrate-devstack/README.rst @@ -0,0 +1,24 @@ +Orchestrate a devstack + +Runs devstack in a multinode scenario, with one controller node +and a group of subnodes. + +The reason for this role is so that jobs in other repository may +run devstack in their plays with no need for re-implementing the +orchestration logic. + +The "run-devstack" role is available to run devstack with no +orchestration. + +This role sets up the controller and CA first, it then pushes CA +data to sub-nodes and run devstack there. The only requirement for +this role is for the controller inventory_hostname to be "controller" +and for all sub-nodes to be defined in a group called "subnode". + + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/orchestrate-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml new file mode 100644 index 0000000000..12db58c520 --- /dev/null +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -0,0 +1,38 @@ +- name: Run devstack on the controller + include_role: + name: run-devstack + when: inventory_hostname == 'controller' + +- name: Setup devstack on sub-nodes + block: + + - name: Sync CA data to subnodes (when any) + # Only do this if the tls-proxy service is defined and enabled + include_role: + name: sync-devstack-data + when: devstack_services['tls-proxy']|default(false) + + - name: Run devstack on the sub-nodes + include_role: + name: run-devstack + when: inventory_hostname in groups['subnode'] + + - name: Discover hosts + # Discovers compute nodes (subnodes) and maps them to cells. Only run + # on the controller node. + # NOTE(mriedem): We want to remove this if/when nova supports + # auto-registration of computes with cells, but that's not happening in + # Ocata. + # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts + # function in devstack gate. Since this is now in devstack, which is + # branched, we know that the discover_hosts tool exists. + become: true + become_user: stack + shell: ./tools/discover_hosts.sh + args: + chdir: "{{ devstack_base_dir }}/devstack" + when: inventory_hostname == 'controller' + + when: + - '"controller" in hostvars' + - '"subnode" in groups' diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst new file mode 100644 index 0000000000..500e8cccc4 --- /dev/null +++ b/roles/sync-devstack-data/README.rst @@ -0,0 +1,12 @@ +Sync devstack data for multinode configurations + +Sync any data files which include certificates to be used if TLS is enabled. +This role must be executed on the controller and it pushes data to all +subnodes. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. 
diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/sync-devstack-data/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml new file mode 100644 index 0000000000..46000159d4 --- /dev/null +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -0,0 +1,48 @@ +- name: Ensure the data folder exists + become: true + file: + path: "{{ devstack_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the CA folder exists + become: true + file: + path: "{{ devstack_base_dir }}/data/CA" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Pull the CA certificate and folder + become: true + synchronize: + src: "{{ item }}" + dest: "{{ zuul.executor.work_root }}/{{ item | basename }}" + mode: pull + with_items: + - "{{ devstack_base_dir }}/data/ca-bundle.pem" + - "{{ devstack_base_dir }}/data/CA" + when: inventory_hostname == 'controller' + +- name: Push the CA certificate + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/ca-bundle.pem" + dest: "{{ devstack_base_dir }}/data/ca-bundle.pem" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Push the CA folder + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/CA/" + dest: "{{ devstack_base_dir }}/data/" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' From 067e36582b8d1589c831524e921f396b270d4f44 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Fri, 23 Feb 2018 16:04:46 +0000 Subject: [PATCH 0840/1936] Document jobs Add inline documentation to jobs and render it in the jobs doc page. Adjust the roles page to match jobs for title and filename. Change-Id: I47a2b4b379c8517b0dea59a75943f3f871c29046 --- .zuul.yaml | 19 +++++++++++++++++++ doc/source/index.rst | 3 ++- doc/source/roles.rst | 4 ---- doc/source/zuul_jobs.rst | 4 ++++ doc/source/zuul_roles.rst | 4 ++++ 5 files changed, 29 insertions(+), 5 deletions(-) delete mode 100644 doc/source/roles.rst create mode 100644 doc/source/zuul_jobs.rst create mode 100644 doc/source/zuul_roles.rst diff --git a/.zuul.yaml b/.zuul.yaml index 2ef23c89ff..693cd774b1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -163,6 +163,25 @@ Base devstack job for integration gate. This base job can be used for single node and multinode devstack jobs. + + With a single node nodeset, this job sets up an "all-in-one" (aio) + devstack with the six OpenStack services included in the devstack tree: + keystone, glance, cinder, neutron, nova and swift. + + With a two node nodeset, this job sets up an aio + compute node. + The controller can be customised using host-vars.controller, the + sub-nodes can be customised using group-vars.subnode. + + Descendent jobs can enable / disable services, add devstack configuration + options, enable devstack plugins, configure log files or directories to be + transferred to the log server. + + The job assumes that there is only one controller node. The number of + subnodes can be scaled up seamlessly by setting a custom nodeset in + job.nodeset. + + The run playbook consists of a single role, so it can be easily rewritten + and extended. 
nodeset: openstack-single-node required-projects: - openstack/cinder diff --git a/doc/source/index.rst b/doc/source/index.rst index 9254c23cae..4c53203d97 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -139,7 +139,8 @@ up to date to the latest devstack). Enable :doc:`devstack plugins ` to support additional services, features, and configuration not present in base devstack. -Use devstack in your CI with :doc:`Ansible roles ` for Zuul V3. +Use devstack in your CI with :doc:`Ansible roles ` and +:doc:`Jobs ` for Zuul V3. Get :doc:`the big picture ` of what we are trying to do with devstack, and help us by :doc:`contributing to the project diff --git a/doc/source/roles.rst b/doc/source/roles.rst deleted file mode 100644 index 5baa1e4e82..0000000000 --- a/doc/source/roles.rst +++ /dev/null @@ -1,4 +0,0 @@ -Roles -===== - -.. zuul:autoroles:: diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst new file mode 100644 index 0000000000..cf203a8973 --- /dev/null +++ b/doc/source/zuul_jobs.rst @@ -0,0 +1,4 @@ +Zuul CI Jobs +============ + +.. zuul:autojobs:: diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst new file mode 100644 index 0000000000..4939281057 --- /dev/null +++ b/doc/source/zuul_roles.rst @@ -0,0 +1,4 @@ +Zuul CI Roles +============= + +.. zuul:autoroles:: From f32f3f5266fe9a88955129fb01a9284465b9e7e5 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Mon, 19 Feb 2018 21:45:22 +0000 Subject: [PATCH 0841/1936] Document v2 to v3 devstack job migration Document how to map DEVSTACK_GATE flags into the zuul v3 ansible world. This is just an initial structure, the idea is to document most of the flags as well as provide example in-line and links to finished jobs. Change-Id: I377ebb529bcd8f4971906563c577e8cfc48b98e6 --- doc/source/index.rst | 3 +- doc/source/zuul_ci_jobs_migration.rst | 301 ++++++++++++++++++++++++++ 2 files changed, 303 insertions(+), 1 deletion(-) create mode 100644 doc/source/zuul_ci_jobs_migration.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 4c53203d97..2ff4ff088a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -140,7 +140,8 @@ Enable :doc:`devstack plugins ` to support additional services, features, and configuration not present in base devstack. Use devstack in your CI with :doc:`Ansible roles ` and -:doc:`Jobs ` for Zuul V3. +:doc:`Jobs ` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul +V3 with this full migration :doc:`how-to `. Get :doc:`the big picture ` of what we are trying to do with devstack, and help us by :doc:`contributing to the project diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst new file mode 100644 index 0000000000..c00f06e41a --- /dev/null +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -0,0 +1,301 @@ +=============================== +Migrating Zuul V2 CI jobs to V3 +=============================== + +The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved to +the new CI system. All jobs have been migrated automatically to a format +compatible with Zuul v3; the jobs produced in this way however are suboptimal +and do not use the capabilities introduced by Zuul v3, which allow for re-use of +job parts, in the form of Ansible roles, as well as inheritance between jobs. + +DevStack hosts a set of roles, plays and jobs that can be used by other +repositories to define their DevStack based jobs. To benefit from them, jobs +must be migrated from the legacy v2 ones into v3 native format. 
+ +This document provides guidance and examples to make the migration process as +painless and smooth as possible. + +Where to host the job definitions. +================================== + +In Zuul V3 jobs can be defined in the repository that contains the code they +excercise. If you are writing CI jobs for an OpenStack service you can define +your DevStack based CI jobs in one of the repositories that host the code for +your service. If you have a branchless repo, like a Tempest plugin, that is +a convenient choice to host the job definitions since job changes do not have +to be backported. For example, see the beginning of the ``.zuul.yaml`` from the +sahara Tempest plugin repo: + +.. code:: yaml + + # In http://git.openstack.org/cgit/openstack/sahara-tests/tree/.zuul.yaml: + - job: + name: sahara-tests-tempest + description: | + Run Tempest tests from the Sahara plugin. + parent: devstack-tempest + +Which base job to start from +============================ + +If your job needs an OpenStack cloud deployed via DevStack, but you don't plan +on running Tempest tests, you can start from one of the base +:doc:`jobs ` defined in the DevStack repo. + +The ``devstack`` job can be used for both single-node jobs and multi-node jobs, +and it includes the list of services used in the integrated gate (keystone, +glance, nova, cinder, neutron and swift). Different topologies can be achieved +by switching the nodeset used in the child job. + +The ``devstack-base`` job is similar to ``devstack`` but it does not specify any +required repo or service to be run in DevStack. It can be useful to setup +children jobs that use a very narrow DevStack setup. + +If your job needs an OpenStack cloud deployed via DevStack, and you do plan +on running Tempest tests, you can start from one of the base jobs defined in the +Tempest repo. + +The ``devstack-tempest`` job can be used for both single-node jobs and +multi-node jobs. Different topologies can be achieved by switching the nodeset +used in the child job. + +Jobs can be customized as follows without writing any Ansible code: + +- add and/or remove DevStack services +- add or modify DevStack and services configuration +- install DevStack plugins +- extend the number of sub-nodes (multinode only) +- define extra log files and/or directories to be uploaded on logs.o.o +- define extra log file extensions to be rewritten to .txt for ease of access + +Tempest jobs can be further customized as follows: + +- define the Tempest tox environment to be used +- define the test concurrency +- define the test regular expression + +Writing Ansible code, or importing existing custom roles, jobs can be further +extended by: + +- adding pre and/or post playbooks +- overriding the run playbook, add custom roles + +The (partial) example below extends a Tempest single node base job +"devstack-tempest" in the Kuryr repository. The parent job name is defined in +job.parent. + +.. 
code:: yaml + + # https://git.openstack.org/cgit/openstack/kuryr-kubernetes/tree/.zuul.yaml: + - job: + name: kuryr-kubernetes-tempest-base + parent: devstack-tempest + description: Base kuryr-kubernetes-job + required-projects: + - openstack/devstack-plugin-container + - openstack/kuryr + - openstack/kuryr-kubernetes + - openstack/kuryr-tempest-plugin + - openstack/neutron-lbaas + vars: + tempest_test_regex: '^(kuryr_tempest_plugin.tests.)' + tox_envlist: 'all' + devstack_localrc: + KURYR_K8S_API_PORT: 8080 + TEMPEST_PLUGINS: '/opt/stack/kuryr-tempest-plugin' + devstack_services: + kubernetes-api: true + kubernetes-controller-manager: true + kubernetes-scheduler: true + kubelet: true + kuryr-kubernetes: true + (...) + devstack_plugins: + kuryr-kubernetes: https://git.openstack.org/openstack/kuryr + devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container + neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas + (...) + +Job variables +============= + +Variables can be added to the job in three different places: + +- job.vars: these are global variables available to all node in the nodeset +- job.host-vars.[HOST]: these are variables available only to the specified HOST +- job.group-vars.[GROUP]: these are variables available only to the specified + GROUP + +Zuul merges dict variables through job inheritance. Host and group variables +override variables with the same name defined as global variables. + +In the example below, for the sundaes job, hosts that are not part of the +subnode group will run vanilla and chocolate. Hosts in the subnode group will +run stracciatella and strawberry. + +.. code:: yaml + + - job: + name: ice-creams + vars: + devstack_service: + vanilla: true + chocolate: false + group-vars: + subnode: + devstack_service: + pistacchio: true + stracciatella: true + + - job: + name: sundaes + parent: ice-creams + vars: + devstack_service: + chocolate: true + group-vars: + subnode: + devstack_service: + strawberry: true + pistacchio: false + + +DevStack Gate Flags +=================== + +The old CI system worked using a combination of DevStack, Tempest and +devstack-gate to setup a test environment and run tests against it. With Zuul +V3, the logic that used to live in devstack-gate is moved into different repos, +including DevStack, Tempest and grenade. + +DevStack-gate exposes an interface for job definition based on a number of +DEVSTACK_GATE_* environment variables, or flags. This guide shows how to map +DEVSTACK_GATE flags into the new +system. + +The repo column indicates in which repository is hosted the code that replaces +the devstack-gate flag. The new implementation column explains how to reproduce +the same or a similar behaviour in Zuul v3 jobs. For localrc settings, +devstack-gate defined a default value. In ansible jobs the default is either the +value defined in the parent job, or the default from DevStack, if any. + +============================================== ============= ================== +DevStack gate flag Repo New implementation +============================================== ============= ================== +OVERRIDE_ZUUL_BRANCH zuul override-checkout: + [branch] + in the job definition. +DEVSTACK_GATE_NET_OVERLAY zuul-jobs A bridge called + br-infra is set up for + all jobs that inherit + from multinode with + a dedicated `bridge role `_. +DEVSTACK_GATE_FEATURE_MATRIX devstack-gate ``test_matrix_features`` + variable of the + test-matrix role in + devstack-gate. 
This + is a temporary + solution, feature + matrix will go away. + In the future services + will be defined in + jobs only. +DEVSTACK_CINDER_VOLUME_CLEAR devstack *CINDER_VOLUME_CLEAR: true/false* + in devstack_localrc + in the job vars. +DEVSTACK_GATE_NEUTRON devstack True by default. To + disable, disable all + neutron services in + devstack_services in + the job definition. +DEVSTACK_GATE_CONFIGDRIVE devstack *FORCE_CONFIG_DRIVE: true/false* + in devstack_localrc + in the job vars. +DEVSTACK_GATE_INSTALL_TESTONLY devstack *INSTALL_TESTONLY_PACKAGES: true/false* + in devstack_localrc + in the job vars. +DEVSTACK_GATE_VIRT_DRIVER devstack *VIRT_DRIVER: [virt driver]* + in devstack_localrc + in the job vars. +DEVSTACK_GATE_LIBVIRT_TYPE devstack *LIBVIRT_TYPE: [libvirt type]* + in devstack_localrc + in the job vars. +DEVSTACK_GATE_TEMPEST devstack Defined by the job + tempest that is used. The + ``devstack`` job only + runs devstack. + The ``devstack-tempest`` + one triggers a Tempest + run as well. +DEVSTACK_GATE_TEMPEST_FULL tempest *tox_envlist: full* + in the job vars. +DEVSTACK_GATE_TEMPEST_ALL tempest *tox_envlist: all* + in the job vars. +DEVSTACK_GATE_TEMPEST_ALL_PLUGINS tempest *tox_envlist: all-plugin* + in the job vars. +DEVSTACK_GATE_TEMPEST_SCENARIOS tempest *tox_envlist: scenario* + in the job vars. +TEMPEST_CONCURRENCY tempest *tempest_concurrency: [value]* + in the job vars. This + is available only on + jobs that inherit from + ``devstack-tempest`` + down. +DEVSTACK_GATE_TEMPEST_NOTESTS tempest *tox_envlist: venv-tempest* + in the job vars. This + will create Tempest + virtual environment + but run no tests. +DEVSTACK_GATE_SMOKE_SERIAL tempest *tox_envlist: smoke-serial* + in the job vars. +DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION tempest *tox_envlist: full-serial* + in the job vars. + *TEMPEST_ALLOW_TENANT_ISOLATION: false* + in devstack_localrc in + the job vars. +============================================== ============= ================== + +The following flags have not been migrated yet or are legacy and won't be +migrated at all. + +===================================== ====== ========================== +DevStack gate flag Status Details +===================================== ====== ========================== +DEVSTACK_GATE_TOPOLOGY WIP The topology depends on the base + job that is used and more + specifically on the nodeset + attached to it. The new job + format allows project to define + the variables to be passed to + every node/node-group that exists + in the topology. Named topologies + that include the nodeset and the + matching variables can be defined + in the form of base jobs. +DEVSTACK_GATE_GRENADE TBD Grenade Zuul V3 jobs will be + hosted in the grenade repo. +GRENADE_BASE_BRANCH TBD Grenade Zuul V3 jobs will be + hosted in the grenade repo. +DEVSTACK_GATE_NEUTRON_DVR TBD Depends on multinode support. +DEVSTACK_GATE_EXERCISES TBD Can be done on request. +DEVSTACK_GATE_IRONIC TBD This will probably be implemented + on ironic side. +DEVSTACK_GATE_IRONIC_DRIVER TBD This will probably be implemented + on ironic side. +DEVSTACK_GATE_IRONIC_BUILD_RAMDISK TBD This will probably be implemented + on ironic side. +DEVSTACK_GATE_POSTGRES Legacy This flag exists in d-g but the + only thing that it does is + capture postgres logs. This is + already supported by the roles in + post, so the flag is useless in + the new jobs. postgres itself can + be enabled via the + devstack_service job variable. +DEVSTACK_GATE_ZEROMQ Legacy This has no effect in d-g. 
+DEVSTACK_GATE_MQ_DRIVER Legacy This has no effect in d-g. +DEVSTACK_GATE_TEMPEST_STRESS_ARGS Legacy Stress is not in Tempest anymore. +DEVSTACK_GATE_TEMPEST_HEAT_SLOW Legacy This is not used anywhere. +DEVSTACK_GATE_CELLS Legacy This has no effect in d-g. +DEVSTACK_GATE_NOVA_API_METADATA_SPLIT Legacy This has no effect in d-g. +===================================== ====== ========================== From dc7b4294632172d0b743f98448942fe260a8a3ff Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 19 Sep 2017 10:52:32 +0000 Subject: [PATCH 0842/1936] Fix running with SERVICE_IP_VERSION=6 - There are some locations where we need the raw IPv6 address instead of the url-quoted version enclosed in brackets. - Make nova-api-metadata service listen on IPv6 when we need that. - Use SERVICE_HOST instead of HOST_IP for TLS_IP. Change-Id: Id074be38ee95754e88b7219de7d9beb06f796fad Partial-Bug: 1656329 --- functions-common | 5 +++++ lib/cinder | 2 +- lib/databases/mysql | 2 +- lib/glance | 6 +++--- lib/neutron-legacy | 4 ++-- lib/nova | 4 ++-- lib/swift | 2 +- lib/tls | 12 ++++-------- stackrc | 4 ++-- 9 files changed, 21 insertions(+), 20 deletions(-) diff --git a/functions-common b/functions-common index df295a3395..5f933d67cf 100644 --- a/functions-common +++ b/functions-common @@ -2049,6 +2049,11 @@ function is_ipv4_address { fi } +# Remove "[]" around urlquoted IPv6 addresses +function ipv6_unquote { + echo $1 | tr -d [] +} + # Gracefully cp only if source file/dir exists # cp_it source destination function cp_it { diff --git a/lib/cinder b/lib/cinder index c0356fe09c..4d6085fe48 100644 --- a/lib/cinder +++ b/lib/cinder @@ -67,7 +67,7 @@ CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh diff --git a/lib/databases/mysql b/lib/databases/mysql index 0089663285..cf61056389 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -93,7 +93,7 @@ function configure_database_mysql { # Change bind-address from localhost (127.0.0.1) to any (::) and # set default db type to InnoDB - iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 diff --git a/lib/glance b/lib/glance index 95d2450da7..4a3e25e9a2 100644 --- a/lib/glance +++ b/lib/glance @@ -65,7 +65,7 @@ fi # Glance connection info. Note the port must be specified. 
GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST} -GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} @@ -152,7 +152,7 @@ function configure_glance { # Store specific configs iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST + iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST) # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then @@ -228,7 +228,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD - iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST + iniset $GLANCE_CACHE_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST) # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9701ee7632..3c6ec68b67 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -111,7 +111,7 @@ Q_HOST=${Q_HOST:-$SERVICE_HOST} # Default protocol Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} # Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} # Default auth strategy @@ -121,7 +121,7 @@ Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) # Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} # Allow Overlapping IP among subnets Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} diff --git a/lib/nova b/lib/nova index 580f87f277..da07579b74 100644 --- a/lib/nova +++ b/lib/nova @@ -92,7 +92,7 @@ NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} -NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} # Option to enable/disable config drive @@ -507,7 +507,7 @@ function create_nova_conf { setup_logging $NOVA_CONF write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}" + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" diff --git a/lib/swift b/lib/swift index 1187846dfc..62b8a32fed 100644 --- a/lib/swift +++ b/lib/swift @@ -56,7 +56,7 @@ 
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} -SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # TODO: add logging to different location. diff --git a/lib/tls b/lib/tls index a72b7084d0..e3ed3cc2ac 100644 --- a/lib/tls +++ b/lib/tls @@ -37,7 +37,7 @@ if is_service_enabled tls-proxy; then # TODO(dtroyer): revisit this below after the search for HOST_IP has been done - TLS_IP=${TLS_IP:-$SERVICE_IP} + TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)} fi DEVSTACK_HOSTNAME=$(hostname -f) @@ -67,9 +67,9 @@ function configure_CA { # build common config file # Verify ``TLS_IP`` is good - if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then + if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then # auto-discover has changed the IP - TLS_IP=$HOST_IP + TLS_IP=$(ipv6_unquote $SERVICE_HOST) fi } @@ -228,6 +228,7 @@ function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then # Lie to let incomplete match routines work + # see https://bugs.python.org/issue23239 TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" @@ -246,11 +247,6 @@ function make_cert { local alt_names=$4 if [ "$common_name" != "$SERVICE_HOST" ]; then - if [[ -z "$alt_names" ]]; then - alt_names="DNS:$SERVICE_HOST" - else - alt_names="$alt_names,DNS:$SERVICE_HOST" - fi if is_ipv4_address "$SERVICE_HOST" ; then alt_names="$alt_names,IP:$SERVICE_HOST" fi diff --git a/stackrc b/stackrc index 59442529f4..0b49fbca57 100644 --- a/stackrc +++ b/stackrc @@ -894,10 +894,10 @@ if [[ "$SERVICE_IP_VERSION" == 6 ]]; then DEF_SERVICE_HOST=[$HOST_IPV6] DEF_SERVICE_LOCAL_HOST=::1 - DEF_SERVICE_LISTEN_ADDRESS=:: + DEF_SERVICE_LISTEN_ADDRESS="[::]" fi -# This is either 0.0.0.0 for IPv4 or :: for IPv6 +# This is either 0.0.0.0 for IPv4 or [::] for IPv6 SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}} # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for From 02bb57bfefbe3658b570060a9eb084377dddaddd Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Fri, 9 Mar 2018 17:12:38 +0000 Subject: [PATCH 0843/1936] Enforce linear strategy for orchestrate-devstack Document that orchestrate-devstack requires a linear strategy in the invoking play. Also enforce the strategy in devstack.yaml. Change-Id: Ia081225ec2be959fc5a4ddfd491f526296a8ca10 --- playbooks/devstack.yaml | 4 ++++ roles/orchestrate-devstack/README.rst | 1 + 2 files changed, 5 insertions(+) diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml index 93d19f1c7a..d0906380ab 100644 --- a/playbooks/devstack.yaml +++ b/playbooks/devstack.yaml @@ -1,3 +1,7 @@ - hosts: all + # This is the default strategy, however since orchestrate-devstack requires + # "linear", it is safer to enforce it in case this is running in an + # environment configured with a different default strategy. 
+ strategy: linear roles: - orchestrate-devstack diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst index 7803ee4d74..097dcea55e 100644 --- a/roles/orchestrate-devstack/README.rst +++ b/roles/orchestrate-devstack/README.rst @@ -15,6 +15,7 @@ data to sub-nodes and run devstack there. The only requirement for this role is for the controller inventory_hostname to be "controller" and for all sub-nodes to be defined in a group called "subnode". +This role needs to be invoked from a playbook that uses a "linear" strategy. **Role Variables** From 4404f680f2447912dcdbcecde14339bc6d1f5175 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 2 Mar 2018 00:37:58 +0100 Subject: [PATCH 0844/1936] Add an openSUSE Tumbleweed devstack non-voting check job This adds the necessary fixes to pass a devstack run on openSUSE Tumbleweed. Also removes opensuse 42.2 as it is EOL for some time already and no longer actively tested in the OpenStack infra. Depends-On: I1b68c08c07cf6653ea58506f738cbe0054b38f3a Change-Id: I2894482deef063fd02b0818c695a2ddbf6767039 --- .zuul.yaml | 20 +++++++++++++++++++- functions-common | 3 +++ lib/rpc_backend | 15 ++++++++++++++- lib/swift | 6 +++++- stack.sh | 2 +- tools/install_pip.sh | 4 ++-- 6 files changed, 44 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 25bd757ab3..ebe5a78ca5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -28,6 +28,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-opensuse-tumbleweed + nodes: + - name: controller + label: opensuse-tumbleweed + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-fedora-27 nodes: @@ -238,10 +248,17 @@ - job: name: devstack-platform-opensuse-423 parent: tempest-full - description: OpenSuSE 43.2 platform test + description: openSUSE 43.2 platform test nodeset: devstack-single-node-opensuse-423 voting: false +- job: + name: devstack-platform-opensuse-tumbleweed + parent: tempest-full + description: openSUSE Tumbleweed platform test + nodeset: devstack-single-node-opensuse-tumbleweed + voting: false + - job: name: devstack-platform-fedora-27 parent: tempest-full @@ -318,6 +335,7 @@ - devstack - devstack-platform-centos-7 - devstack-platform-opensuse-423 + - devstack-platform-opensuse-tumbleweed - devstack-platform-fedora-27 - devstack-multinode - devstack-unit-tests diff --git a/functions-common b/functions-common index df295a3395..279cfcfdd8 100644 --- a/functions-common +++ b/functions-common @@ -373,6 +373,9 @@ function GetDistro { DISTRO="f$os_RELEASE" elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then DISTRO="opensuse-$os_RELEASE" + # Tumbleweed uses "n/a" as a codename, and the release is a datestring + # like 20180218, so not very useful. 
+ [ "$os_CODENAME" = "n/a" ] && DISTRO="opensuse-tumbleweed" elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then # just use major release DISTRO="sle${os_RELEASE%.*}" diff --git a/lib/rpc_backend b/lib/rpc_backend index 44d0717116..1c7c82fcd3 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -52,7 +52,20 @@ function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - if is_fedora; then + if is_suse; then + install_package rabbitmq-server-plugins + # the default systemd socket activation only listens on the loopback interface + # which causes rabbitmq to try to start its own epmd + sudo mkdir -p /etc/systemd/system/epmd.socket.d + cat </dev/null +[Socket] +ListenStream= +ListenStream=[::]:4369 +EOF + sudo systemctl daemon-reload + sudo systemctl restart epmd.socket epmd.service + fi + if is_fedora || is_suse; then sudo systemctl enable rabbitmq-server fi fi diff --git a/lib/swift b/lib/swift index 1187846dfc..6cda9c84b2 100644 --- a/lib/swift +++ b/lib/swift @@ -557,7 +557,11 @@ EOF local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} - sudo install -d -o ${STACK_USER} -g adm ${swift_log_dir}/hourly + local swift_log_group=adm + if is_suse; then + swift_log_group=root + fi + sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ diff --git a/stack.sh b/stack.sh index ba546c0ab1..caef76e18a 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f25|f26|f27|opensuse-42.2|opensuse-42.3|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f25|f26|f27|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/tools/install_pip.sh b/tools/install_pip.sh index dbe52782a4..1bd7392b9d 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -129,10 +129,10 @@ get_versions # Eradicate any and all system packages -# Python in fedora depends on the python-pip package so removing it +# Python in fedora/suse depends on the python-pip package so removing it # results in a nonfunctional system. pip on fedora installs to /usr so pip # can safely override the system pip for all versions of fedora -if ! is_fedora ; then +if ! is_fedora && ! 
is_suse; then uninstall_package python-pip uninstall_package python3-pip fi From 1c9f1bf41435dc79234c5fef33b144453d423bd5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 12 Mar 2018 06:14:09 +0000 Subject: [PATCH 0845/1936] Updated from generate-devstack-plugins-list Change-Id: I5147dda76b476a6d3caf977077f41f904b304059 --- doc/source/plugin-registry.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 591e226645..04b7698fbd 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -31,12 +31,12 @@ barbican `git://git.openstack.org/openstack/barbic bilean `git://git.openstack.org/openstack/bilean `__ blazar `git://git.openstack.org/openstack/blazar `__ broadview-collector `git://git.openstack.org/openstack/broadview-collector `__ +castellan-ui `git://git.openstack.org/openstack/castellan-ui `__ ceilometer `git://git.openstack.org/openstack/ceilometer `__ ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm `__ cloudkitty `git://git.openstack.org/openstack/cloudkitty `__ collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin `__ congress `git://git.openstack.org/openstack/congress `__ -cue `git://git.openstack.org/openstack/cue `__ cyborg `git://git.openstack.org/openstack/cyborg `__ designate `git://git.openstack.org/openstack/designate `__ devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ @@ -71,7 +71,6 @@ ironic `git://git.openstack.org/openstack/ironic ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ ironic-ui `git://git.openstack.org/openstack/ironic-ui `__ -k8s-cloud-provider `git://git.openstack.org/openstack/k8s-cloud-provider `__ karbor `git://git.openstack.org/openstack/karbor `__ karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ keystone `git://git.openstack.org/openstack/keystone `__ From db5a5b3c20b4dfeb2325594e700adb3b98d7d8fd Mon Sep 17 00:00:00 2001 From: Sam Betts Date: Mon, 12 Mar 2018 10:34:01 +0000 Subject: [PATCH 0846/1936] Remove use of git -C option from tempest venv creation The -C option is not available in git versions older than "1.8.5" which are still shipped by several distributions including centos 7. Due to this incompatibility the patch has broken third party CI for Cisco on Ironic. Change-Id: I09a6f83f8b2fee870e6e1c50cbfdf2da4d70dfb2 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index ab1d63980b..f60b477c72 100644 --- a/lib/tempest +++ b/lib/tempest @@ -556,7 +556,7 @@ function configure_tempest { fi # The requirements might be on a different branch, while tempest needs master requirements. - git -C $REQUIREMENTS_DIR show master:upper-constraints.txt > u-c-m.txt + (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt # Auth: From dc78ef8880b65f56abb0e0c2b80c25d9a0a495e1 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 12 Mar 2018 14:48:53 +0000 Subject: [PATCH 0847/1936] Save rc from stack.sh in run-devstack role Avoid overriding the failure state of stack.sh by the trailing echo command. 
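To illustrate the shell behaviour being addressed (a standalone sketch, not
part of the change): the status reported for a shell task is that of its
last command, so the trailing echo masks a failure unless the status is
saved first:

    false                      # stands in for a failing ./stack.sh
    echo "*** FINISHED ***"    # succeeds, so $? becomes 0
    echo $?                    # prints 0 - the failure from 'false' is lost

Capturing rc=$? right after stack.sh and finishing with 'exit $rc' lets the
task report the real result.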
Change-Id: I74820709edc98f67ff1a0c620ce5db3a3571b7f5 --- roles/run-devstack/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml index 1ff82950e4..f58b31d477 100644 --- a/roles/run-devstack/tasks/main.yaml +++ b/roles/run-devstack/tasks/main.yaml @@ -2,7 +2,9 @@ shell: cmd: | ./stack.sh 2>&1 + rc=$? echo "*** FINISHED ***" + exit $rc args: chdir: "{{devstack_base_dir}}/devstack" become: true From c81947a37d06fa07a53a891750e4864b89622e58 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 23 Jan 2018 09:30:31 +0000 Subject: [PATCH 0848/1936] Remove remnants of SCREEN_LOGDIR These appear to have been missed in commit 'cdba1b371'. Change-Id: Ibb7fd7a0d54750c16360dfceb8e6b024ed8f504e --- clean.sh | 3 --- stackrc | 1 - 2 files changed, 4 deletions(-) diff --git a/clean.sh b/clean.sh index 2333596c1f..a29ebd94f0 100755 --- a/clean.sh +++ b/clean.sh @@ -122,9 +122,6 @@ fi if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then sudo rm -rf $LOGDIR fi -if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then - sudo rm -rf $SCREEN_LOGDIR -fi # Clean out the sytemd user unit files if systemd was used. if [[ "$USE_SYSTEMD" = "True" ]]; then diff --git a/stackrc b/stackrc index 59442529f4..e8c35f423e 100644 --- a/stackrc +++ b/stackrc @@ -949,7 +949,6 @@ if [[ -z "${LOGDIR:-}" ]]; then fi # ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs -# ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks # System-wide ulimit file descriptors override ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048} From 165199eebdef5d43bdc34683bdad9054f8c897d0 Mon Sep 17 00:00:00 2001 From: Ivan Kolodyazhny Date: Mon, 6 Nov 2017 18:17:39 +0200 Subject: [PATCH 0849/1936] Remove default value for Cinder Auth Strategy Cinder supports both noauth and keystone auth mode. 
So now we can configure this value via local.conf: [[post-config|$CINDER_CONF]] [DEFAULT] auth_strategy = noauth Change-Id: I1e434362117ab30dae71a8f3a80bc139e78f51bc --- lib/cinder | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 655908c2e4..b67fe261be 100644 --- a/lib/cinder +++ b/lib/cinder @@ -227,7 +227,6 @@ function configure_cinder { configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR - iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER" @@ -529,7 +528,17 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name} + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} + fi done fi } From e6faf84ec347470b32362098e69fe2b1844d3fed Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 13 Mar 2018 12:21:34 +0000 Subject: [PATCH 0850/1936] Revert to using neutron-legacy based services The patch to remove the use of the test-matrix [0] also switched from using the neutron-legacy based service names (q-*) to the new neutron-* names. However it turns out that the new implementation is not yet working properly for most neutron-consuming projects, so we switch back to the previous situation for now. [0] https://review.openstack.org/546765 Change-Id: Id6de87211d6c4ea8fd14aa9203d8d5b17e9e2f04 --- .zuul.yaml | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5fda6b155b..4636b8cd81 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -280,12 +280,20 @@ n-sch: true placement-api: true # Neutron services - neutron-api: true - neutron-agent: true - neutron-dhcp: true - neutron-l3: true - neutron-metadata-agent: true - neutron-metering: true + # We need to keep using the neutron-legacy based services for + # now until all issues with the new lib/neutron code are solved + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + q-svc: true + # neutron-api: true + # neutron-agent: true + # neutron-dhcp: true + # neutron-l3: true + # neutron-metadata-agent: true + # neutron-metering: true # Swift services s-account: true s-container: true From cb1448495b3c8ea63febeeda862040447839f716 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 6 Mar 2018 19:22:59 +0000 Subject: [PATCH 0851/1936] Add bionic as supported distro Add the upcoming release Ubuntu 18.04 Bionic Beaver to the list of supported distros. Drop the now unsupported 17.04 (zesty) instead. 
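For completeness (not part of this change), platforms still missing from the
list can be attempted anyway by bypassing the check, as the warning printed
by stack.sh suggests, for example:

    FORCE=yes ./stack.sh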
Change-Id: Iea0b4bfdc510797f7886fac96eff6fdfb730252d --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index badc1a1047..ce6e6fe83d 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|zesty|artful|stretch|jessie|f25|f26|f27|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f25|f26|f27|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 18abffb72b514e0b364f54459df62c2ebc12d4bf Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 14 Mar 2018 20:09:14 -0400 Subject: [PATCH 0852/1936] Mention test-config phase in config docs I had to dig into the stack.sh code to figure out why my tempest.conf post-config wasn't work, and it's because post-config isn't the thing to use to configure tempest, test-config is. Change-Id: Ic5bbe36b5d44880d0a3a602f653b4f61fd89e9c8 Related-Bug: #1755947 --- doc/source/configuration.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 1d02395058..fc0a620441 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -41,6 +41,7 @@ The defined phases are: - **extra** - runs after services are started and before any files in ``extra.d`` are executed - **post-extra** - runs after files in ``extra.d`` are executed +- **test-config** - runs after tempest (and plugins) are configured The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to From 1ab9a2d144ee1229cdd52c0a074931c719bc7ba0 Mon Sep 17 00:00:00 2001 From: Felipe Monteiro Date: Fri, 16 Mar 2018 02:02:12 +0000 Subject: [PATCH 0853/1936] Add project_tags identity feature flag This PS adds the project_tags identity feature flag which allows identity v3 project tags API functionality to be enabled for releases after Pike. Once Pike is no longer supported in Tempest this feature flag can be removed. Depends-On: Ibaec1df79c9ac69c65cf5075c3519092bc609546 Change-Id: Iec6b34c10ea1bd7103720c773b48ce130643115d --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index f60b477c72..0605ffb082 100644 --- a/lib/tempest +++ b/lib/tempest @@ -299,6 +299,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True fi + # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike + # as this is supported in Queens and beyond. + iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From 34e09dd3a90e7782a96c5a500368cad04f553e1c Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Sun, 18 Mar 2018 17:56:27 +0100 Subject: [PATCH 0854/1936] Fix devstack job: the dict name is devstack_local_conf It looks like a typo. 
Change-Id: Id017b169f25a7589da3eca509c28f5d867fd5962 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 4636b8cd81..07d8f48cff 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -249,7 +249,7 @@ SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" - devstack_localconf: + devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: From 23df4da0d4e61e2149e0ec164d0bad02fc8a3f07 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 19 Mar 2018 14:27:01 -0400 Subject: [PATCH 0855/1936] Use Queens UCA This updates the UCA usage from Pike to Queens. As a result, the various volume multiattach checks can also be removed because the Queens UCA has libvirt 4.0.0. Change-Id: Icb971831c8d4fe5f940d9e7993d53f1c3765e30f --- stackrc | 7 +------ tools/fixup_stuff.sh | 13 ++++--------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/stackrc b/stackrc index e8c35f423e..d3ea33ef44 100644 --- a/stackrc +++ b/stackrc @@ -625,12 +625,7 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - # If ENABLE_VOLUME_MULTIATTACH is True, the Ubuntu Cloud Archive can't - # be used until it provides libvirt>=3.10, and with older versions of - # Ubuntu the group is "libvirtd". - # TODO(mriedem): Remove the ENABLE_VOLUME_MULTIATTACH check when - # UCA has libvirt>=3.10. - if [[ "$os_VENDOR" =~ (Debian|Ubuntu) && "${ENABLE_VOLUME_MULTIATTACH}" == "False" ]]; then + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then # The groups change with newer libvirt. Older Ubuntu used # 'libvirtd', but now uses libvirt like Debian. Do a quick check # to see if libvirtd group already exists to handle grenade's case. diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 90b2c8bf1f..914793245e 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -77,28 +77,23 @@ function fixup_keystone { # Make it possible to switch this based on an environment variable as # libvirt 2.5.0 doesn't handle nested virtualization quite well and this # is required for the trove development environment. -# The Pike UCA has qemu 2.10 but libvirt 3.6, therefore if -# ENABLE_VOLUME_MULTIATTACH is True, we can't use the Pike UCA -# because multiattach won't work with those package versions. -# We can remove this check when the UCA has libvirt>=3.10. function fixup_uca { - if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" || \ - "${ENABLE_VOLUME_MULTIATTACH}" == "True" ]]; then + if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then return fi # This pulls in apt-add-repository install_package "software-properties-common" - # Use UCA for newer libvirt. Should give us libvirt 2.5.0. + # Use UCA for newer libvirt. if [[ -f /etc/ci/mirror_info.sh ]] ; then # If we are on a nodepool provided host and it has told us about where # we can find local mirrors then use that mirror. 
source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main" + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/queens main" else # Otherwise use upstream UCA - sudo add-apt-repository -y cloud-archive:pike + sudo add-apt-repository -y cloud-archive:queens fi # Disable use of libvirt wheel since a cached wheel build might be From 6f27fca4a76aceaadab3776c87c48743671ce502 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Tue, 21 Nov 2017 17:05:43 -0800 Subject: [PATCH 0856/1936] Zuul: support plugin dependencies Change-Id: I81302e8988fe6498fea9f08ed66f5d0cc1fce161 --- .gitignore | 1 + .../library/devstack_local_conf.py | 219 +++++++++++++----- .../write-devstack-local-conf/library/test.py | 166 +++++++++++++ .../write-devstack-local-conf/tasks/main.yaml | 1 + tests/test_write_devstack_local_conf_role.sh | 9 + 5 files changed, 341 insertions(+), 55 deletions(-) create mode 100644 roles/write-devstack-local-conf/library/test.py create mode 100755 tests/test_write_devstack_local_conf_role.sh diff --git a/.gitignore b/.gitignore index d2c127d099..8553b3f849 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ *.log *.log.[1-9] *.pem +*.pyc .localrc.auto .localrc.password .prereqs diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 55ba4afb69..746f54f921 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -14,16 +14,69 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import re -class VarGraph(object): +class DependencyGraph(object): # This is based on the JobGraph from Zuul. + def __init__(self): + self._names = set() + self._dependencies = {} # dependent_name -> set(parent_names) + + def add(self, name, dependencies): + # Append the dependency information + self._dependencies.setdefault(name, set()) + try: + for dependency in dependencies: + # Make sure a circular dependency is never created + ancestors = self._getParentNamesRecursively( + dependency, soft=True) + ancestors.add(dependency) + if name in ancestors: + raise Exception("Dependency cycle detected in {}". 
+ format(name)) + self._dependencies[name].add(dependency) + except Exception: + del self._dependencies[name] + raise + + def getDependenciesRecursively(self, parent): + dependencies = [] + + current_dependencies = self._dependencies[parent] + for current in current_dependencies: + if current not in dependencies: + dependencies.append(current) + for dep in self.getDependenciesRecursively(current): + if dep not in dependencies: + dependencies.append(dep) + return dependencies + + def _getParentNamesRecursively(self, dependent, soft=False): + all_parent_items = set() + items_to_iterate = set([dependent]) + while len(items_to_iterate) > 0: + current_item = items_to_iterate.pop() + current_parent_items = self._dependencies.get(current_item) + if current_parent_items is None: + if soft: + current_parent_items = set() + else: + raise Exception("Dependent item {} not found: ".format( + dependent)) + new_parent_items = current_parent_items - all_parent_items + items_to_iterate |= new_parent_items + all_parent_items |= new_parent_items + return all_parent_items + + +class VarGraph(DependencyGraph): def __init__(self, vars): + super(VarGraph, self).__init__() self.vars = {} self._varnames = set() - self._dependencies = {} # dependent_var_name -> set(parent_var_names) for k, v in vars.items(): self._varnames.add(k) for k, v in vars.items(): @@ -38,28 +91,21 @@ def _addVar(self, key, value): raise Exception("Variable {} already added".format(key)) self.vars[key] = value # Append the dependency information - self._dependencies.setdefault(key, set()) + dependencies = set() + for dependency in self.getDependencies(value): + if dependency == key: + # A variable is allowed to reference itself; no + # dependency link needed in that case. + continue + if dependency not in self._varnames: + # It's not necessary to create a link for an + # external variable. + continue + dependencies.add(dependency) try: - for dependency in self.getDependencies(value): - if dependency == key: - # A variable is allowed to reference itself; no - # dependency link needed in that case. - continue - if dependency not in self._varnames: - # It's not necessary to create a link for an - # external variable. - continue - # Make sure a circular dependency is never created - ancestor_vars = self._getParentVarNamesRecursively( - dependency, soft=True) - ancestor_vars.add(dependency) - if any((key == anc_var) for anc_var in ancestor_vars): - raise Exception("Dependency cycle detected in var {}". 
- format(key)) - self._dependencies[key].add(dependency) + self.add(key, dependencies) except Exception: del self.vars[key] - del self._dependencies[key] raise def getVars(self): @@ -67,48 +113,105 @@ def getVars(self): keys = sorted(self.vars.keys()) seen = set() for key in keys: - dependencies = self.getDependentVarsRecursively(key) + dependencies = self.getDependenciesRecursively(key) for var in dependencies + [key]: if var not in seen: ret.append((var, self.vars[var])) seen.add(var) return ret - def getDependentVarsRecursively(self, parent_var): - dependent_vars = [] - - current_dependent_vars = self._dependencies[parent_var] - for current_var in current_dependent_vars: - if current_var not in dependent_vars: - dependent_vars.append(current_var) - for dep in self.getDependentVarsRecursively(current_var): - if dep not in dependent_vars: - dependent_vars.append(dep) - return dependent_vars - - def _getParentVarNamesRecursively(self, dependent_var, soft=False): - all_parent_vars = set() - vars_to_iterate = set([dependent_var]) - while len(vars_to_iterate) > 0: - current_var = vars_to_iterate.pop() - current_parent_vars = self._dependencies.get(current_var) - if current_parent_vars is None: - if soft: - current_parent_vars = set() - else: - raise Exception("Dependent var {} not found: ".format( - dependent_var)) - new_parent_vars = current_parent_vars - all_parent_vars - vars_to_iterate |= new_parent_vars - all_parent_vars |= new_parent_vars - return all_parent_vars + +class PluginGraph(DependencyGraph): + def __init__(self, base_dir, plugins): + super(PluginGraph, self).__init__() + # The dependency trees expressed by all the plugins we found + # (which may be more than those the job is using). + self._plugin_dependencies = {} + self.loadPluginNames(base_dir) + + self.plugins = {} + self._pluginnames = set() + for k, v in plugins.items(): + self._pluginnames.add(k) + for k, v in plugins.items(): + self._addPlugin(k, str(v)) + + def loadPluginNames(self, base_dir): + if base_dir is None: + return + git_roots = [] + for root, dirs, files in os.walk(base_dir): + if '.git' not in dirs: + continue + # Don't go deeper than git roots + dirs[:] = [] + git_roots.append(root) + for root in git_roots: + devstack = os.path.join(root, 'devstack') + if not (os.path.exists(devstack) and os.path.isdir(devstack)): + continue + settings = os.path.join(devstack, 'settings') + if not (os.path.exists(settings) and os.path.isfile(settings)): + continue + self.loadDevstackPluginInfo(settings) + + define_re = re.compile(r'^define_plugin\s+(\w+).*') + require_re = re.compile(r'^plugin_requires\s+(\w+)\s+(\w+).*') + def loadDevstackPluginInfo(self, fn): + name = None + reqs = set() + with open(fn) as f: + for line in f: + m = self.define_re.match(line) + if m: + name = m.group(1) + m = self.require_re.match(line) + if m: + if name == m.group(1): + reqs.add(m.group(2)) + if name and reqs: + self._plugin_dependencies[name] = reqs + + def getDependencies(self, value): + return self._plugin_dependencies.get(value, []) + + def _addPlugin(self, key, value): + if key in self.plugins: + raise Exception("Plugin {} already added".format(key)) + self.plugins[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(key): + if dependency == key: + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.plugins[key] + raise + + def getPlugins(self): + ret = [] + keys = sorted(self.plugins.keys()) + seen = set() + for 
key in keys: + dependencies = self.getDependenciesRecursively(key) + for plugin in dependencies + [key]: + if plugin not in seen: + ret.append((plugin, self.plugins[plugin])) + seen.add(plugin) + return ret class LocalConf(object): - def __init__(self, localrc, localconf, base_services, services, plugins): + def __init__(self, localrc, localconf, base_services, services, plugins, + base_dir): self.localrc = [] self.meta_sections = {} + self.plugin_deps = {} + self.base_dir = base_dir if plugins: self.handle_plugins(plugins) if services or base_services: @@ -119,7 +222,8 @@ def __init__(self, localrc, localconf, base_services, services, plugins): self.handle_localconf(localconf) def handle_plugins(self, plugins): - for k, v in plugins.items(): + pg = PluginGraph(self.base_dir, plugins) + for k, v in pg.getPlugins(): if v: self.localrc.append('enable_plugin {} {}'.format(k, v)) @@ -171,6 +275,7 @@ def main(): services=dict(type='dict'), localrc=dict(type='dict'), local_conf=dict(type='dict'), + base_dir=dict(type='path'), path=dict(type='str'), ) ) @@ -180,14 +285,18 @@ def main(): p.get('local_conf'), p.get('base_services'), p.get('services'), - p.get('plugins')) + p.get('plugins'), + p.get('base_dir')) lc.write(p['path']) module.exit_json() -from ansible.module_utils.basic import * # noqa -from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.basic import * # noqa + from ansible.module_utils.basic import AnsibleModule +except ImportError: + pass if __name__ == '__main__': main() diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py new file mode 100644 index 0000000000..843ca6e9fd --- /dev/null +++ b/roles/write-devstack-local-conf/library/test.py @@ -0,0 +1,166 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +from devstack_local_conf import LocalConf +from collections import OrderedDict + +class TestDevstackLocalConf(unittest.TestCase): + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_plugins(self): + "Test that plugins without dependencies work" + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. 
+ plugins = OrderedDict([ + ('bar', 'git://git.openstack.org/openstack/bar-plugin'), + ('foo', 'git://git.openstack.org/openstack/foo-plugin'), + ('baz', 'git://git.openstack.org/openstack/baz-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir')) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['bar', 'baz', 'foo'], plugins) + + def test_plugin_deps(self): + "Test that plugins with dependencies work" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar\n') + f.write('plugin_requires bar foo\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'git://git.openstack.org/openstack/bar-plugin'), + ('foo', 'git://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir')) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['foo', 'bar'], plugins) + + def test_plugin_circular_deps(self): + "Test that plugins with circular dependencies fail" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo\n') + f.write('plugin_requires foo bar\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar\n') + f.write('plugin_requires bar foo\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. 
+ plugins = OrderedDict([ + ('bar', 'git://git.openstack.org/openstack/bar-plugin'), + ('foo', 'git://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + with self.assertRaises(Exception): + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir')) + lc.write(p['path']) + + +if __name__ == '__main__': + unittest.main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml index cc21426b89..2a9f8985fc 100644 --- a/roles/write-devstack-local-conf/tasks/main.yaml +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -8,3 +8,4 @@ services: "{{ devstack_services|default(omit) }}" localrc: "{{ devstack_localrc|default(omit) }}" local_conf: "{{ devstack_local_conf|default(omit) }}" + base_dir: "{{ devstack_base_dir|default(omit) }}" diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh new file mode 100755 index 0000000000..b2bc0a2c46 --- /dev/null +++ b/tests/test_write_devstack_local_conf_role.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +python ./roles/write-devstack-local-conf/library/test.py From 236250f1c3e9f328f0dc23ab168afbcfa639a33b Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 23 Mar 2018 08:27:57 -0500 Subject: [PATCH 0857/1936] Rename python-openstacksdk to openstacksdk Change-Id: Ia77163f330f674146b369dfebea56bd97820057e Depends-On: https://review.openstack.org/554662 --- doc/source/plugin-registry.rst | 2 +- inc/python | 6 ------ lib/libraries | 2 +- stackrc | 10 +++++----- tests/test_libs_from_pypi.sh | 2 +- 5 files changed, 8 insertions(+), 14 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 04b7698fbd..a5d3ff28c6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -147,13 +147,13 @@ oaktree `git://git.openstack.org/openstack/oaktre octavia `git://git.openstack.org/openstack/octavia `__ octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard `__ omni `git://git.openstack.org/openstack/omni `__ +openstacksdk `git://git.openstack.org/openstack/openstacksdk `__ os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin `__ panko `git://git.openstack.org/openstack/panko `__ patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ -python-openstacksdk `git://git.openstack.org/openstack/python-openstacksdk `__ qinling `git://git.openstack.org/openstack/qinling `__ rally `git://git.openstack.org/openstack/rally `__ rally-openstack `git://git.openstack.org/openstack/rally-openstack `__ diff --git a/inc/python b/inc/python index e074ea498f..ec4233b156 100644 --- a/inc/python +++ b/inc/python @@ -411,12 +411,6 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name - # TODO(mordred) This is a special case for python-openstacksdk, where the - # repo name and the pip name do not match. 
We should either add systemic - # support for providing aliases, or we should rename the git repo. - if [[ $name == 'python-openstacksdk' ]] ; then - name=openstacksdk - fi safe_name=$(python -c "from pkg_resources import safe_name; \ print(safe_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to diff --git a/lib/libraries b/lib/libraries index 6d52f642a1..52ec784d3b 100644 --- a/lib/libraries +++ b/lib/libraries @@ -28,6 +28,7 @@ GITDIR["cliff"]=$DEST/cliff GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist +GITDIR["openstacksdk"]=$DEST/openstacksdk GITDIR["os-client-config"]=$DEST/os-client-config GITDIR["osc-lib"]=$DEST/osc-lib GITDIR["osc-placement"]=$DEST/osc-placement @@ -51,7 +52,6 @@ GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects GITDIR["oslo.vmware"]=$DEST/oslo.vmware GITDIR["osprofiler"]=$DEST/osprofiler GITDIR["pycadf"]=$DEST/pycadf -GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk GITDIR["stevedore"]=$DEST/stevedore GITDIR["taskflow"]=$DEST/taskflow GITDIR["tooz"]=$DEST/tooz diff --git a/stackrc b/stackrc index e8c35f423e..f05bc6ec3a 100644 --- a/stackrc +++ b/stackrc @@ -133,7 +133,7 @@ export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) # base name of the directory from which they are installed. See # enable_python3_package to edit this variable and use_python3_for to # test membership. -export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,python-openstacksdk" +export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,openstacksdk" # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. @@ -525,6 +525,10 @@ GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/opensta GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware +# openstacksdk OpenStack Python SDK +GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git} +GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} + # os-brick library to manage local volume attaches GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH} @@ -542,10 +546,6 @@ GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH} GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH} -# python-openstacksdk OpenStack Python SDK -GITREPO["python-openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/python-openstacksdk.git} -GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} - # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index a544b56577..c3b4457171 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -38,7 +38,7 @@ ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization" ALL_LIBS+=" python-openstackclient osc-lib osc-placement" ALL_LIBS+=" os-client-config oslo.rootwrap" -ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient" +ALL_LIBS+=" oslo.i18n oslo.utils openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" 
debtcollector os-brick os-traits automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"

From 63beab524368875820db453c244468105584bc85 Mon Sep 17 00:00:00 2001
From: Huan Xiong
Date: Fri, 23 Mar 2018 14:42:37 +0000
Subject: [PATCH 0858/1936] init_cinder() shouldn't always create
 DEFAULT_VOLUME_GROUP_NAME

The DEFAULT_VOLUME_GROUP_NAME volume group is the LVM ephemeral storage used
by Nova. It is created by init_nova() if the user sets NOVA_BACKEND to "LVM".
However, init_cinder() is also hardcoded to create it, based on the
assumption that CINDER_ENABLED_BACKENDS includes it. That assumption doesn't
hold for the current code.

What's more important, even if the user wants to use DEFAULT_VOLUME_GROUP_NAME
as one of the cinder backends and adds it to CINDER_ENABLED_BACKENDS, the
current code in init_cinder() is general enough and should work fine.

This change removes the relevant code in init_cinder(). It also moves
DEFAULT_VOLUME_GROUP_NAME clean-up code from unstack.sh to cleanup_nova().

Change-Id: I53762f8eda6256f962cc4e1f1098406879bbcf5c
---
 lib/cinder | 4 ----
 lib/nova | 4 ++++
 unstack.sh | 4 ----
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/lib/cinder b/lib/cinder
index 3a8097f894..e0b8971dcb 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -420,10 +420,6 @@ function init_cinder {
             be_type=${be%%:*}
             be_name=${be##*:}
             if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
-                # Always init the default volume group for lvm.
-                if [[ "$be_type" == "lvm" ]]; then
-                    init_default_lvm_volume_group
-                fi
                 init_cinder_backend_${be_type} ${be_name}
             fi
         done
diff --git a/lib/nova b/lib/nova
index 56e309333b..ee682db86a 100644
--- a/lib/nova
+++ b/lib/nova
@@ -264,6 +264,10 @@ function cleanup_nova {
     stop_process "n-api-meta"
     remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
     remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+
+    if [[ "$NOVA_BACKEND" == "LVM" ]]; then
+        clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
+    fi
 }
 
 # configure_nova() - Set config files, create data dirs, etc
diff --git a/unstack.sh b/unstack.sh
index ccea0ef585..cfbf22e0fa 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -175,9 +175,5 @@ fi
 # enabled backends. So if Cinder is enabled, and installed successfully we are
 # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
 if is_service_enabled cinder && is_package_installed lvm2; then
-    # Using /bin/true here indicates a BUG - maybe the
-    # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should
-    # isolate this further down in lib/cinder cleanup.
-    clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
     clean_lvm_filter
 fi

From 2b3bb30d9906f70894a1945956fb8216cd929ad0 Mon Sep 17 00:00:00 2001
From: Matt Riedemann
Date: Fri, 23 Mar 2018 19:56:26 -0400
Subject: [PATCH 0859/1936] Only sync the local cell in superconductor mode

When nova-manage db sync runs on cell1 in superconductor mode, the
[api_database]/connection config option isn't set in the config file on
purpose so the cell can't reach the API database. As a result, the db sync
on the cell config can't hit the API DB to sync cell0, which is not
something we need here anyway, but it results in an error message.

This tells the cell config db sync to just run it on the cell database and
not try to sync cell0.
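To relate the ``DEFAULT_VOLUME_GROUP_NAME`` split described in patch 0858 above back to user-facing settings, a ``local.conf`` sketch along these lines exercises both code paths (the backend name shown is only the conventional default)::

    [[local|localrc]]
    # Nova ephemeral storage on LVM; init_nova creates DEFAULT_VOLUME_GROUP_NAME for this
    NOVA_BACKEND=LVM
    # Cinder LVM backend; handled by the generic per-backend setup in init_cinder
    CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1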
Change-Id: Iac092762decd6de9e90e264f2998d255e8e40d00 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 56e309333b..2c5307ccec 100644 --- a/lib/nova +++ b/lib/nova @@ -734,7 +734,7 @@ function init_nova { # (Re)create nova databases for i in $(seq 1 $NOVA_NUM_CELLS); do recreate_database nova_cell${i} - $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync + $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell done # Migrate nova and nova_cell0 databases. From 3e813ae7c3a74f1e7069c57a311c454a8570ee92 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 24 Mar 2018 06:13:44 +0000 Subject: [PATCH 0860/1936] Updated from generate-devstack-plugins-list Change-Id: I64ec2bd77785744df7791c13ea2b3065a781db3e --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index a5d3ff28c6..c21e0efcc8 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -35,7 +35,7 @@ castellan-ui `git://git.openstack.org/openstack/castel ceilometer `git://git.openstack.org/openstack/ceilometer `__ ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm `__ cloudkitty `git://git.openstack.org/openstack/cloudkitty `__ -collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin `__ +collectd-openstack-plugins `git://git.openstack.org/openstack/collectd-openstack-plugins `__ congress `git://git.openstack.org/openstack/congress `__ cyborg `git://git.openstack.org/openstack/cyborg `__ designate `git://git.openstack.org/openstack/designate `__ From b8fcb2594cbee90c559bfc68840ac4380c8567c7 Mon Sep 17 00:00:00 2001 From: esberglu Date: Wed, 21 Mar 2018 11:04:37 -0500 Subject: [PATCH 0861/1936] Update default ETCD_VERSION to latest 3.2 etcd release This updates the default ETCD_VERSION to the latest 3.2 etcd release, v3.2.17. 3.2 is chosen as it is packaged in bionic and fedora; we hope to move to packaged versions for distros that support it in due course. This version supports arm64 and ppc64le which were not supported by the previous default, v3.1.10. We have removed the override to tarballs.o.o, as these files are now cached as described in [1] [1] http://lists.openstack.org/pipermail/openstack-infra/2018-March/005871.html Depends-On: https://review.openstack.org/556688 Change-Id: I5103b4331a8d7c5660848fc148ebe4139ce6dad9 --- .zuul.yaml | 5 ----- stackrc | 10 +++++----- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 07d8f48cff..bbcdfef977 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -116,10 +116,6 @@ ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it. LIBVIRT_TYPE: qemu - # NOTE(dims): etcd 3.x is not available in debian/ubuntu - # etc. As a stop gap measure, devstack uses wget to download - # from the location below for all the CI jobs. - ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false @@ -183,7 +179,6 @@ NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true LIBVIRT_TYPE: qemu - ETCD_DOWNLOAD_URL: http://tarballs.openstack.org/etcd/ devstack_services: base: false pre-run: playbooks/pre.yaml diff --git a/stackrc b/stackrc index e8c35f423e..9c98bc983d 100644 --- a/stackrc +++ b/stackrc @@ -737,11 +737,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.1.10} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be"} -# NOTE(sdague): etcd v3.1.10 doesn't have anything for these architectures, though 3.2.x does. -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-""} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-""} +ETCD_VERSION=${ETCD_VERSION:-v3.2.17} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"} +# etcd v3.3.2 doesn't have anything for s390x ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then From 40a58d0525d493ab6ec11fda5d123d11a7437308 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 28 Mar 2018 06:36:09 +0000 Subject: [PATCH 0862/1936] Updated from generate-devstack-plugins-list Change-Id: I35e087b203f463532788284227fddd233cb6ba0d --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c21e0efcc8..01ba9d1eac 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,6 +75,7 @@ karbor `git://git.openstack.org/openstack/karbor karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ keystone `git://git.openstack.org/openstack/keystone `__ kingbird `git://git.openstack.org/openstack/kingbird `__ +kolla-cli `git://git.openstack.org/openstack/kolla-cli `__ kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin `__ From aab248bc89b11aa527ebf185ec4b12a2e951b11c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Mar 2018 10:05:05 -0500 Subject: [PATCH 0863/1936] Another openstacksdk rename fix Missed one, this is blocking OSc's functional-tips tests Change-Id: Iecf848c1791b1f204b0867e9da3e2ed994f02da3 --- lib/libraries | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/libraries b/lib/libraries index 52ec784d3b..b4f3c31d5e 100644 --- a/lib/libraries +++ b/lib/libraries @@ -91,6 +91,7 @@ function install_libs { _install_lib_from_source "cursive" _install_lib_from_source "debtcollector" _install_lib_from_source "futurist" + _install_lib_from_source "openstacksdk" _install_lib_from_source "osc-lib" _install_lib_from_source "osc-placement" _install_lib_from_source "os-client-config" @@ -114,7 +115,6 @@ function install_libs { _install_lib_from_source "oslo.vmware" _install_lib_from_source "osprofiler" _install_lib_from_source "pycadf" - _install_lib_from_source "python-openstacksdk" _install_lib_from_source "stevedore" _install_lib_from_source "taskflow" _install_lib_from_source "tooz" From d9060111d9496306c90e0349f4df1167e9b01ad5 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Thu, 29 Mar 2018 12:10:28 +1100 Subject: [PATCH 
0864/1936] [trivial] Comment fix In I5103b4331a8d7c5660848fc148ebe4139ce6dad9 it was noted that the comment was wrong. While this has no functional impact let's clean it up while it's fresh. Change-Id: Ia6cf8125214c69f8289fa7cff948afc10801ed2f --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e4e76249a1..166b7cf1bb 100644 --- a/stackrc +++ b/stackrc @@ -741,7 +741,7 @@ ETCD_VERSION=${ETCD_VERSION:-v3.2.17} ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"} ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"} ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"} -# etcd v3.3.2 doesn't have anything for s390x +# etcd v3.2.x doesn't have anything for s390x ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then From d6558b885785f6634d493e31000fb34c35e1213d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 31 Mar 2018 06:11:19 +0000 Subject: [PATCH 0865/1936] Updated from generate-devstack-plugins-list Change-Id: I12b31df6f53926dd79a6094ca998957f485e8133 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 01ba9d1eac..c21e0efcc8 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,7 +75,6 @@ karbor `git://git.openstack.org/openstack/karbor karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ keystone `git://git.openstack.org/openstack/keystone `__ kingbird `git://git.openstack.org/openstack/kingbird `__ -kolla-cli `git://git.openstack.org/openstack/kolla-cli `__ kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin `__ From a62ede7d5034185a85c8ee99a916f41c0f2bde75 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Wed, 14 Mar 2018 11:58:56 -0400 Subject: [PATCH 0866/1936] Use NOVA_BIN_DIR / SWIFT_BIN_DIR for binaries Fix a few path issues where we didn't properly use NOVA_BIN_DIR / SWIFT_BIN_DIR. This is part of the effort to start using a virtualenv for openstack services. Change-Id: I6eb383db65cc902c67c43e5cb1a16a9716a914b2 Signed-off-by: Paul Belanger --- lib/nova | 8 ++++---- lib/swift | 45 ++++++++++++++++++++++----------------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/lib/nova b/lib/nova index 56e309333b..2eef8c411f 100644 --- a/lib/nova +++ b/lib/nova @@ -685,7 +685,7 @@ function init_nova_cells { $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 # Creates the single cells v2 cell for the child cell (v1) nova db. - nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \ + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \ --transport-url $(get_transport_url child_cell) --name 'cell1' fi } @@ -729,7 +729,7 @@ function init_nova { # this needs to come after the api_db sync happens. We also want to run # this before the db sync below since that will migrate both the nova # and nova_cell0 databases. 
- nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` + $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` # (Re)create nova databases for i in $(seq 1 $NOVA_NUM_CELLS); do @@ -750,7 +750,7 @@ function init_nova { # create the cell1 cell for the main nova db where the hosts live for i in $(seq 1 $NOVA_NUM_CELLS); do - nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" done fi @@ -1015,7 +1015,7 @@ function start_nova { if is_service_enabled n-api; then # dump the cell mapping to ensure life is good echo "Dumping cells_v2 mapping" - nova-manage cell_v2 list_cells --verbose + $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose fi } diff --git a/lib/swift b/lib/swift index 6cda9c84b2..933af1005b 100644 --- a/lib/swift +++ b/lib/swift @@ -37,6 +37,7 @@ fi # Set up default directories GITDIR["python-swiftclient"]=$DEST/python-swiftclient +SWIFT_DIR=$DEST/swift # Swift virtual environment if [[ ${USE_VENV} = True ]]; then @@ -46,8 +47,6 @@ else SWIFT_BIN_DIR=$(get_python_exec_prefix) fi - -SWIFT_DIR=$DEST/swift SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT3_DIR=$DEST/swift3 @@ -341,7 +340,7 @@ function configure_swift { local user_group # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR} sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server @@ -704,7 +703,7 @@ function create_swift_accounts { function init_swift { local node_number # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # Forcibly re-create the backing filesystem create_swift_disk @@ -715,9 +714,9 @@ function init_swift { rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 # The ring will be created on each node, and because the order of # nodes is identical we can use a seed for rebalancing, making it @@ -728,26 +727,26 @@ function init_swift { node_number=1 for node in ${SWIFT_STORAGE_IPS}; do - swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 - swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 - swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder 
add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 let "node_number=node_number+1" done else for node_number in ${SWIFT_REPLICAS_SEQ}; do - swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 done fi # We use a seed for rebalancing. Doing this allows us to create # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same - swift-ring-builder object.builder rebalance 42 - swift-ring-builder container.builder rebalance 42 - swift-ring-builder account.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42 } && popd >/dev/null # Create cache dir @@ -803,7 +802,7 @@ function start_swift { # Apache should serve the "PACO" a.k.a "main" services restart_apache_server # The rest of the services should be started in backgroud - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start return 0 fi @@ -827,7 +826,7 @@ function start_swift { done if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start else # The container-sync daemon is strictly needed to pass the container # sync Tempest tests. 
@@ -835,8 +834,8 @@ function start_swift { run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf" fi else - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true - swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true fi if is_service_enabled tls-proxy; then @@ -863,12 +862,12 @@ function stop_swift { local type if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 fi # screen normally killed by ``unstack.sh`` - if type -p swift-init >/dev/null; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers # Maintain the iteration as stop_process() has some desirable side-effects From 87c0de5240d33cf053815d90ba8e766958f412bb Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 3 Apr 2018 15:16:30 +0000 Subject: [PATCH 0867/1936] Export OS_CACERT after sourcing .stackenv file This makes sure that it is available to subprocesses like the other authentication data. Change-Id: I513b7c2620b171ce20a1ceb5536226f3a69f2b82 Closes-Bug: 1760901 --- openrc | 1 + 1 file changed, 1 insertion(+) diff --git a/openrc b/openrc index 37724c552e..01ec6c6114 100644 --- a/openrc +++ b/openrc @@ -29,6 +29,7 @@ source $RC_DIR/stackrc # Load the last env variables if available if [[ -r $RC_DIR/.stackenv ]]; then source $RC_DIR/.stackenv + export OS_CACERT fi # Get some necessary configuration From 4d7e33757670890ec2c8a862d504c018df744f1c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 15 Nov 2017 09:45:56 -0600 Subject: [PATCH 0868/1936] Extract a devstack-minimal base job For folks who are doing functional testing with less than the full set of normal base services. Should be a no-op/ignorable for most people. Change-Id: If14ee018c01995e0a5b6bcdaac9ddc8810c6d503 --- .zuul.yaml | 53 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b7724816cb..880658b504 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -199,8 +199,47 @@ - ^.*/locale/.*po$ - job: - name: devstack + name: devstack-minimal parent: devstack-base + description: | + Minimal devstack base job, intended for use by jobs that need + less than the normal minimum set of required-projects. 
+ nodeset: openstack-single-node + required-projects: + - openstack/requirements + vars: + devstack_localrc: + # Multinode specific settings + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + devstack_services: + # Shared services + dstat: true + etcd3: true + mysql: true + peakmem_tracker: true + rabbit: true + group-vars: + subnode: + devstack_services: + # Shared services + dstat: true + peakmem_tracker: true + devstack_localrc: + # Multinode specific settings + HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + # Subnode specific settings + DATABASE_TYPE: mysql + RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + + +- job: + name: devstack + parent: devstack-minimal description: | Base devstack job for integration gate. @@ -231,7 +270,6 @@ - openstack/keystone - openstack/neutron - openstack/nova - - openstack/requirements - openstack/swift timeout: 7200 vars: @@ -245,10 +283,6 @@ NOVA_VNC_ENABLED: true VNCSERVER_LISTEN: 0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" - # Multinode specific settings - SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" devstack_local_conf: post-config: $NEUTRON_CONF: @@ -339,16 +373,9 @@ # integrated gate, so specifying the services has not effect. # ceilometer-*: false devstack_localrc: - # Multinode specific settings - HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" - SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" # Subnode specific settings - DATABASE_TYPE: mysql GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - job: name: devstack-multinode From f3f7c079257701efcc3eb50125e09171ad88811e Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Fri, 30 Mar 2018 20:56:04 +0000 Subject: [PATCH 0869/1936] Add documention for setting up LDAP Devstack supports deploying an LDAP server and configuring keystone to use it, but we didn't have any documentation for it. This commit adds some basic documentation that should help developers setup LDAP-backed development environments. 
Change-Id: I8ba07d73f52cb7f575ff2953977e9fdcade92d83
---
 doc/source/guides.rst | 6 +
 doc/source/guides/devstack-with-ldap.rst | 174 +++++++++++++++++++++++
 2 files changed, 180 insertions(+)
 create mode 100644 doc/source/guides/devstack-with-ldap.rst

diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index c2c7b9163a..82e0dd6ac6 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -20,6 +20,7 @@ Walk through various setups used by stackers
    guides/devstack-with-nested-kvm
    guides/nova
    guides/devstack-with-lbaas-v2
+   guides/devstack-with-ldap
 
 All-In-One Single VM
 --------------------
@@ -66,3 +67,8 @@ Nova and devstack
 --------------------------------
 
 Guide to working with nova features :doc:`Nova and devstack `.
+
+Deploying DevStack with LDAP
+----------------------------
+
+Guide to setting up :doc:`DevStack with LDAP `.
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst
new file mode 100644
index 0000000000..ec411419b5
--- /dev/null
+++ b/doc/source/guides/devstack-with-ldap.rst
@@ -0,0 +1,174 @@
+============================
+Deploying DevStack with LDAP
+============================
+
+The OpenStack Identity service has the ability to integrate with LDAP. The goal
+of this guide is to walk you through setting up an LDAP-backed OpenStack
+development environment.
+
+Introduction
+============
+
+LDAP support in keystone is read-only. You can use it to back an entire
+OpenStack deployment to a single LDAP server, or you can use it to back
+separate LDAP servers to specific keystone domains. Users within those domains
+can authenticate against keystone, assume role assignments, and interact
+with other OpenStack services.
+
+Configuration
+=============
+
+To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of
+``ENABLED_SERVICES``::
+
+    enable_service ldap
+
+Devstack will require a password to set up an LDAP administrator. This
+administrative user is also the bind user specified in keystone's configuration
+files, similar to a ``keystone`` user for MySQL databases.
+
+Devstack will prompt you for a password when running ``stack.sh`` if
+``LDAP_PASSWORD`` is not set. You can add the following to your
+``local.conf``::
+
+    LDAP_PASSWORD=super_secret_password
+
+At this point, devstack should have everything it needs to deploy OpenLDAP,
+bootstrap it with a minimal set of users, and configure it to back a domain
+in keystone::
+
+    ./stack.sh
+
+Once ``stack.sh`` completes, you should have a running keystone deployment with
+a basic set of users. It is important to note that not all users will live
+within LDAP. Instead, keystone will back different domains to different
+identity sources. For example, the ``default`` domain will be backed by MySQL.
+This is usually where you'll find your administrative and services users. If
+you query keystone for a list of domains, you should see a domain called
+``Users``. This domain is set up by devstack and points to OpenLDAP.
+
+User Management
+===============
+
+Initially, there will only be two users in the LDAP server. The ``Manager``
+user is used by keystone to talk to OpenLDAP. The ``demo`` user is a generic
+user that you should be able to see if you query keystone for users within the
+``Users`` domain. Both of these users were added to LDAP using basic LDAP
+utilities installed by devstack (e.g. ``ldap-utils``) and LDIFs. The LDIFs used
+to create these users can be found in ``devstack/files/ldap/``.
+ +Listing Users +------------- + +To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP +user bootstrapped by devstack:: + + ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -b dc=openstack,dc=org + +As you can see, devstack creates an OpenStack domain called ``openstack.org`` +as a container for the ``Manager`` and ``demo`` users. + +Creating Users +-------------- + +Since keystone's LDAP integration is read-only, users must be added directly to +LDAP. Users added directly to OpenLDAP will automatically be placed into the +``Users`` domain. + +LDIFs can be used to add users via the command line. The following is an +example LDIF that can be used to create a new LDAP user, let's call it +``peter.ldif.in``:: + + dn: cn=peter,ou=Users,dc=openstack,dc=org + cn: peter + displayName: Peter Quill + givenName: Peter Quill + mail: starlord@openstack.org + objectClass: inetOrgPerson + objectClass: top + sn: peter + uid: peter + userPassword: im-a-better-pilot-than-rocket + +Now, we use the ``Manager`` user to create a user for Peter in LDAP:: + + ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -c -f peter.ldif.in + +We should be able to assign Peter roles on projects. After Peter has some level +of authorization, he should be able to login to Horizon by specifying the +``Users`` domain and using his ``peter`` username and password. Authorization +can be given to Peter by creating a project within the ``Users`` domain and +giving him a role assignment on that project:: + + $ openstack project create --domain Users awesome-mix-vol-1 + +-------------+----------------------------------+ + | Field | Value | + +-------------+----------------------------------+ + | description | | + | domain_id | 61a2de23107c46bea2d758167af707b9 | + | enabled | True | + | id | 7d422396d54945cdac8fe1e8e32baec4 | + | is_domain | False | + | name | awesome-mix-vol-1 | + | parent_id | 61a2de23107c46bea2d758167af707b9 | + | tags | [] | + +-------------+----------------------------------+ + $ openstack role add --user peter --user-domain Users \ + --project awesome-mix-vol-1 --project-domain Users admin + + +Deleting Users +-------------- + +We can use the same basic steps to remove users from LDAP, but instead of using +LDIFs, we can just pass the ``dn`` of the user we want to delete:: + + ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org + +Group Management +================ + +Like users, groups are considered specific identities. This means that groups +also fall under the same read-only constraints as users and they can be managed +directly with LDAP in the same way users are with LDIFs. 
+ +Adding Groups +------------- + +Let's define a specific group with the following LDIF:: + + dn: cn=guardians,ou=UserGroups,dc=openstack,dc=org + objectClass: groupOfNames + cn: guardians + description: Guardians of the Galaxy + member: cn=peter,dc=openstack,dc=org + member: cn=gamora,dc=openstack,dc=org + member: cn=drax,dc=openstack,dc=org + member: cn=rocket,dc=openstack,dc=org + member: cn=groot,dc=openstack,dc=org + +We can create the group using the same ``ldapadd`` command as we did with +users:: + + ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -c -f guardian-group.ldif.in + +If we check the group membership in Horizon, we'll see that only Peter is a +member of the ``guardians`` group, despite the whole crew being specified in +the LDIF. Once those accounts are created in LDAP, they will automatically be +added to the ``guardians`` group. They will also assume any role assignments +given to the ``guardians`` group. + +Deleting Groups +--------------- + +Just like users, groups can be deleted using the ``dn``:: + + ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org + +Note that this operation will not remove users within that group. It will only +remove the group itself and the memberships any users had with that group. From 295610c89edc60faba1f5d8d80f40ab00b73a39c Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 20 Mar 2018 10:54:39 -0400 Subject: [PATCH 0870/1936] Remove [placement]/os_region_name usage The [placement]/os_region_name config option is deprecated and no longer required to be set (the default is fine for devstack) with the dependent nova change. Depends-On: I973180d6a384b32838ab61d4e6aaf73c255fd116 Change-Id: I6379acf179ed511f1cdadbd7fb09e2454182a5d3 --- lib/placement | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/placement b/lib/placement index 1d68f8a185..2343ac9f2a 100644 --- a/lib/placement +++ b/lib/placement @@ -112,7 +112,6 @@ function configure_placement_nova_compute { iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf placement project_name "$SERVICE_TENANT_NAME" iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf placement os_region_name "$REGION_NAME" # TODO(cdent): auth_strategy, which is common to see in these # blocks is not currently used here. For the time being the # placement api uses the auth_strategy configuration setting From c77c9497a5ef79dab74e23a21783044241fe1670 Mon Sep 17 00:00:00 2001 From: David Rabel Date: Thu, 5 Apr 2018 20:56:22 +0200 Subject: [PATCH 0871/1936] Fix indentation in devstack-with-lbaas-v2 guides/devstack-with-lbaas-v2 contained an indentation misstake, that formatted some of the text unintentionally as quotations. Change-Id: Ibbad4974c45f028d3de461ba69e0cea837d9c871 --- doc/source/guides/devstack-with-lbaas-v2.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 7dee520a23..df3c7ce2ac 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -15,7 +15,7 @@ make sure it is updated. 
Install git and any other developer tools you find usef Install devstack - :: +:: git clone https://git.openstack.org/openstack-dev/devstack cd devstack @@ -23,7 +23,7 @@ Install devstack Edit your ``local.conf`` to look like - :: +:: [[local|localrc]] # Load the external LBaaS plugin. @@ -60,7 +60,7 @@ Edit your ``local.conf`` to look like Run stack.sh and do some sanity checks - :: +:: ./stack.sh . ./openrc @@ -69,7 +69,7 @@ Run stack.sh and do some sanity checks Create two nova instances that we can use as test http servers: - :: +:: #create nova instances on private network nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 @@ -83,7 +83,7 @@ Create two nova instances that we can use as test http servers: Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run - :: +:: MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& @@ -91,7 +91,7 @@ Set up a simple web server on each of these instances. ssh into each instance (u Phase 2: Create your load balancers ------------------------------------ - :: +:: neutron lbaas-loadbalancer-create --name lb1 private-subnet neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE. From 931f82dc291dae10b9a484f25e044aed788311e1 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Sun, 18 Feb 2018 14:11:10 +0100 Subject: [PATCH 0872/1936] Enable tempest tests for application credentials In Queens and later, the application credentials feature is available on keystone and enabled by default. It should be tested in devstack. Depends-on: https://review.openstack.org/545627 Change-Id: I4b0dc823487e79df16e1e603012ba4a7dc438389 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 0605ffb082..9de9b91870 100644 --- a/lib/tempest +++ b/lib/tempest @@ -303,6 +303,10 @@ function configure_tempest { # as this is supported in Queens and beyond. iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True + # In Queens and later, application credentials are enabled by default + # so remove this once Tempest no longer supports Pike. + iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From 486057f3391ce3a262f8226f7652ed14b3b444f3 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 21 Mar 2018 13:59:18 +0100 Subject: [PATCH 0873/1936] Start OVS as root on Tumblweed to workaround bsc#1085971 There is currently a OVS 2.9.0 update in Tumbleweed that fails to start as it is having a race with systemd on creating the home directory. Workaround is to run it as root for now. 
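On the affected openSUSE platforms, the switch described above shows up in the generated ``nova.conf`` as a single override, roughly as follows (a sketch; the path assumes the distro package installs ``mkisofs`` there)::

    [DEFAULT]
    mkisofs_cmd = /usr/bin/mkisofs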
Change-Id: Ief610c6473834b02a1d644d8f50d11138a48e6e6 --- lib/neutron_plugins/ovs_base | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 50b9ae506b..36e2ed2ca2 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -72,7 +72,14 @@ function _neutron_ovs_base_install_agent_packages { if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then restart_service openvswitch-switch else - restart_service openvswitch + # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 + if [[ $DISTRO =~ "tumbleweed" ]]; then + sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch + fi + restart_service openvswitch || { + journalctl -xe || : + systemctl status openvswitch + } fi fi } From c114449bdb6fc8c4ede2b7845aa2ba049bdc332f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 11 Apr 2018 21:33:50 +0200 Subject: [PATCH 0874/1936] Switch to mkisofs for openSUSE distributions In Tumbleweed genisoimage was dropped in favor of cdrtools, so installing that no longer works. We can however install mkisofs directly and switch to that as that is also available in Leap 42.3 and Leap 15.0+ family distros. Also drop dependency on libmysqlclient-devel which appears unnecessary (and is no longer available with mariadb 10.2+) Change-Id: Ie8402204b6cdf94c21865caba116d3fd1298c5ad --- files/rpms-suse/general | 1 - files/rpms-suse/n-cpu | 2 +- files/rpms-suse/nova | 2 +- lib/nova | 6 ++++++ 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 0b69cb1c01..b870d72149 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -11,7 +11,6 @@ graphviz # docs iputils libffi-devel # pyOpenSSL libjpeg8-devel # Pillow 3.0.0 -libmysqlclient-devel # MySQL-python libopenssl-devel # to rebuild pyOpenSSL if needed libxslt-devel # lxml lsof # useful when debugging diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index 9ece11534d..d0c572e97d 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -1,7 +1,7 @@ cryptsetup -genisoimage libosinfo lvm2 +mkisofs open-iscsi sg3_utils # Stuff for diablo volumes diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index ae115d2138..4103a407d2 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -4,7 +4,6 @@ dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables gawk -genisoimage # required for config_drive iptables iputils kpartx @@ -12,6 +11,7 @@ kvm # NOPRIME libvirt # NOPRIME libvirt-python # NOPRIME mariadb # NOPRIME +mkisofs # required for config_drive parted polkit # qemu as fallback if kvm cannot be used diff --git a/lib/nova b/lib/nova index 2eef8c411f..939806ff95 100644 --- a/lib/nova +++ b/lib/nova @@ -506,6 +506,12 @@ function create_nova_conf { if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" fi + + # nova defaults to genisoimage but only mkisofs is available for 15.0+ + if is_suse; then + iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs + fi + # Format logging setup_logging $NOVA_CONF From dc5d88bc0b2233ee8490d31ce67d6a8f6e503b23 Mon Sep 17 00:00:00 2001 From: Daniel Mellado Date: Thu, 12 Apr 2018 11:41:59 -0400 Subject: [PATCH 0875/1936] Apply contraints to tempest plugins This commit applies the constraints for the tempest plugin installation so they won't go over the upper reqs. 
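In practice this pins the plugin install inside the tempest venv against the requirements repository, roughly (a sketch, assuming the usual devstack layout where $REQUIREMENTS_DIR points at the openstack/requirements checkout)::

    # constrain plugin dependencies to upper-constraints.txt
    tox -evenv-tempest -- pip install \
        -c $REQUIREMENTS_DIR/upper-constraints.txt \
        $TEMPEST_PLUGINS

Without the -c file a plugin could pull in library versions newer than the upper constraints used by the rest of the deployment.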
Closes-Bug: 1763436 Change-Id: I5cf91157bbdae79dec01d5b3db32efea21f1b2b7 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 0605ffb082..2321cd5a56 100644 --- a/lib/tempest +++ b/lib/tempest @@ -646,7 +646,7 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - tox -evenv-tempest -- pip install $TEMPEST_PLUGINS + tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt $TEMPEST_PLUGINS echo "Checking installed Tempest plugins:" tox -evenv-tempest -- tempest list-plugins fi From f99d1771ba1882dfbb69186212a197edae3ef02c Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Sat, 14 Apr 2018 19:33:15 +0000 Subject: [PATCH 0876/1936] Do not use pip 10 or higher It looks pip 10 failed the uninstallation of distutils installed packages. This patch temporarily cap the version of pip to work-around. Closes-Bug: #1763966 Change-Id: I8bf80efc04883cd754c19bea0303064080112c6e --- tools/cap-pip.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt index c280267641..f5278d7c86 100644 --- a/tools/cap-pip.txt +++ b/tools/cap-pip.txt @@ -1 +1 @@ -pip!=8 +pip!=8,<10 From e1edde38edb697f56e111f55e5992e7bf22f6284 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 2 Mar 2018 15:05:14 +0000 Subject: [PATCH 0877/1936] Automatically set LIBS_FROM_GIT based on required projects If a project shows up in zuul's required-projects list, add it to LIBS_FROM_GIT automatically. This way, when a user specifies that a job requires a zuul-project, it gets used in testing, but otherwise, it doesn't (pypi is used instead). Also add information about what happens behind the scenes for both LIBS_FROM_GIT and plugin dependencies. This moves the check performed in check_libs_from_git to a helper function which is installed for most kinds of installations. This means that if someone sets LIBS_FROM_GIT to "foobar", devstack won't error anymore, as nothing is going to try to install foobar, therefore the check won't run on that. However, as we move to automated generation of the local config, that error is not likely to happen. This check was originally added due to an error in the upper-constraints file (where a constraint name did not match a package name). This location of the check would still catch that type of error. Change-Id: Ifcf3ad008cf42d3d4762cfb3b6c31c93cfeb40db --- inc/python | 23 ++---- roles/write-devstack-local-conf/README.rst | 12 +++ .../library/devstack_local_conf.py | 29 ++++++-- .../write-devstack-local-conf/library/test.py | 74 +++++++++++++++++-- .../write-devstack-local-conf/tasks/main.yaml | 1 + stack.sh | 5 -- 6 files changed, 110 insertions(+), 34 deletions(-) diff --git a/inc/python b/inc/python index e074ea498f..37b4617777 100644 --- a/inc/python +++ b/inc/python @@ -435,22 +435,6 @@ function lib_installed_from_git { [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]] } -# check that everything that's in LIBS_FROM_GIT was actually installed -# correctly, this helps double check issues with library fat fingering. -function check_libs_from_git { - local lib="" - local not_installed="" - for lib in $(echo ${LIBS_FROM_GIT} | tr "," " "); do - if ! lib_installed_from_git "$lib"; then - not_installed+=" $lib" - fi - done - # if anything is not installed, say what it is. 
- if [[ -n "$not_installed" ]]; then - die $LINENO "The following LIBS_FROM_GIT were not installed correct: $not_installed" - fi -} - # setup a library by name. If we are trying to use the library from # git, we'll do a git based install, otherwise we'll punt and the # library should be installed by a requirements pull from another @@ -561,6 +545,13 @@ function _setup_package_with_constraints_edit { setup_package $project_dir "$flags" $extras + # If this project is in LIBS_FROM_GIT, verify it was actually installed + # correctly. This helps catch errors caused by constraints mismatches. + if use_library_from_git "$project_dir"; then + if ! lib_installed_from_git "$project_dir"; then + die $LINENO "The following LIBS_FROM_GIT was not installed correctly: $project_dir" + fi + fi } # ``pip install -e`` the package, which processes the dependencies diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst index 73f9f0d6fd..bfce9c98cd 100644 --- a/roles/write-devstack-local-conf/README.rst +++ b/roles/write-devstack-local-conf/README.rst @@ -20,6 +20,14 @@ Write the local.conf file for use by devstack bash shell variables, and will be ordered so that variables used by later entries appear first. + As a special case, the variable ``LIBS_FROM_GIT`` will be + constructed automatically from the projects which appear in the + ``required-projects`` list defined by the job. To instruct + devstack to install a library from source rather than pypi, simply + add that library to the job's ``required-projects`` list. To + override the automatically-generated value, set ``LIBS_FROM_GIT`` + in ``devstack_localrc`` to the desired value. + .. zuul:rolevar:: devstack_local_conf :type: dict @@ -75,3 +83,7 @@ Write the local.conf file for use by devstack A dictionary mapping a plugin name to a git repo location. If the location is a non-empty string, then an ``enable_plugin`` line will be emmitted for the plugin name. + + If a plugin declares a dependency on another plugin (via + ``plugin_requires`` in the plugin's settings file), this role will + automatically emit ``enable_plugin`` lines in the correct order. 
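As an illustration of the behaviour described above (project names taken from the unit tests below): a job whose required-projects list contains openstack/nova and openstack/oslo.messaging, but which sets no LIBS_FROM_GIT of its own, ends up with a generated localrc entry along the lines of::

    LIBS_FROM_GIT=nova,oslo.messaging

while an explicit ``LIBS_FROM_GIT`` in ``devstack_localrc`` (for example ``oslo.db``) is passed through untouched.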
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 746f54f921..9728fef37b 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -207,17 +207,17 @@ def getPlugins(self): class LocalConf(object): def __init__(self, localrc, localconf, base_services, services, plugins, - base_dir): + base_dir, projects): self.localrc = [] self.meta_sections = {} self.plugin_deps = {} self.base_dir = base_dir + self.projects = projects if plugins: self.handle_plugins(plugins) if services or base_services: self.handle_services(base_services, services or {}) - if localrc: - self.handle_localrc(localrc) + self.handle_localrc(localrc) if localconf: self.handle_localconf(localconf) @@ -241,9 +241,22 @@ def handle_services(self, base_services, services): self.localrc.append('enable_service {}'.format(k)) def handle_localrc(self, localrc): - vg = VarGraph(localrc) - for k, v in vg.getVars(): - self.localrc.append('{}={}'.format(k, v)) + lfg = False + if localrc: + vg = VarGraph(localrc) + for k, v in vg.getVars(): + self.localrc.append('{}={}'.format(k, v)) + if k == 'LIBS_FROM_GIT': + lfg = True + + if not lfg and self.projects: + required_projects = [] + for project_name, project_info in self.projects.items(): + if project_info.get('required'): + required_projects.append(project_info['short_name']) + if required_projects: + self.localrc.append('LIBS_FROM_GIT={}'.format( + ','.join(required_projects))) def handle_localconf(self, localconf): for phase, phase_data in localconf.items(): @@ -277,6 +290,7 @@ def main(): local_conf=dict(type='dict'), base_dir=dict(type='path'), path=dict(type='str'), + projects=dict(type='dict'), ) ) @@ -286,7 +300,8 @@ def main(): p.get('base_services'), p.get('services'), p.get('plugins'), - p.get('base_dir')) + p.get('base_dir'), + p.get('projects')) lc.write(p['path']) module.exit_json() diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 843ca6e9fd..7ccb68f08d 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -56,7 +56,8 @@ def test_plugins(self): p.get('base_services'), p.get('services'), p.get('plugins'), - p.get('base_dir')) + p.get('base_dir'), + p.get('projects')) lc.write(p['path']) plugins = [] @@ -66,6 +67,7 @@ def test_plugins(self): plugins.append(line.split()[1]) self.assertEqual(['bar', 'baz', 'foo'], plugins) + def test_plugin_deps(self): "Test that plugins with dependencies work" os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) @@ -101,20 +103,80 @@ def test_plugin_deps(self): plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) + + def test_libs_from_git(self): + "Test that LIBS_FROM_GIT is auto-generated" + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects) lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), - p.get('base_dir')) + 
p.get('base_dir'), + p.get('projects')) lc.write(p['path']) - plugins = [] + lfg = None with open(p['path']) as f: for line in f: - if line.startswith('enable_plugin'): - plugins.append(line.split()[1]) - self.assertEqual(['foo', 'bar'], plugins) + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('nova,oslo.messaging', lfg) + + def test_overridelibs_from_git(self): + "Test that LIBS_FROM_GIT can be overridden" + localrc = {'LIBS_FROM_GIT': 'oslo.db'} + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects) + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects')) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('oslo.db', lfg) def test_plugin_circular_deps(self): "Test that plugins with circular dependencies fail" diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml index 2a9f8985fc..a294cae608 100644 --- a/roles/write-devstack-local-conf/tasks/main.yaml +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -9,3 +9,4 @@ localrc: "{{ devstack_localrc|default(omit) }}" local_conf: "{{ devstack_local_conf|default(omit) }}" base_dir: "{{ devstack_base_dir|default(omit) }}" + projects: "{{ zuul.projects }}" diff --git a/stack.sh b/stack.sh index 9b496c0e20..5643b4db1b 100755 --- a/stack.sh +++ b/stack.sh @@ -1402,11 +1402,6 @@ fi # Check the status of running services service_check -# ensure that all the libraries we think we installed from git, -# actually were. -check_libs_from_git - - # Configure nova cellsv2 # ---------------------- From fe628b9fb2e9511e78a7dc6883fa1d4766413a54 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Tue, 17 Apr 2018 17:01:46 +0000 Subject: [PATCH 0878/1936] Remove the sample configuration file for keystone This commit just makes sure that the configuration file for keystone exists on the system. We use iniset to actually populate the values we want before we run keystone anyway. This results in a cleaner configuration file that isn't bloated with comments and help text. 
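Concretely, instead of copying keystone.conf.sample, devstack now seeds an empty file with the right mode and lets the existing iniset calls fill it in, along the lines of::

    # start from an empty keystone.conf rather than the sample file
    install -m 600 /dev/null $KEYSTONE_CONF
    # ...followed by the usual iniset calls, e.g.
    iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True"

so the resulting file contains only the options devstack actually sets.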
Change-Id: I7a1f879e9e242a11e2c4663ec116e33da28db7f5 --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 714f089cca..696e351ab0 100644 --- a/lib/keystone +++ b/lib/keystone @@ -202,7 +202,7 @@ function configure_keystone { sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then - install -m 600 $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + install -m 600 /dev/null $KEYSTONE_CONF if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" fi @@ -220,7 +220,7 @@ function configure_keystone { inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0 fi - # Rewrite stock ``keystone.conf`` + # Populate ``keystone.conf`` if is_service_enabled ldap; then iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains" iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True" From afe141401997db054cd9987ca17d989415c65ad4 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Fri, 20 Apr 2018 13:36:22 +0100 Subject: [PATCH 0879/1936] Ensure passwordless ssh with stack between nodes For compute migration to work, the stack user needs to be configured with passwordless ssh between all hosts involved in the migration. Reuse the build ssh-key for this, which is already distributed for user root. Depends-on: https://review.openstack.org/563584 Change-Id: Id07f55fea06509466add35315c135dbfba6aa714 --- roles/orchestrate-devstack/tasks/main.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index 12db58c520..f747943f3c 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -6,6 +6,12 @@ - name: Setup devstack on sub-nodes block: + - name: Distribute the build sshkey for the user "stack" + include_role: + name: copy-build-sshkey + vars: + copy_sshkey_target_user: 'stack' + - name: Sync CA data to subnodes (when any) # Only do this if the tls-proxy service is defined and enabled include_role: From 05da9a9b1e86babe829c1c2349fa7e9838095ee8 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Wed, 25 Apr 2018 13:52:44 +0100 Subject: [PATCH 0880/1936] Enable nova VNC configuration on the subnode The n-novnc service only runs on the controller node, however novnc settings must be enabled on both nodes for vnc to work, since both hosts are compute hosts. 
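Expressed as plain devstack configuration, the subnode therefore needs the same VNC-related localrc settings as the controller, something like (a sketch; $HOST_IP is expanded by devstack on each host)::

    NOVA_VNC_ENABLED=true
    VNCSERVER_LISTEN=0.0.0.0
    VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP

which is what the .zuul.yaml change below adds to the subnode group vars.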
Change-Id: Icc29441f507e6e4df9fd900eb7f35b0862f52043 --- .zuul.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index a979fa4fbb..609f0feea4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -277,7 +277,7 @@ DEBUG_LIBVIRT_COREDUMPS: True NOVA_VNC_ENABLED: true VNCSERVER_LISTEN: 0.0.0.0 - VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP devstack_local_conf: post-config: $NEUTRON_CONF: @@ -371,6 +371,9 @@ # Subnode specific settings GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + NOVA_VNC_ENABLED: true + VNCSERVER_LISTEN: 0.0.0.0 + VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP - job: name: devstack-multinode From e8bad5cd6a65821c54d66bbc5f7ba17091439d34 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 Apr 2018 13:01:03 +0100 Subject: [PATCH 0881/1936] Init placement before nova With change I7e1e89cd66397883453935dcf7172d977bf82e84 the placement service may optionally use its own database. In order for this to work, however, the ordering of how both nova and placement are configured and initialized in stack.sh requires careful control. * nova.conf must be created first * then placement must make some adjustments to it * then lib/placement needs to create the placement database * before nova does a database sync (of both databases) Otherwise, when the placement_database/connection is defined, the nova db_sync command will fail because the placement database does not yet exist. If we try to do a sync before the nova_api database is created _that_ sync will fail. This patch adjusts the ordering and also removes a comment that will no longer be true when I7e1e89cd66397883453935dcf7172d977bf82e84 is merged. Change-Id: Id5b5911c04d198fe7b94c7d827afeb5cdf43a076 --- lib/placement | 8 +++----- stack.sh | 14 +++++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/placement b/lib/placement index 1d68f8a185..d70596734c 100644 --- a/lib/placement +++ b/lib/placement @@ -44,8 +44,6 @@ PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini # The placement service can optionally use a separate database # connection. Set PLACEMENT_DB_ENABLED to True to use it. -# NOTE(cdent): This functionality depends on some code that is not -# yet merged in nova but is coming soon. PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED) if is_service_enabled tls-proxy; then @@ -152,9 +150,9 @@ function create_placement_accounts { function init_placement { if [ "$PLACEMENT_DB_ENABLED" != False ]; then recreate_database placement - time_start "dbsync" - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - time_stop "dbsync" + # Database migration will be handled when nova does an api_db sync + # TODO(cdent): When placement is extracted we'll do our own sync + # here. fi create_placement_accounts } diff --git a/stack.sh b/stack.sh index 6899fa0d8b..30f24b6a3f 100755 --- a/stack.sh +++ b/stack.sh @@ -894,6 +894,8 @@ if is_service_enabled neutron; then stack_install_service neutron fi +# Nova configuration is used by placement so we need to create nova.conf +# first. 
if is_service_enabled nova; then # Compute service stack_install_service nova @@ -1184,6 +1186,13 @@ if is_service_enabled cinder; then init_cinder fi +# Placement Service +# --------------- + +if is_service_enabled placement; then + echo_summary "Configuring placement" + init_placement +fi # Compute Service # --------------- @@ -1202,11 +1211,6 @@ if is_service_enabled nova; then init_nova_cells fi -if is_service_enabled placement; then - echo_summary "Configuring placement" - init_placement -fi - # Extras Configuration # ==================== From ed2d491960138f92e819b6f6ed9dd5eb99d4a54f Mon Sep 17 00:00:00 2001 From: melanie witt Date: Tue, 18 Jul 2017 22:29:41 +0000 Subject: [PATCH 0882/1936] Run console proxies per cell instead of globally Along with converting to the database backend for console token auth, the console proxies need to run per cell instead of globally. This way, the instance UUID isn't needed in the access url as users will be handed an access url local to the cell their instances is in. With console proxies sharded across cells, a large cloud will no longer have a bottleneck of one console proxy for the entire deployment. This also disables the novnc tempest tests with a TODO to re-enable them once the nova patch series that converts from the nova-consoleauth backend -> cell database backend lands. Change-Id: I67894a31b887a93de26f3d2d8a1fa84be5b9ea89 --- lib/nova | 62 ++++++++++++++++++++++++++++++++++++++++++++++++----- lib/tempest | 9 +++++++- 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index fea2b8509a..3a00ab376c 100644 --- a/lib/nova +++ b/lib/nova @@ -921,11 +921,46 @@ function start_nova_rest { run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" fi - run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" - run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" + # nova-consoleauth always runs globally run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" - run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" + + export PATH=$old_path +} + +function enable_nova_console_proxies { + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-xvnc n-spice n-sproxy; do + if is_service_enabled $srv; then + enable_service ${srv}-cell${i} + fi + done + done +} + +function start_nova_console_proxies { + # Hack to set the path for rootwrap + local old_path=$PATH + # This is needed to find the nova conf + export PATH=$NOVA_BIN_DIR:$PATH + + local api_cell_conf=$NOVA_CONF + # console proxies run globally for singleconductor, else they run per cell + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" + run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" + run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" + run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" + else + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" + run_process n-xvnc-cell${i} 
"$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf" + run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" + run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" + done + fi export PATH=$old_path } @@ -985,6 +1020,7 @@ function start_nova { # this catches the cells v1 case early _set_singleconductor start_nova_rest + start_nova_console_proxies start_nova_conductor start_nova_compute if is_service_enabled n-api; then @@ -1010,11 +1046,26 @@ function stop_nova_compute { function stop_nova_rest { # Kill the non-compute nova processes - for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-sproxy; do + for serv in n-api n-api-meta n-net n-sch n-cauth n-cell n-cell; do stop_process $serv done } +function stop_nova_console_proxies { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + for srv in n-novnc n-xvnc n-spice n-sproxy; do + stop_process $srv + done + else + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-xvnc n-spice n-sproxy; do + stop_process ${srv}-cell${i} + done + done + fi +} + function stop_nova_conductor { if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then stop_process n-cond @@ -1032,6 +1083,7 @@ function stop_nova_conductor { # stop_nova() - Stop running processes function stop_nova { stop_nova_rest + stop_nova_console_proxies stop_nova_conductor stop_nova_compute } diff --git a/lib/tempest b/lib/tempest index 3b39dae422..d22bb95e14 100644 --- a/lib/tempest +++ b/lib/tempest @@ -386,7 +386,14 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - if is_service_enabled n-novnc; then + # TODO(melwitt): If we're running per-cell console proxies, the novnc tests + # won't work until the nova patch series lands that converts from the + # nova-consoleauth backend -> cell database backend. So disable them unless + # we're running Cells v1. Cells v1 will never support the cell database + # backend, so it will always run with a global nova-consoleauth. + # Once the patch that converts from the nova-consoleauth backend -> cell + # database backend lands, we can re-enable the novnc tests for Cells v2. + if is_service_enabled n-novnc && is_service_enabled n-cell; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi From 6645cf7a26428f3af1e4739ac29c6a90b67f99dc Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 13 Dec 2017 23:59:09 +0000 Subject: [PATCH 0883/1936] Re-enable novnc tempest tests Once the nova patch series that converts from the nova-consoleauth backend -> cell database backend lands, we can re-enable the novnc tests in tempest. Depends-On: If1b6e5f20d2ea82d94f5f0550f13189fc9bc16c4 Change-Id: I2939191a1c3ce49fa2104b4ffdf795fc416a1c33 --- lib/tempest | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index d22bb95e14..3b39dae422 100644 --- a/lib/tempest +++ b/lib/tempest @@ -386,14 +386,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - # TODO(melwitt): If we're running per-cell console proxies, the novnc tests - # won't work until the nova patch series lands that converts from the - # nova-consoleauth backend -> cell database backend. So disable them unless - # we're running Cells v1. Cells v1 will never support the cell database - # backend, so it will always run with a global nova-consoleauth. 
- # Once the patch that converts from the nova-consoleauth backend -> cell - # database backend lands, we can re-enable the novnc tests for Cells v2. - if is_service_enabled n-novnc && is_service_enabled n-cell; then + if is_service_enabled n-novnc; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi From 69057d46556db95267a4bc4156dadc88b5064ab9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 1 May 2018 05:57:21 -0500 Subject: [PATCH 0884/1936] Register versioned endpoint for block-storage service cinder does not yet support operations without project_id in the url. The unversioned endpoint is not a usable endpoint for a user that requests the block-storage service. Although it would be lovely to have the block-storage service have the unversioned endpoint in the catalog, we need to get project-id out of the urls first. Change-Id: I4246708b6ea31496ba4d565ab422abc76f730ee7 Needed-By: https://review.openstack.org/564494 --- lib/cinder | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 3a8097f894..f6cad9d9f2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -349,7 +349,7 @@ function create_cinder_accounts { get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" get_or_create_endpoint \ "volume" \ @@ -371,7 +371,7 @@ function create_cinder_accounts { get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" get_or_create_endpoint \ "volume" \ From 7e36ded0cff9d753c1bfa4ae05d5c97ccf0dd957 Mon Sep 17 00:00:00 2001 From: Artom Lifshitz Date: Wed, 2 May 2018 10:38:57 -0400 Subject: [PATCH 0885/1936] docs: Add placement-client to compute's ENABLED_SERVICES In a multinode setup, the compute node needs to report to the placement service. If it does not do so, it effectively does not exist from the scheduler's point of view. This patch adds placement-client to the compute node's ENABLED_SERVICES so that this can happen. Change-Id: Ibfcd84e4626301bcdea70f719ade7f8365d03497 --- doc/source/guides/neutron.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 1b8dccd7f3..7f360c63cc 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -244,7 +244,7 @@ The host `devstack-2` has a very minimal `local.conf`. ## Neutron options PUBLIC_INTERFACE=eth0 - ENABLED_SERVICES=n-cpu,rabbit,q-agt + ENABLED_SERVICES=n-cpu,rabbit,q-agt,placement-client Network traffic from `eth0` on the compute nodes is then NAT'd by the controller node that runs Neutron's `neutron-l3-agent` and provides L3 From 59e6ff10ce65509beefb3fdee7aa0c8ca966a8a9 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 2 May 2018 11:45:09 -0400 Subject: [PATCH 0886/1936] Remove IRONIC_USE_RESOURCE_CLASSES check Nova has dropped support for non-resource class baremetal scheduling, so the IRONIC_USE_RESOURCE_CLASSES flag is no longer useful and has been removed. 
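For background (not part of this change): with resource-class based scheduling, baremetal flavors request the node's resource class directly and zero out the standard VCPU/RAM/disk resources, roughly::

    # illustrative only; the resource class name is an example
    openstack flavor set baremetal \
        --property resources:CUSTOM_BAREMETAL_GOLD=1 \
        --property resources:VCPU=0 \
        --property resources:MEMORY_MB=0 \
        --property resources:DISK_GB=0

which replaces the host-manager/filter based approach that the removed options configured.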
Depends-On: https://review.openstack.org/565805/ Change-Id: Ib2e6c96409c98877f6a43b76f176c1420d2d415e --- lib/nova_plugins/hypervisor-ironic | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index c91f70b9bb..49110a8643 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -42,14 +42,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then - iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager - iniset $NOVA_CONF filter_scheduler use_baremetal_filters True - iniset $NOVA_CONF filter_scheduler host_subset_size 999 - iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 - fi - # ironic section iniset $NOVA_CONF ironic auth_type password iniset $NOVA_CONF ironic username admin From bed03ea77a93ad09ce8f06a467d5de02ef48e603 Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Tue, 1 May 2018 21:36:37 -0400 Subject: [PATCH 0887/1936] Switch to fedora-latest for nodeset name To help avoid the amount zuul.yaml chrun when we bring a new version of fedora online, switch to using fedora-latest. As of writing, fedora-28 is the latest release which we update our testing for. Also add fedora-28 support to stash.sh and remove fedora-25 / fedora-26 as they are EOL. Change-Id: I3d716554e8f270f4434cc9cac3408f8e890e0665 Depends-On: https://review.openstack.org/565758/ Signed-off-by: Paul Belanger --- .zuul.yaml | 18 ++++++++++++++---- stack.sh | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a979fa4fbb..87eb8c5c8d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,6 +48,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-fedora-latest + nodes: + - name: controller + label: fedora-28 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -406,10 +416,10 @@ voting: false - job: - name: devstack-platform-fedora-27 + name: devstack-platform-fedora-latest parent: tempest-full - description: Fedora 27 platform test - nodeset: devstack-single-node-fedora-27 + description: Fedora latest platform test + nodeset: devstack-single-node-fedora-latest voting: false - job: @@ -482,7 +492,7 @@ - devstack-platform-centos-7 - devstack-platform-opensuse-423 - devstack-platform-opensuse-tumbleweed - - devstack-platform-fedora-27 + - devstack-platform-fedora-latest - devstack-multinode - devstack-unit-tests gate: diff --git a/stack.sh b/stack.sh index 6899fa0d8b..2528e2b46b 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f25|f26|f27|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 12579c3db7b28381c8ec97945aa23ee02d54d22b Mon Sep 17 00:00:00 2001 From: melanie witt Date: Sat, 5 May 2018 23:55:32 +0000 Subject: [PATCH 0888/1936] Set console proxy configuration according to cells v2 setup Change 969239029d4a13956747e6e0b850d6c6ab4035f0 completed the conversion of console token authorization storage from the nova-consoleauth service to the database backend. With this change, console proxies need to be configured on a per cell basis instead of globally. There was a devstack change 6645cf7a26428f3af1e4739ac29c6a90b67f99dc following it that re-enabled the novnc tempest tests, but the nova-next job that runs the console proxies with TLS is *not* part of the normal set of jobs that run on devstack changes (it's in the experimental queue), so it was able to merge without the nova-next job passing. This configures the nova console proxies in the per cell configuration file if cells v2 is configured for multiple cells in order to pass the nova-next job. Closes-Bug: #1769286 Change-Id: Ic4fff4c59eda43dd1bc6e7b645b513b46b57c235 --- lib/nova | 119 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 69 insertions(+), 50 deletions(-) diff --git a/lib/nova b/lib/nova index 1489298ad2..0182996713 100644 --- a/lib/nova +++ b/lib/nova @@ -524,52 +524,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" fi - # All nova-compute workers need to know the vnc configuration options - # These settings don't hurt anything if n-xvnc and n-novnc are disabled - if is_service_enabled n-cpu; then - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - iniset $NOVA_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - iniset $NOVA_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} - iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" - fi - - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. 
- VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - iniset $NOVA_CONF vnc server_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" - iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - - if is_nova_console_proxy_compute_tls_enabled ; then - iniset $NOVA_CONF vnc auth_schemes "vencrypt" - iniset $NOVA_CONF vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" - iniset $NOVA_CONF vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" - iniset $NOVA_CONF vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" - - sudo mkdir -p /etc/pki/nova-novnc - deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem - deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem - fi - else - iniset $NOVA_CONF vnc enabled false - fi - - if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" - iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - fi - # Set the oslo messaging driver to the typical default. This does not # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" @@ -588,10 +542,6 @@ function create_nova_conf { iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True fi - if is_service_enabled n-sproxy; then - iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $NOVA_CONF serial_console enabled True - fi iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" # Setup logging for nova-dhcpbridge command line @@ -641,6 +591,75 @@ function create_nova_conf { setup_logging $conf done fi + + # Console proxy configuration has to go after conductor configuration + # because the per cell config file nova_cellN.conf is cleared out as part + # of conductor configuration. + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + configure_console_proxies + else + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + configure_console_proxies $conf + done + fi +} + +function configure_console_proxies { + # Use the provided config file path or default to $NOVA_CONF. 
+ local conf=${1:-$NOVA_CONF} + + # All nova-compute workers need to know the vnc configuration options + # These settings don't hurt anything if n-xvnc and n-novnc are disabled + if is_service_enabled n-cpu; then + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + iniset $conf vnc novncproxy_base_url "$NOVNCPROXY_URL" + XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} + iniset $conf vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + iniset $conf spice html5proxy_base_url "$SPICEHTML5PROXY_URL" + fi + + if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} + iniset $conf vnc server_listen "$VNCSERVER_LISTEN" + iniset $conf vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + + if is_nova_console_proxy_compute_tls_enabled ; then + iniset $conf vnc auth_schemes "vencrypt" + iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" + iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" + + sudo mkdir -p /etc/pki/nova-novnc + deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem + deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + fi + else + iniset $conf vnc enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} + iniset $conf spice enabled true + iniset $conf spice server_listen "$SPICESERVER_LISTEN" + iniset $conf spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + fi + + if is_service_enabled n-sproxy; then + iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf serial_console enabled True + fi } function init_nova_service_user_conf { From f774ecf4f9f44b38f345bb614b72c83b3ec15136 Mon Sep 17 00:00:00 2001 From: Matt Smith Date: Mon, 7 May 2018 16:43:56 -0500 Subject: [PATCH 0889/1936] Changing openrc default Cinder API version to v3 * v3 is a superset of v2 and has been the defacto Cinder version for several years now. * Devstack installs Cinder v3 API by default, so the default environment variables should reflect this. Change-Id: I86e1ae4e020e2be043cf8e190d7959b65b6c093c --- openrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openrc b/openrc index 37724c552e..cc8cad406c 100644 --- a/openrc +++ b/openrc @@ -108,5 +108,5 @@ fi # Currently cinderclient needs you to specify the *volume api* version. This # needs to match the config of your catalog returned by Keystone. 
-export CINDER_VERSION=${CINDER_VERSION:-2} +export CINDER_VERSION=${CINDER_VERSION:-3} export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} From b89bfa21b0e144d8160478b54a45a1087ea3e1df Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 8 May 2018 06:12:17 +0000 Subject: [PATCH 0890/1936] Updated from generate-devstack-plugins-list Change-Id: I202b685740fe2b4ea53d115524d3bad01b038af6 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c21e0efcc8..92c63800b9 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -163,6 +163,7 @@ scalpels `git://git.openstack.org/openstack/scalpe searchlight `git://git.openstack.org/openstack/searchlight `__ searchlight-ui `git://git.openstack.org/openstack/searchlight-ui `__ senlin `git://git.openstack.org/openstack/senlin `__ +slogging `git://git.openstack.org/openstack/slogging `__ solum `git://git.openstack.org/openstack/solum `__ stackube `git://git.openstack.org/openstack/stackube `__ storlets `git://git.openstack.org/openstack/storlets `__ From 827f6c1a4a8af662eba038da0fd0ab5eab1305c8 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 10 May 2018 06:19:41 +0000 Subject: [PATCH 0891/1936] Updated from generate-devstack-plugins-list Change-Id: I3695c066799fb14f63050bf1ec05301b1d132d5d --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 92c63800b9..9b2cb7eec1 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,6 +26,7 @@ Plugin Name URL ====================================== === almanach `git://git.openstack.org/openstack/almanach `__ aodh `git://git.openstack.org/openstack/aodh `__ +apmec `git://git.openstack.org/openstack/apmec `__ astara `git://git.openstack.org/openstack/astara `__ barbican `git://git.openstack.org/openstack/barbican `__ bilean `git://git.openstack.org/openstack/bilean `__ From 65ad79409537a76aec5cdba4360094a3047a025d Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 9 May 2018 17:55:40 +0000 Subject: [PATCH 0892/1936] Configure console proxy settings for nova-cpu.conf Change 12579c3db7b28381c8ec97945aa23ee02d54d22b moved console-related settings from the global nova.conf to the per cell nova_cellN.conf because of a recent change in nova that moved console token authorizations from the nova-consoleauth service backend to the database backend and thus changed the deployment layout requirements from global console proxies to per cell console proxies. The change erroneously also removed console configuration settings from the nova-compute config file nova-cpu.conf because the nova-cpu.conf begins as a copy of the global nova.conf. This adds configuration of console proxies to the nova-cpu.conf in the start_nova_compute routine. The settings have also been split up to clarify which settings are used by the console proxy and which settings are used by nova-compute. Closes-Bug: #1770143 Change-Id: I2a98795674183e2c05c29e15a3a3bad1a22c0891 --- lib/nova | 54 +++++++++++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/lib/nova b/lib/nova index 0182996713..b0af48aec4 100644 --- a/lib/nova +++ b/lib/nova @@ -606,19 +606,16 @@ function create_nova_conf { fi } -function configure_console_proxies { - # Use the provided config file path or default to $NOVA_CONF. 
- local conf=${1:-$NOVA_CONF} - +function configure_console_compute { # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - iniset $conf vnc novncproxy_base_url "$NOVNCPROXY_URL" + iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - iniset $conf vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" + iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} - iniset $conf spice html5proxy_base_url "$SPICEHTML5PROXY_URL" + iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then @@ -626,8 +623,32 @@ function configure_console_proxies { # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - iniset $conf vnc server_listen "$VNCSERVER_LISTEN" - iniset $conf vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CPU_CONF vnc enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} + iniset $NOVA_CPU_CONF spice enabled true + iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + fi + + if is_service_enabled n-sproxy; then + iniset $NOVA_CPU_CONF serial_console enabled True + fi +} + +function configure_console_proxies { + # Use the provided config file path or default to $NOVA_CONF. + local conf=${1:-$NOVA_CONF} + + if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" @@ -641,24 +662,14 @@ function configure_console_proxies { deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem fi - else - iniset $conf vnc enabled false fi if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. 
- SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} - iniset $conf spice enabled true - iniset $conf spice server_listen "$SPICESERVER_LISTEN" - iniset $conf spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" fi if is_service_enabled n-sproxy; then iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf serial_console enabled True fi } @@ -911,6 +922,11 @@ function start_nova_compute { iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" fi + # Console proxies were configured earlier in create_nova_conf. Now that the + # nova-cpu.conf has been created, configure the console settings required + # by the compute process. + configure_console_compute + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the From a99ab7002cfea539e035e203c0d05415fac3eb6a Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Mon, 14 May 2018 16:12:52 +0200 Subject: [PATCH 0893/1936] neutron: Use openvswitch firewall driver by default openvswitch firewall has been in Neutron tree since Newton and has gone through lots of improvements since including simple upgrade path from the iptables hybrid driver. We have a tempest job running in Neutron tree with openvswitch firewall that's been voting and stable for a while. For neutron_tempest_plugin, we have had the openvswitch firewall in use since the beginning. This patch proposes openvswitch firewall driver to become a default driver for openvswitch agent deployments. Change-Id: If26d0180e459210511f25f1faa83dd8ccea25ff4 --- lib/neutron | 4 ++-- lib/neutron_plugins/ovs_base | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index cef8d1f2ae..3cad80a414 100644 --- a/lib/neutron +++ b/lib/neutron @@ -220,8 +220,8 @@ function configure_neutron_new { if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP - else - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables_hybrid + elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then + iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 36e2ed2ca2..523024e2fe 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -86,7 +86,7 @@ function _neutron_ovs_base_install_agent_packages { function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver openvswitch if ! running_in_container; then enable_kernel_bridge_firewall fi From 306fca807bbe69d104aeb3a135eceb18d47e7f93 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 14 May 2018 11:31:54 -0400 Subject: [PATCH 0894/1936] Add dosfstools to files dosfstools provides mkfs.vfat which is needed if n-cpu is configured with 'config_drive_format=vfat'. 
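That is, the package is only exercised when the compute node carries configuration along the lines of (a sketch of nova.conf, not something devstack sets by default)::

    [DEFAULT]
    config_drive_format = vfat

in which case nova shells out to mkfs.vfat when building the config drive; with the default iso9660 format the genisoimage/mkisofs path is used instead.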
Change-Id: If1e1537a079e71847d91ae03ed0c18290a467c4e Related-Bug: #1770640 --- files/debs/n-cpu | 1 + files/rpms-suse/n-cpu | 1 + files/rpms/n-cpu | 1 + 3 files changed, 3 insertions(+) diff --git a/files/debs/n-cpu b/files/debs/n-cpu index d8bbf59d07..636644f10d 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -1,4 +1,5 @@ cryptsetup +dosfstools genisoimage gir1.2-libosinfo-1.0 lvm2 # NOPRIME diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index d0c572e97d..c11e9f0763 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -1,4 +1,5 @@ cryptsetup +dosfstools libosinfo lvm2 mkisofs diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 26c5ced196..68e5472685 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,4 +1,5 @@ cryptsetup +dosfstools genisoimage iscsi-initiator-utils libosinfo From f8755bd468cae234ca7acca018ccee31f0e474b2 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 16 May 2018 14:40:01 +0000 Subject: [PATCH 0895/1936] Allow plugins to override initial network creation The same code is already in place for the new lib/neutron library, allow this functionality to be used also when the neutron legacy services are still being deployed. This was mangled in [0]. [0] I868afeb065d80d8ccd57630b90658e330ab94251 Change-Id: I7214c4893943fbfbeb42ad140f433eecd6c3e9f0 --- stack.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 2528e2b46b..446d894f8a 100755 --- a/stack.sh +++ b/stack.sh @@ -1315,7 +1315,14 @@ fi # Once neutron agents are started setup initial network elements if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" - create_neutron_initial_network + # Here's where plugins can wire up their own networks instead + # of the code in lib/neutron_plugins/services/l3 + if type -p neutron_plugin_create_initial_networks > /dev/null; then + neutron_plugin_create_initial_networks + else + create_neutron_initial_network + fi + fi if is_service_enabled nova; then From 1fccf0b3391ca9d60fa47cc2f9f7ccbae841d483 Mon Sep 17 00:00:00 2001 From: Sumit Jamgade Date: Thu, 24 May 2018 16:24:00 +0200 Subject: [PATCH 0896/1936] swift expects an internal client config to start using the sample form the source Change-Id: I01874b650cd5d662ca2feabe58cc880155c9421e --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index 933af1005b..762f1dd8fc 100644 --- a/lib/swift +++ b/lib/swift @@ -368,6 +368,7 @@ function configure_swift { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf # To run container sync feature introduced in Swift ver 1.12.0, # container sync "realm" is added in container-sync-realms.conf From 43f25c0fc3bee28ccd50b1bd6c40046b5cd12b4f Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 31 May 2018 14:49:59 +0100 Subject: [PATCH 0897/1936] doc: Describe running a command as a separate group Some commands must be run as a separate group to work. Users can use the 'sg' tool to do this. This may be assumed knowledge for many users but it's helpful to note in this, the definitive resource for DevStack's systemd integration. 
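The pattern being documented, in short: check whether the unit sets a Group, and if so wrap the manual invocation in sg, e.g.::

    sudo systemctl cat devstack@n-cpu.service | grep Group
    sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf'

(both commands are taken from the doc text added below).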
Change-Id: I271c1d21b44fa972c152780c1caa01c21c265159 Signed-off-by: Stephen Finucane --- doc/source/systemd.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 9cc401771a..1bc9911879 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -152,6 +152,19 @@ Invoke the command manually:: /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf +Some executables, such as :program:`nova-compute`, will need to be executed +with a particular group. This will be shown in the systemd unit file:: + + sudo systemctl cat devstack@n-cpu.service | grep Group + +:: + + Group = libvirt + +Use the :program:`sg` tool to execute the command as this group:: + + sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf' + Using remote-pdb ---------------- From 0f4af398a98e7a9e742162910135e300521ea026 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 2 Jun 2018 09:34:20 -0400 Subject: [PATCH 0898/1936] Set workers=$API_WORKERS in glance-registry.conf We use $API_WORKERS to throttle the number of workers in other services but were not doing it for g-reg for some reason, which by default will run ncpu workers up to a limit of 8. Change-Id: Idc81ce05546e6d625c10e2229256eafbe7c057a5 Closes-Bug: #1774781 --- lib/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/glance b/lib/glance index 6a0e719cad..528a05fcf8 100644 --- a/lib/glance +++ b/lib/glance @@ -114,6 +114,7 @@ function configure_glance { # Set non-default configuration options for registry iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS + iniset $GLANCE_REGISTRY_CONF DEFAULT workers $API_WORKERS local dburl dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl From 5e832d3061a9edd77dff6b9a051df7f116104ea2 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 2 Jun 2018 12:40:58 -0400 Subject: [PATCH 0899/1936] Modernize VIRT_DRIVER=fake usage This makes three changes: 1. The quota options set when using the fake virt driver have been renamed so we're getting deprecation warnings on using the old names. Rather than set each quota limit value individually, we can just use the noop quota driver for the same effect. 2. The enabled_filters list for the scheduler was last updated when using the fake virt driver back in Juno via Ic7ec87e4d497d9db58eec93f2b304fe9770a2bbc - with the Placement service, we don't need the CoreFilter, RamFilter or DiskFilter. Also, in general, we just don't need to hard-code a list of scheduler filters when using the fake virt driver. If one needs to set their own scheduler filter list, they can do so using the $FILTERS variable (or post-config for nova.conf). 3. The largeops job, which ran the Tempest scenario tests, has been gone for a few years now, as have the Tempest scenario tests, so the API_WORKERS modification when using the fake virt driver should be removed. If we had a CI job like the largeops job today, we would set the worker config via the job rather than in devstack. 
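On point 1, the per-resource "-1" settings collapse into a single option; the fake-virt configuration now just does::

    # disable quota enforcement entirely for the fake virt driver
    iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver

as shown in the hypervisor-fake diff below, while point 2 simply drops the hard-coded enabled_filters line so nova's defaults (or $FILTERS) apply.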
Change-Id: I8d2bb2af40b5db8a555482a0852b1604aec29f15 --- lib/nova_plugins/hypervisor-fake | 13 +------------ stackrc | 7 +------ 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 49c8dee83a..87ee49fa4b 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -38,18 +38,7 @@ function cleanup_nova_hypervisor { function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver" # Disable arbitrary limits - iniset $NOVA_CONF DEFAULT quota_instances -1 - iniset $NOVA_CONF DEFAULT quota_cores -1 - iniset $NOVA_CONF DEFAULT quota_ram -1 - iniset $NOVA_CONF DEFAULT quota_floating_ips -1 - iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 - iniset $NOVA_CONF DEFAULT quota_metadata_items -1 - iniset $NOVA_CONF DEFAULT quota_injected_files -1 - iniset $NOVA_CONF DEFAULT quota_injected_file_path_length -1 - iniset $NOVA_CONF DEFAULT quota_security_groups -1 - iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 - iniset $NOVA_CONF DEFAULT quota_key_pairs -1 - iniset $NOVA_CONF filter_scheduler enabled_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter,RamFilter,DiskFilter" + iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } # install_nova_hypervisor() - Install external components diff --git a/stackrc b/stackrc index 3c4e4370e8..6c4d7d6848 100644 --- a/stackrc +++ b/stackrc @@ -800,12 +800,7 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # the memory used where there are a large number of CPUs present # (the default number of workers for many services is the number of CPUs) # Also sets the minimum number of workers to 2. -if [[ "$VIRT_DRIVER" = 'fake' ]]; then - # we need more workers for the large ops job - API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))} -else - API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} -fi +API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} From c7c67658c1daa40bfcdddfc99d1e05a70d205e66 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Mon, 4 Jun 2018 10:59:57 +0200 Subject: [PATCH 0900/1936] iniset: fix handling of keys with spaces Ceph for example uses them. Creation already worked, but not updates of existing keys. 
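As a quick illustration of the behaviour being fixed, iniset/iniget can
be exercised against a scratch file; a minimal sketch, where the
Ceph-style section and key names are only examples:

    source inc/ini-config
    conf=/tmp/ceph.conf.test
    iniset $conf client.rgw "rgw special key" first    # creating the key already worked
    iniset $conf client.rgw "rgw special key" second   # updating it only works with this fix
    iniget $conf client.rgw "rgw special key"          # prints: second
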
Closes-Bug: 1774956 Change-Id: I20cb61c08079b9cd9ad56ac875525abf1442bff6 --- inc/ini-config | 2 +- tests/test_ini_config.sh | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index 68d48d197b..6fe7788158 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -200,7 +200,7 @@ $option = $value local sep sep=$(echo -ne "\x01") # Replace it - $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('"${option}"'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi $xtrace } diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index a5e110736e..f7dc89a28d 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,9 @@ empty = multi = foo1 multi = foo2 +[key_with_spaces] +rgw special key = something + # inidelete(a) [del_separate_options] a=b @@ -82,8 +85,9 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee del_separate_options \ -del_same_option del_missing_option del_missing_option_multi del_no_options" +assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \ +del_separate_options del_same_option del_missing_option \ +del_missing_option_multi del_no_options" # Test with missing arguments BEFORE=$(cat ${TEST_INI}) @@ -209,6 +213,20 @@ iniset $SUDO_ARG ${INI_TMP_ETC_DIR}/test.new.ini test foo bar VAL=$(iniget ${INI_TMP_ETC_DIR}/test.new.ini test foo) assert_equal "$VAL" "bar" "iniset created file" +# test creation of keys with spaces +iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key" somethingelse +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key") +assert_equal "$VAL" "somethingelse" "iniset created a key with spaces" + +# test update of keys with spaces +iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw special key" newvalue +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw special key") +assert_equal "$VAL" "newvalue" "iniset updated a key with spaces" + +inidelete ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key" +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key") +assert_empty VAL "inidelete removed a key with spaces" + $SUDO rm -rf ${INI_TMP_DIR} report_results From 51aec325e6252703371ab001bea0853af05ca2d8 Mon Sep 17 00:00:00 2001 From: qingszhao Date: Tue, 12 Jun 2018 08:25:20 +0800 Subject: [PATCH 0901/1936] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. Change-Id: Id83cb3cdd62517045c45388f88cb3de0e3d75da1 --- tox.ini | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tox.ini b/tox.ini index 74436b0f26..f643fdb930 100644 --- a/tox.ini +++ b/tox.ini @@ -8,6 +8,7 @@ usedevelop = False install_command = pip install {opts} {packages} [testenv:bashate] +basepython = python3 # if you want to test out some changes you have made to bashate # against devstack, just set BASHATE_INSTALL_PATH=/path/... 
to your # modified bashate tree @@ -34,6 +35,7 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] +basepython = python3 deps = -r{toxinidir}/doc/requirements.txt whitelist_externals = bash setenv = @@ -42,5 +44,6 @@ commands = python setup.py build_sphinx [testenv:venv] +basepython = python3 deps = -r{toxinidir}/doc/requirements.txt commands = {posargs} From 0417858afa5cb65726579640231019de2215e530 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 12 Jun 2018 15:37:00 -0400 Subject: [PATCH 0902/1936] fix typo in python3_version The function was using an undefined variable to show the version of python3 being used. Change-Id: Ibc956975d620ed5174de8823f9c202a680c56aaf Signed-off-by: Doug Hellmann --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index b1b0995a79..25e28bd74a 100644 --- a/functions-common +++ b/functions-common @@ -2128,7 +2128,7 @@ function python_version { function python3_version { local python3_version python3_version=$(_get_python_version python3) - echo "python${python_version}" + echo "python${python3_version}" } From 7f33552d347f400fc1f2c290da3d3fa863197cee Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 14 Jun 2018 21:11:10 +1000 Subject: [PATCH 0903/1936] Switch to dnf when it exists This has all been around for a *long* time, like when dnf was a weird new thing. Now it's the opposite and yum is a weird old thing :) Choose it by default for platforms with it (Fedora, for now). Change-Id: Id2bd7d145354b996de31944929fd0267ec24a08e --- stackrc | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/stackrc b/stackrc index 3c4e4370e8..66d6f238a5 100644 --- a/stackrc +++ b/stackrc @@ -819,11 +819,14 @@ SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} -# Support alternative yum -- in future Fedora 'dnf' will become the -# only supported installer, but for now 'yum' and 'dnf' are both -# available in parallel with compatible CLIs. Allow manual switching -# till we get to the point we need to handle this automatically -YUM=${YUM:-yum} +# Choose DNF on RedHat/Fedora platforms with it, or otherwise default +# to YUM. Can remove this when only dnf is supported (i.e. centos7 +# disappears) +if [[ -e /usr/bin/dnf ]]; then + YUM=${YUM:-dnf} +else + YUM=${YUM:-yum} +fi # Common Configuration # -------------------- From 8e5f8c29b27c8dc83da31fbce5d92a173b91ba10 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 15 Jun 2018 10:10:35 -0700 Subject: [PATCH 0904/1936] Add the project under test to LIBS_FROM_GIT This automatically always adds the project under test to LIBS_FROM_GIT which effectively makes the normal "tempest full" job the same as the "forward testing" job when it is applied to a library repo. 
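The automatically generated value can still be overridden by setting
LIBS_FROM_GIT explicitly in devstack_localrc, which ends up as a plain
localrc assignment; a minimal sketch (the project names are only
examples):

    # local.conf / localrc override of the auto-generated list
    LIBS_FROM_GIT=oslo.messaging,glance
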
Change-Id: Ibbdd8a86e0ff55f67bef73e08e693b34a61b24df --- roles/write-devstack-local-conf/README.rst | 11 ++++++----- .../library/devstack_local_conf.py | 19 +++++++++++++------ .../write-devstack-local-conf/library/test.py | 17 ++++++++++++----- .../write-devstack-local-conf/tasks/main.yaml | 1 + 4 files changed, 32 insertions(+), 16 deletions(-) diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst index bfce9c98cd..e9739cdea8 100644 --- a/roles/write-devstack-local-conf/README.rst +++ b/roles/write-devstack-local-conf/README.rst @@ -22,11 +22,12 @@ Write the local.conf file for use by devstack As a special case, the variable ``LIBS_FROM_GIT`` will be constructed automatically from the projects which appear in the - ``required-projects`` list defined by the job. To instruct - devstack to install a library from source rather than pypi, simply - add that library to the job's ``required-projects`` list. To - override the automatically-generated value, set ``LIBS_FROM_GIT`` - in ``devstack_localrc`` to the desired value. + ``required-projects`` list defined by the job plus the project of + the change under test. To instruct devstack to install a library + from source rather than pypi, simply add that library to the job's + ``required-projects`` list. To override the + automatically-generated value, set ``LIBS_FROM_GIT`` in + ``devstack_localrc`` to the desired value. .. zuul:rolevar:: devstack_local_conf :type: dict diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 9728fef37b..bba7e31f96 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -207,12 +207,13 @@ def getPlugins(self): class LocalConf(object): def __init__(self, localrc, localconf, base_services, services, plugins, - base_dir, projects): + base_dir, projects, project): self.localrc = [] self.meta_sections = {} self.plugin_deps = {} self.base_dir = base_dir self.projects = projects + self.project = project if plugins: self.handle_plugins(plugins) if services or base_services: @@ -249,11 +250,15 @@ def handle_localrc(self, localrc): if k == 'LIBS_FROM_GIT': lfg = True - if not lfg and self.projects: + if not lfg and (self.projects or self.project): required_projects = [] - for project_name, project_info in self.projects.items(): - if project_info.get('required'): - required_projects.append(project_info['short_name']) + if self.projects: + for project_name, project_info in self.projects.items(): + if project_info.get('required'): + required_projects.append(project_info['short_name']) + if self.project: + if self.project['short_name'] not in required_projects: + required_projects.append(self.project['short_name']) if required_projects: self.localrc.append('LIBS_FROM_GIT={}'.format( ','.join(required_projects))) @@ -291,6 +296,7 @@ def main(): base_dir=dict(type='path'), path=dict(type='str'), projects=dict(type='dict'), + project=dict(type='dict'), ) ) @@ -301,7 +307,8 @@ def main(): p.get('services'), p.get('plugins'), p.get('base_dir'), - p.get('projects')) + p.get('projects'), + p.get('project')) lc.write(p['path']) module.exit_json() diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 7ccb68f08d..791552d1ad 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -57,7 
+57,8 @@ def test_plugins(self): p.get('services'), p.get('plugins'), p.get('base_dir'), - p.get('projects')) + p.get('projects'), + p.get('project')) lc.write(p['path']) plugins = [] @@ -120,17 +121,22 @@ def test_libs_from_git(self): 'short_name': 'devstack-plugin', }, } + project = { + 'short_name': 'glance', + } p = dict(base_services=[], base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), - projects=projects) + projects=projects, + project=project) lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), - p.get('projects')) + p.get('projects'), + p.get('project')) lc.write(p['path']) lfg = None @@ -138,7 +144,7 @@ def test_libs_from_git(self): for line in f: if line.startswith('LIBS_FROM_GIT'): lfg = line.strip().split('=')[1] - self.assertEqual('nova,oslo.messaging', lfg) + self.assertEqual('nova,oslo.messaging,glance', lfg) def test_overridelibs_from_git(self): "Test that LIBS_FROM_GIT can be overridden" @@ -168,7 +174,8 @@ def test_overridelibs_from_git(self): p.get('services'), p.get('plugins'), p.get('base_dir'), - p.get('projects')) + p.get('projects'), + p.get('project')) lc.write(p['path']) lfg = None diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml index a294cae608..9a6b083a2f 100644 --- a/roles/write-devstack-local-conf/tasks/main.yaml +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -10,3 +10,4 @@ local_conf: "{{ devstack_local_conf|default(omit) }}" base_dir: "{{ devstack_base_dir|default(omit) }}" projects: "{{ zuul.projects }}" + project: "{{ zuul.project }}" \ No newline at end of file From a7d0c6fa2c443b2b4b5f4680faff09c6b2bd00d2 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Mon, 18 Jun 2018 15:06:48 +0000 Subject: [PATCH 0905/1936] Use `member` instead of `Member` Keystone now provides a set of default roles in addition to `admin` by default [0]. This is done during the `keystone-manage bootstrap` process. This change aligns the `Member` role override from devstack with the `member` role provided from keystone. 
[0] https://review.openstack.org/#/c/572243/ Change-Id: I3da3530aa73a8a1500116bcefdcba7b947d5e05e Closes-Bug: 1777359 --- lib/horizon | 2 +- lib/keystone | 29 ++++++++++------------------- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/lib/horizon b/lib/horizon index fab41bbeca..293a627c78 100644 --- a/lib/horizon +++ b/lib/horizon @@ -87,7 +87,7 @@ function configure_horizon { _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\" _horizon_config_set $local_settings "" COMPRESS_OFFLINE True - _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\" + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"member\" _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" diff --git a/lib/keystone b/lib/keystone index 696e351ab0..7978feaf16 100644 --- a/lib/keystone +++ b/lib/keystone @@ -309,30 +309,32 @@ function configure_keystone { # service -- -- # -- -- service # -- -- ResellerAdmin -# -- -- Member +# -- -- member # demo admin admin -# demo demo Member, anotherrole +# demo demo member, anotherrole # alt_demo admin admin -# alt_demo alt_demo Member, anotherrole -# invisible_to_admin demo Member +# alt_demo alt_demo member, anotherrole +# invisible_to_admin demo member # Group Users Roles Project # ------------------------------------------------------------------ # admins admin admin admin -# nonadmins demo, alt_demo Member, anotherrole demo, alt_demo +# nonadmins demo, alt_demo member, anotherrole demo, alt_demo # Migrated from keystone_data.sh function create_keystone_accounts { - # The keystone bootstrapping process (performed via keystone-manage bootstrap) - # creates an admin user, admin role and admin project. As a sanity check - # we exercise the CLI to retrieve the IDs for these values. + # The keystone bootstrapping process (performed via keystone-manage + # bootstrap) creates an admin user, admin role, member role, and admin + # project. As a sanity check we exercise the CLI to retrieve the IDs for + # these values. local admin_project admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) local admin_role="admin" + local member_role="member" get_or_add_user_domain_role $admin_role $admin_user default @@ -349,17 +351,6 @@ function create_keystone_accounts { # role is also configurable in swift-proxy.conf get_or_create_role ResellerAdmin - # The Member role is used by Horizon and Swift so we need to keep it: - local member_role="member" - - # Capital Member role is legacy hard coded in Horizon / Swift - # configs. Keep it around. - get_or_create_role "Member" - - # The reality is that the rest of the roles listed below honestly - # should work by symbolic names. - get_or_create_role $member_role - # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! local another_role="anotherrole" From e95f2a36645b58b172855213cb8311a3486bfcd9 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 18 Jun 2018 16:17:29 -0400 Subject: [PATCH 0906/1936] Configure [neutron] in nova_cell*.conf The nova-conductor service running in the cell needs to be configured to talk to neutron for things like deallocating networks during server build failure. 
This changes the configure_neutron_nova flows such that the top-level nova.conf is configured as before, but we also configure each nova_cell*.conf cell conductor config files to also be able to talk to neutron. Change-Id: Ic5e17298996b5fb085272425bb3b68583247aa34 Closes-Bug: #1777505 --- lib/neutron | 48 ++++++++++++++++++++++++++------------- lib/neutron-legacy | 31 +++++++++++++------------ lib/neutron_plugins/nuage | 5 ++-- 3 files changed, 51 insertions(+), 33 deletions(-) diff --git a/lib/neutron b/lib/neutron index 3cad80a414..b857e3120d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -325,25 +325,27 @@ function configure_neutron_rootwrap { } # Make Neutron-required changes to nova.conf +# Takes a single argument which is the config file to update. function configure_neutron_nova_new { - iniset $NOVA_CONF DEFAULT use_neutron True - iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $NOVA_CONF neutron username neutron - iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron user_domain_name "Default" - iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF neutron project_domain_name "Default" - iniset $NOVA_CONF neutron auth_strategy $NEUTRON_AUTH_STRATEGY - iniset $NOVA_CONF neutron region_name "$REGION_NAME" - - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver + local conf="$1" + iniset $conf DEFAULT use_neutron True + iniset $conf neutron auth_type "password" + iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf neutron username neutron + iniset $conf neutron password "$SERVICE_PASSWORD" + iniset $conf neutron user_domain_name "Default" + iniset $conf neutron project_name "$SERVICE_TENANT_NAME" + iniset $conf neutron project_domain_name "Default" + iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY + iniset $conf neutron region_name "$REGION_NAME" + + iniset $conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver # optionally set options in nova_conf - neutron_plugin_create_nova_conf + neutron_plugin_create_nova_conf $conf if is_service_enabled neutron-metadata-agent; then - iniset $NOVA_CONF neutron service_metadata_proxy "True" + iniset $conf neutron service_metadata_proxy "True" fi } @@ -568,9 +570,23 @@ function configure_neutron { function configure_neutron_nova { if is_neutron_legacy_enabled; then # Call back to old function - create_nova_conf_neutron "$@" + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done + fi else - configure_neutron_nova_new "$@" + configure_neutron_nova_new $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + configure_neutron_nova_new $conf + done + fi fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0cd7e31d39..2e8992de61 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -366,31 +366,32 @@ function configure_mutnauq { } function create_nova_conf_neutron { - iniset $NOVA_CONF DEFAULT use_neutron True - iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI" - iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron user_domain_name "$SERVICE_DOMAIN_NAME" - 
iniset $NOVA_CONF neutron project_name "$SERVICE_PROJECT_NAME" - iniset $NOVA_CONF neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF neutron region_name "$REGION_NAME" + local conf="$1" + iniset $conf DEFAULT use_neutron True + iniset $conf neutron auth_type "password" + iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI" + iniset $conf neutron username "$Q_ADMIN_USERNAME" + iniset $conf neutron password "$SERVICE_PASSWORD" + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" + iniset $conf neutron region_name "$REGION_NAME" if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER fi # optionally set options in nova_conf - neutron_plugin_create_nova_conf + neutron_plugin_create_nova_conf $conf if is_service_enabled q-meta; then - iniset $NOVA_CONF neutron service_metadata_proxy "True" + iniset $conf neutron service_metadata_proxy "True" fi - iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } # create_mutnauq_accounts() - Set up common required neutron accounts diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 1c04aaac9a..f39c7c4f5b 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -8,10 +8,11 @@ _XTRACE_NEUTRON_NU=$(set +o | grep xtrace) set +o xtrace function neutron_plugin_create_nova_conf { + local conf="$1" NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} - iniset $NOVA_CONF neutron ovs_bridge $NOVA_OVS_BRIDGE + iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER } function neutron_plugin_install_agent_packages { From 2f5771541209b6016c07c25d6808e1e3162f9d3b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 22 Jun 2018 15:17:05 +1000 Subject: [PATCH 0907/1936] Remove old packages from rpms/horizon I'm not sure what the history of the (capital-D) Django package or pyxattr; they've been there for a long time and should just be installed like other dependencies these days. 
Change-Id: I423230cc5cbb13d2cfb7b926a9571a8157ce5c46 --- files/rpms/horizon | 2 -- 1 file changed, 2 deletions(-) diff --git a/files/rpms/horizon b/files/rpms/horizon index fa5601a95f..a88552bc84 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,4 +1,2 @@ -Django httpd # NOPRIME mod_wsgi # NOPRIME -pyxattr From 53e9aca6ce196d874b96829c32b51f8112174d69 Mon Sep 17 00:00:00 2001 From: Sumit Jamgade Date: Wed, 13 Jun 2018 17:30:41 +0200 Subject: [PATCH 0908/1936] install and start elasticsearch on openSUSE this will allow install and start of elasticsearch on openSUSE based distributions Change-Id: I4d778c260247e73b500ca7d17835655c21941541 --- pkg/elasticsearch.sh | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index fefd454312..afbf11de3d 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -37,7 +37,7 @@ function wget_elasticsearch { function download_elasticsearch { if is_ubuntu; then wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb - elif is_fedora; then + elif is_fedora || is_suse; then wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm fi } @@ -61,6 +61,9 @@ function start_elasticsearch { elif is_fedora; then sudo /bin/systemctl start elasticsearch.service _check_elasticsearch_ready + elif is_suse; then + sudo /usr/bin/systemctl start elasticsearch.service + _check_elasticsearch_ready else echo "Unsupported architecture...can not start elasticsearch." fi @@ -71,6 +74,8 @@ function stop_elasticsearch { sudo /etc/init.d/elasticsearch stop elif is_fedora; then sudo /bin/systemctl stop elasticsearch.service + elif is_suse ; then + sudo /usr/bin/systemctl stop elasticsearch.service else echo "Unsupported architecture...can not stop elasticsearch." fi @@ -92,6 +97,11 @@ function install_elasticsearch { yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm sudo /bin/systemctl daemon-reload sudo /bin/systemctl enable elasticsearch.service + elif is_suse; then + is_package_installed java-1_8_0-openjdk-headless || install_package java-1_8_0-openjdk-headless + zypper_install --no-gpg-checks ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm + sudo /usr/bin/systemctl daemon-reload + sudo /usr/bin/systemctl enable elasticsearch.service else echo "Unsupported install of elasticsearch on this architecture." fi @@ -103,6 +113,8 @@ function uninstall_elasticsearch { sudo apt-get purge elasticsearch elif is_fedora; then sudo yum remove elasticsearch + elif is_suse; then + sudo zypper rm elasticsearch else echo "Unsupported install of elasticsearch on this architecture." fi From 78dff2852b4070168e103976aa3e7d72beb098b0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 22 Jun 2018 22:17:00 +1000 Subject: [PATCH 0909/1936] Update all rpms for Fedora 28 Bring along the required rpms for the ride on Fedora 28 (we really should find a way to maybe do f* or something to avoid this ... 
consider it a todo :) Change-Id: I37fd38de9baab478c86d23ea2cebca59dc8a5ed1 --- files/rpms/cinder | 4 ++-- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index e6addc62aa..058c2354dc 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f25,f26,f27 NOPRIME -targetcli # dist:rhel7,f25,f26,f27 NOPRIME +scsi-target-utils # not:rhel7,f25,f26,f27,f28 NOPRIME +targetcli # dist:rhel7,f25,f26,f27,f28 NOPRIME diff --git a/files/rpms/general b/files/rpms/general index 5d9a4ad5a5..c7863e4320 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f25,f26,f27 +iptables-services # NOPRIME f25,f26,f27,f28 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27 +java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index 9fb7282df5..4140cd7bae 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f25,f26,f27 +kernel-modules # dist:f25,f26,f27,f28 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index be0db140e3..f2f5de69b0 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f25,f26,f27 +rsync-daemon # dist:f25,f26,f27,f28 sqlite xfsprogs xinetd From e63859362487856f1f53c173909cb8aa3bb55bab Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 28 Jun 2018 11:00:28 +0100 Subject: [PATCH 0910/1936] Make configure_neutron_nova_new and create_nova_conf_neutron param optional The commit e95f2a36645b58b172855213cb8311a3486bfcd9 broke networking-ovn (and potentially other ml2 drivers) by making the config parameter mandatory. It doesn't need to be. Change-Id: I0d5738ac3a6d27ddb7655835d77689409a6ff6f4 --- lib/neutron | 5 +++-- lib/neutron-legacy | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index b857e3120d..9f9b132377 100644 --- a/lib/neutron +++ b/lib/neutron @@ -325,9 +325,10 @@ function configure_neutron_rootwrap { } # Make Neutron-required changes to nova.conf -# Takes a single argument which is the config file to update. +# Takes a single optional argument which is the config file to update, +# if not passed $NOVA_CONF is used. 
 function configure_neutron_nova_new {
-    local conf="$1"
+    local conf=${1:-$NOVA_CONF}
     iniset $conf DEFAULT use_neutron True
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 2e8992de61..bee032a704 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -366,7 +366,7 @@ function configure_mutnauq {
 }

 function create_nova_conf_neutron {
-    local conf="$1"
+    local conf=${1:-$NOVA_CONF}
     iniset $conf DEFAULT use_neutron True
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI"

From 6f38cf4ad846f394489334caae7a38b8e49b646c Mon Sep 17 00:00:00 2001
From: Prabhat Ranjan
Date: Fri, 16 Mar 2018 16:33:46 +0530
Subject: [PATCH 0911/1936] Fix wait_for_compute to work for modified compute host name

When the compute host name is changed, devstack breaks because
wait_for_compute uses the default host name from the host instead of
the configured one.

The compute host name can be changed in local.conf like this:

[[post-config|$NOVA_CONF]]
[DEFAULT]
host = foo

Change-Id: I4d4392f1f58f0431b10764610668565af88d392f
Signed-off-by: Prabhat Ranjan
---
 functions | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/functions b/functions
index 24994c0470..f63595d433 100644
--- a/functions
+++ b/functions
@@ -441,7 +441,12 @@ EOF
 function wait_for_compute {
     local timeout=$1
     local rval=0
+    local compute_hostname
     time_start "wait_for_service"
+    compute_hostname=$(iniget $NOVA_CONF DEFAULT host)
+    if [[ -z $compute_hostname ]]; then
+        compute_hostname=$(hostname)
+    fi
     timeout $timeout bash -x <

Date: Tue, 3 Jul 2018 07:14:16 -0700
Subject: [PATCH 0912/1936] Keystone no longer uses paste-ini

With the move to flask, Keystone does not utilize paste-ini. This
patchset removes the paste-ini support from devstack for Keystone.
Change-Id: I8dd629937c9178660992fd648175dbef80ffa3c2 --- lib/keystone | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/lib/keystone b/lib/keystone index 7978feaf16..28603257dd 100644 --- a/lib/keystone +++ b/lib/keystone @@ -49,7 +49,6 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf -KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public @@ -203,23 +202,7 @@ function configure_keystone { if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then install -m 600 /dev/null $KEYSTONE_CONF - if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then - cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" - fi fi - if [[ -f "$KEYSTONE_PASTE_INI" ]]; then - iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI" - else - # compatibility with mixed cfg and paste.deploy configuration - KEYSTONE_PASTE_INI="$KEYSTONE_CONF" - fi - - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - # Only Identity v3 API should be available; then disable v2 pipelines - inidelete $KEYSTONE_PASTE_INI composite:main \\/v2.0 - inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0 - fi - # Populate ``keystone.conf`` if is_service_enabled ldap; then iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains" From 19eefe5e61187bab5fb893d51e0f771101441b4a Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Wed, 4 Jul 2018 17:59:34 +0200 Subject: [PATCH 0913/1936] Increase the Elasticsearch service timeout On some slow system with recent version of Elasticsearch we saw that the service becomes ready after more than 1 minute. Change-Id: Id2b21ab24a96d10fffdcccd652a7d3ec4e8ce39c --- pkg/elasticsearch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index afbf11de3d..bd4415315f 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -49,7 +49,7 @@ function configure_elasticsearch { function _check_elasticsearch_ready { # poll elasticsearch to see if it's started - if ! wait_for_service 30 http://localhost:9200; then + if ! wait_for_service 120 http://localhost:9200; then die $LINENO "Maximum timeout reached. 
Could not connect to ElasticSearch" fi } From c3a33fb6911a5618eb1dc2e7448652ab844a191a Mon Sep 17 00:00:00 2001 From: Maxim Nestratov Date: Wed, 4 Jul 2018 22:07:20 +0300 Subject: [PATCH 0914/1936] Fix Virtuozzo CI Don't specify hypervisor_type=vz property for hds images as nova now reports only really connected hypervisors and not all supported by a compute host Change-Id: Ibec0856519ffe593d44d123f3b401eae19f7d95a --- functions | 1 - 1 file changed, 1 deletion(-) diff --git a/functions b/functions index f63595d433..051c8160f7 100644 --- a/functions +++ b/functions @@ -282,7 +282,6 @@ function upload_image { image create \ "$image_name" --public \ --container-format=bare --disk-format=ploop \ - --property hypervisor_type=vz \ --property vm_mode=$vm_mode < "${image}" return fi From c58c5d60ce5fc1a0c8bee3eb3ba4da790ba92475 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 10 Jul 2018 06:24:09 +0000 Subject: [PATCH 0915/1936] Updated from generate-devstack-plugins-list Change-Id: Ief0f093a0612e89d07469cb9d0201556e455dfa5 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9b2cb7eec1..2d5259d51e 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -60,6 +60,7 @@ drbd-devstack `git://git.openstack.org/openstack/drbd-d ec2-api `git://git.openstack.org/openstack/ec2-api `__ freezer `git://git.openstack.org/openstack/freezer `__ freezer-api `git://git.openstack.org/openstack/freezer-api `__ +freezer-tempest-plugin `git://git.openstack.org/openstack/freezer-tempest-plugin `__ freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__ fuxi `git://git.openstack.org/openstack/fuxi `__ gce-api `git://git.openstack.org/openstack/gce-api `__ From ef3571338ababf4627c32118fc082f195a9e136c Mon Sep 17 00:00:00 2001 From: Paul Belanger Date: Tue, 10 Jul 2018 14:22:48 -0400 Subject: [PATCH 0916/1936] use fqdn for zuul projects When setting up a 3pci zuul, there is an edge case where a downstream zuul may already have openstack/foo projects, eg: review.rdoproject.org/openstack/foo. In this case, if openstack projects are not namespaced to include the connection information zuul gets confused and complains. We can avoid this by using the fqdn for git.o.o for devstack jobs and both upstream and downstream zuul will properly use the correct connection. Change-Id: I01419ea9f51ce7491aa319b6240aec9c0d4f2356 Signed-off-by: Paul Belanger --- .zuul.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index c8bb337d2f..57cbf88e1d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -103,10 +103,10 @@ job.group-vars.peers, which is what is used by multi node jobs for subnode nodes (everything but the controller). required-projects: - - openstack-dev/devstack + - git.openstack.org/openstack-dev/devstack roles: - - zuul: openstack-infra/devstack-gate - - zuul: openstack-infra/openstack-zuul-jobs + - zuul: git.openstack.org/openstack-infra/devstack-gate + - zuul: git.openstack.org/openstack-infra/openstack-zuul-jobs vars: devstack_localrc: DATABASE_PASSWORD: secretdatabase @@ -211,7 +211,7 @@ less than the normal minimum set of required-projects. nodeset: openstack-single-node required-projects: - - openstack/requirements + - git.openstack.org/openstack/requirements vars: devstack_localrc: # Multinode specific settings @@ -270,12 +270,12 @@ and extended. 
nodeset: openstack-single-node required-projects: - - openstack/cinder - - openstack/glance - - openstack/keystone - - openstack/neutron - - openstack/nova - - openstack/swift + - git.openstack.org/openstack/cinder + - git.openstack.org/openstack/glance + - git.openstack.org/openstack/keystone + - git.openstack.org/openstack/neutron + - git.openstack.org/openstack/nova + - git.openstack.org/openstack/swift timeout: 7200 vars: devstack_localrc: From 274287598252b04f0ddf9741d7145a71cbb953a1 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Tue, 10 Jul 2018 09:21:46 +0000 Subject: [PATCH 0917/1936] Cleanup keystone's removed config options token.provider.drvier.uuid and token.driver has been removed from keystone[1]. Devstack has reference/setting of those config options which is confusing for user and it can lead to import error like[2] This commit cleanup the devstack bits of removed config options. bp removed-as-of-rocky [1] https://blueprints.launchpad.net/keystone/+spec/removed-as-of-rocky [2] http://paste.openstack.org/show/725391/ Change-Id: I29b3b356622c485c4c1046679234a38e7b645071 --- lib/keystone | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/lib/keystone b/lib/keystone index 28603257dd..57cb24d4cb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -63,9 +63,6 @@ else KEYSTONE_DEPLOY=mod_wsgi fi -# Select the token persistence backend driver -KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} - # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -79,7 +76,8 @@ KEYSTONE_ROLE_BACKEND=${KEYSTONE_ROLE_BACKEND:-sql} KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} # Select Keystone's token provider (and format) -# Choose from 'uuid', 'pki', 'pkiz', or 'fernet' +# Refer keystone doc for supported token provider: +# https://docs.openstack.org/keystone/latest/admin/token-provider.html KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') @@ -245,8 +243,6 @@ function configure_keystone { iniset $KEYSTONE_CONF database connection `database_connection_url keystone` - iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND" - # Set up logging if [ "$SYSLOG" != "False" ]; then iniset $KEYSTONE_CONF DEFAULT use_syslog "True" @@ -443,11 +439,6 @@ function init_keystone { $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync time_stop "dbsync" - if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then - # Set up certificates - rm -rf $KEYSTONE_CONF_DIR/ssl - $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF pki_setup - fi if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup From 3b5477d6356a62d7d64a519a4b1ac99309d251c0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 12 Jul 2018 06:17:32 +0000 Subject: [PATCH 0918/1936] Updated from generate-devstack-plugins-list Change-Id: I8f702373c76953a0a29285f410d368c975ba4024 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2d5259d51e..7ad65f741b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -100,6 +100,7 @@ monasca-tempest-plugin `git://git.openstack.org/openstack/monasc monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ murano 
`git://git.openstack.org/openstack/murano `__ networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ +networking-ansible `git://git.openstack.org/openstack/networking-ansible `__ networking-arista `git://git.openstack.org/openstack/networking-arista `__ networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe `__ networking-baremetal `git://git.openstack.org/openstack/networking-baremetal `__ From e484f3b1c8086567ce5dc1e55ee503def8e5b429 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 10 Jul 2018 16:28:44 +0200 Subject: [PATCH 0919/1936] Remove devstack exercises These seem to be not run for quite some time and they don't succeed anymore - drop the code to avoid somebody accidentally running it and wondering. A good example of "if it isn't tested its broken". Depends-On: https://review.openstack.org/583146 Depends-On: https://review.openstack.org/583147 Change-Id: I99e8a5ca2925217a5a2401984f3f4f6f032017be --- HACKING.rst | 86 ----- doc/source/configuration.rst | 41 +-- doc/source/guides/single-machine.rst | 4 - doc/source/overview.rst | 8 - exercise.sh | 74 ----- exerciserc | 26 -- exercises/aggregates.sh | 150 --------- exercises/boot_from_volume.sh | 224 ------------- exercises/client-args.sh | 174 ---------- exercises/client-env.sh | 171 ---------- exercises/floating_ips.sh | 216 ------------- exercises/neutron-adv-test.sh | 466 --------------------------- exercises/sec_groups.sh | 81 ----- exercises/swift.sh | 69 ---- exercises/volumes.sh | 225 ------------- 15 files changed, 1 insertion(+), 2014 deletions(-) delete mode 100755 exercise.sh delete mode 100644 exerciserc delete mode 100755 exercises/aggregates.sh delete mode 100755 exercises/boot_from_volume.sh delete mode 100755 exercises/client-args.sh delete mode 100755 exercises/client-env.sh delete mode 100755 exercises/floating_ips.sh delete mode 100755 exercises/neutron-adv-test.sh delete mode 100755 exercises/sec_groups.sh delete mode 100755 exercises/swift.sh delete mode 100755 exercises/volumes.sh diff --git a/HACKING.rst b/HACKING.rst index d5d6fbcf02..38e2dac131 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -50,10 +50,6 @@ level. ``tools/build_docs.sh`` is used to generate the HTML versions of the DevStack scripts. A complete doc build can be run with ``tox -edocs``. -``exercises`` - Contains the test scripts used to sanity-check and -demonstrate some OpenStack functions. These scripts know how to exit -early or skip services that are not enabled. - ``extras.d`` - Contains the dispatch scripts called by the hooks in ``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins docs ` for more information. @@ -183,88 +179,6 @@ The complete docs build is also handled with tox -edocs per the OpenStack project standard. -Exercises ---------- - -The scripts in the exercises directory are meant to 1) perform basic operational -checks on certain aspects of OpenStack; and b) document the use of the -OpenStack command-line clients. - -In addition to the guidelines above, exercise scripts MUST follow the structure -outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines. -These scripts are executed serially by ``exercise.sh`` in testing situations. - -* Begin and end with a banner that stands out in a sea of script logs to aid - in debugging failures, particularly in automated testing situations. If the - end banner is not displayed, the script ended prematurely and can be assumed - to have failed. 
- - :: - - echo "**************************************************" - echo "Begin DevStack Exercise: $0" - echo "**************************************************" - ... - set +o xtrace - echo "**************************************************" - echo "End DevStack Exercise: $0" - echo "**************************************************" - -* The scripts will generally have the shell ``xtrace`` attribute set to display - the actual commands being executed, and the ``errexit`` attribute set to exit - the script on non-zero exit codes:: - - # This script exits on an error so that errors don't compound and you see - # only the first error that occurred. - set -o errexit - - # Print the commands being run so that we can see the command that triggers - # an error. It is also useful for following as the install occurs. - set -o xtrace - -* Settings and configuration are stored in ``exerciserc``, which must be - sourced after ``openrc`` or ``stackrc``:: - - # Import exercise configuration - source $TOP_DIR/exerciserc - -* There are a couple of helper functions in the common ``functions`` sub-script - that will check for non-zero exit codes and unset environment variables and - print a message and exit the script. These should be called after most client - commands that are not otherwise checked to short-circuit long timeouts - (instance boot failure, for example):: - - swift post $CONTAINER - die_if_error "Failure creating container $CONTAINER" - - FLOATING_IP=`euca-allocate-address | cut -f2` - die_if_not_set FLOATING_IP "Failure allocating floating IP" - -* If you want an exercise to be skipped when for example a service wasn't - enabled for the exercise to be run, you can exit your exercise with the - special exitcode 55 and it will be detected as skipped. - -* The exercise scripts should only use the various OpenStack client binaries to - interact with OpenStack. This specifically excludes any ``*-manage`` tools - as those assume direct access to configuration and databases, as well as direct - database access from the exercise itself. - -* If specific configuration needs to be present for the exercise to complete, - it should be staged in ``stack.sh``, or called from ``stack.sh``. - -* The ``OS_*`` environment variables should be the only ones used for all - authentication to OpenStack clients as documented in the CLIAuth_ wiki page. - -.. _CLIAuth: https://wiki.openstack.org/CLIAuth - -* The exercise MUST clean up after itself if successful. If it is not successful, - it is assumed that state will be left behind; this allows a chance for developers - to look around and attempt to debug the problem. The exercise SHOULD clean up - or graciously handle possible artifacts left over from previous runs if executed - again. It is acceptable to require a reboot or even a re-install of DevStack - to restore a clean test environment. - - Bash Style Guidelines ~~~~~~~~~~~~~~~~~~~~~ DevStack defines a bash set of best practices for maintaining large diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 7efe4d6eee..46e50df6bb 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -665,8 +665,7 @@ following to your ``localrc`` section: enable_service n-cell Be aware that there are some features currently missing in cells, one -notable one being security groups. The exercises have been patched to -disable functionality not supported by cells. +notable one being security groups. Cinder ~~~~~~ @@ -729,44 +728,6 @@ use the v3 API. 
It is possible to setup keystone without v2 API, by doing: ENABLE_IDENTITY_V2=False -Exercises -~~~~~~~~~ - -``exerciserc`` is used to configure settings for the exercise scripts. -The values shown below are the default values. These can all be -overridden by setting them in the ``localrc`` section. - -* Max time to wait while vm goes from build to active state - - :: - - ACTIVE_TIMEOUT==30 - -* Max time to wait for proper IP association and dis-association. - - :: - - ASSOCIATE_TIMEOUT=15 - -* Max time till the vm is bootable - - :: - - BOOT_TIMEOUT=30 - -* Max time from run instance command until it is running - - :: - - RUNNING_TIMEOUT=$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT)) - -* Max time to wait for a vm to terminate - - :: - - TERMINATE_TIMEOUT=30 - - .. _arch-configuration: Architectures diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 48a4fa8b12..515ea9a9ee 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -127,7 +127,3 @@ computers on the local network. In this example that would be http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and if you give them floating IPs and security group access those VMs will be accessible from other machines on your network. - -Some examples of using the OpenStack command-line clients ``nova`` and -``glance`` are in the shakedown scripts in ``devstack/exercises``. -``exercise.sh`` will run all of those scripts and report on the results. diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 814a2b148b..2479cd0bc8 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -75,11 +75,3 @@ Node Configurations - single node - multi-node configurations as are tested by the gate - -Exercises ---------- - -The DevStack exercise scripts are no longer used as integration and gate -testing as that job has transitioned to Tempest. They are still -maintained as a demonstrations of using OpenStack from the command line -and for quick operational testing. diff --git a/exercise.sh b/exercise.sh deleted file mode 100755 index 90670333a1..0000000000 --- a/exercise.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -# **exercise.sh** - -# Keep track of the current DevStack directory. -TOP_DIR=$(cd $(dirname "$0") && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -# Run everything in the exercises/ directory that isn't explicitly disabled - -# comma separated list of script basenames to skip -# to refrain from exercising foo.sh use ``SKIP_EXERCISES=foo`` -SKIP_EXERCISES=${SKIP_EXERCISES:-""} - -# comma separated list of script basenames to run -# to run only foo.sh use ``RUN_EXERCISES=foo`` -basenames=${RUN_EXERCISES:-""} - -EXERCISE_DIR=$TOP_DIR/exercises - -if [[ -z "${basenames}" ]]; then - # Locate the scripts we should run - basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) -else - # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``. - SKIP_EXERCISES= -fi - -# Track the state of each script -passes="" -failures="" -skips="" - -# Loop over each possible script (by basename) -for script in $basenames; do - if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then - skips="$skips $script" - else - echo "=====================================================================" - echo Running $script - echo "=====================================================================" - $EXERCISE_DIR/$script.sh - exitcode=$? 
- if [[ $exitcode == 55 ]]; then - skips="$skips $script" - elif [[ $exitcode -ne 0 ]]; then - failures="$failures $script" - else - passes="$passes $script" - fi - fi -done - -# Output status of exercise run -echo "=====================================================================" -for script in $skips; do - echo SKIP $script -done -for script in $passes; do - echo PASS $script -done -for script in $failures; do - echo FAILED $script -done -echo "=====================================================================" - -if [[ -n "$failures" ]]; then - exit 1 -fi diff --git a/exerciserc b/exerciserc deleted file mode 100644 index 978e0b3791..0000000000 --- a/exerciserc +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# source exerciserc -# -# Configure the DevStack exercise scripts -# For best results, source this _after_ stackrc/localrc as it will set -# values only if they are not already set. - -# Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time to wait for proper IP association and dis-association. -export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} - -# Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time from run instance command until it is running -export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} - -# Max time to wait for a vm to terminate -export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} - -# The size of the volume we want to boot from; some storage back-ends -# do not allow a disk resize, so it's important that this can be tuned -export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh deleted file mode 100755 index 8cbca54fb2..0000000000 --- a/exercises/aggregates.sh +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env bash - -# **aggregates.sh** - -# This script demonstrates how to use host aggregates: -# -# * Create an Aggregate -# * Updating Aggregate details -# * Testing Aggregate metadata -# * Testing Aggregate delete -# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) -# * Testing add/remove hosts (with one host) - -echo "**************************************************" -echo "Begin DevStack Exercise: $0" -echo "**************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Test as the admin user -# note this imports stackrc/functions, etc -. $TOP_DIR/openrc admin admin - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Cells does not support aggregates. 
-is_service_enabled n-cell && exit 55 - -# Create an aggregate -# =================== - -AGGREGATE_NAME=test_aggregate_$RANDOM -AGGREGATE2_NAME=test_aggregate_$RANDOM -AGGREGATE_A_ZONE=nova - -function exit_if_aggregate_present { - aggregate_name=$1 - - if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then - echo "SUCCESS $aggregate_name not present" - else - die $LINENO "found aggregate: $aggregate_name" - exit -1 - fi -} - -exit_if_aggregate_present $AGGREGATE_NAME - -AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE" - -AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE" - -# check aggregate created -nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created" - - -# Ensure creating a duplicate fails -# ================================= - -if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then - die $LINENO "could create duplicate aggregate" -fi - - -# Test aggregate-update (and aggregate-details) -# ============================================= -AGGREGATE_NEW_NAME=test_aggregate_$RANDOM - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - - -# Test aggregate-set-metadata -# =========================== -META_DATA_1_KEY=asdf -META_DATA_2_KEY=foo -META_DATA_3_KEY=bar - -#ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep 123 - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY - -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - - -# Test aggregate-add/remove-host -# ============================== -if [ "$VIRT_DRIVER" == "xenserver" ]; then - echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" -fi -FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) -# Make sure can add two aggregates to same host -nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST -nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST -if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then - die $LINENO "could add duplicate host to single aggregate" -fi -nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST -nova 
aggregate-remove-host $AGGREGATE_ID $FIRST_HOST - -# Test aggregate-delete -# ===================== -nova aggregate-delete $AGGREGATE_ID -nova aggregate-delete $AGGREGATE2_ID -exit_if_aggregate_present $AGGREGATE_NAME - -set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh deleted file mode 100755 index 7478bdf138..0000000000 --- a/exercises/boot_from_volume.sh +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env bash - -# **boot_from_volume.sh** - -# This script demonstrates how to boot from a volume. It does the following: -# -# * Create a bootable volume -# * Boot a volume-backed instance - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled cinder || exit 55 - -# Ironic does not support boot from volume. -[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-bfv-inst} -VOL_NAME=${VOL_NAME:-ex-vol-bfv} - - -# Launching a server -# ================== - -# List servers for project: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi - -# Setup Keypair -KEY_NAME=test_key -KEY_FILE=key.pem -nova keypair-delete $KEY_NAME || true -nova keypair-add $KEY_NAME > $KEY_FILE -chmod 600 $KEY_FILE - -# Set up volume -# ------------- - -# Delete any old volume -cinder delete $VOL_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi - -# Create the bootable volume -start_time=$(date +%s) -cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Boot instance -# ------------- - -# Boot using the --block-device-mapping param. The format of mapping is: -# =::: -# Leaving the middle two fields blank appears to do-the-right-thing -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Clean up -# -------- - -# Delete volume backed instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $VM_NAME not deleted" - exit 1 -fi - -# Wait for volume to be released -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not released" - exit 1 -fi - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME" -if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/client-args.sh b/exercises/client-args.sh deleted file mode 100755 index b380968da8..0000000000 --- a/exercises/client-args.sh +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env bash - -# **client-args.sh** - -# Test OpenStack client authentication arguments handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME - -# Save the known variables for later -export x_PROJECT_NAME=$OS_PROJECT_NAME -export x_USERNAME=$OS_USERNAME -export x_PASSWORD=$OS_PASSWORD -export x_AUTH_URL=$OS_AUTH_URL - -# Unset the usual variables to force argument processing -unset OS_PROJECT_NAME -unset OS_USERNAME -unset OS_PASSWORD -unset OS_AUTH_URL - -# Common authentication args -PROJECT_ARG="--os-project-name=$x_PROJECT_NAME" -ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if openstack $PROJECT_ARG $ARGS catalog show identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova $PROJECT_ARG $ARGS flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder $PROJECT_ARG $ARGS list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ 
"$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if openstack $PROJECT_ARG $ARGS image list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift $PROJECT_ARG $ARGS stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh deleted file mode 100755 index fff04df9f2..0000000000 --- a/exercises/client-env.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env bash - -# **client-env.sh** - -# Test OpenStack client environment variable handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc admin - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME - -for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do - is_set $i - if [[ $? 
-ne 0 ]]; then - echo "$i expected to be set" - ABORT=1 - fi -done -if [[ -n "$ABORT" ]]; then - exit 1 -fi - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if openstack endpoint show identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if openstack image list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh deleted file mode 100755 index 5abc7137b3..0000000000 --- a/exercises/floating_ips.sh +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env bash - -# **floating_ips.sh** - using the cloud can be fun - -# Test instance connectivity with the ``nova`` command from ``python-novaclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-test_secgroup} - -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public} - -# Additional floating IP pool and range -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} - -# Instance name -VM_NAME="ex-float" - -# Cells does not support floating ips API calls -is_service_enabled n-cell && exit 55 - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -# Create a secgroup -if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - die $LINENO "Security group not created" - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" - exit 1 -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" 
-fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Floating IPs -# ------------ - -# Allocate a floating IP from the default pool -FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) -die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" - -# List floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" -fi - -# Add floating IP to our server -nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME" - -# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds -ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" - -if ! is_service_enabled neutron; then - # Allocate an IP from second floating pool - TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) - die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" - - # list floating addresses - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" - fi -fi - -# Dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ - die $LINENO "Failure deleting security group rule from $SECGROUP" - -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - die $LINENO "Security group rule not deleted from $SECGROUP" -fi - -# FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail -fi - -# Clean up -# -------- - -if ! is_service_enabled neutron; then - # Delete second floating IP - nova floating-ip-delete $TEST_FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP" -fi - -# Delete the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $FLOATING_IP" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -# Wait for termination -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -# Delete secgroup -nova secgroup-delete $SECGROUP || \ - die $LINENO "Failure deleting security group $SECGROUP" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh deleted file mode 100755 index e8c8f62140..0000000000 --- a/exercises/neutron-adv-test.sh +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2012, Cisco Systems -# Copyright 2012, VMware, Inc. -# Copyright 2012, NTT MCL, Inc. 
-# -# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com -# -# **neutron-adv-test.sh** - -# Perform integration testing of Nova and other components with Neutron. - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. - -set -o errtrace - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - -# Environment -# ----------- - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import neutron functions -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# If neutron is not enabled we exit with exitcode 55, which means exercise is skipped. -neutron_plugin_check_adv_test_requirements || exit 55 - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Neutron Settings -# ---------------- - -PROJECTS="DEMO1" -# TODO (nati)_Test public network -#PROJECTS="DEMO1,DEMO2" - -PUBLIC_NAME="admin" -DEMO1_NAME="demo1" -DEMO2_NAME="demo2" - -PUBLIC_NUM_NET=1 -DEMO1_NUM_NET=1 -DEMO2_NUM_NET=2 - -PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.10.0.0/24" -DEMO2_NET1_CIDR="10.20.0.0/24" -DEMO2_NET2_CIDR="10.20.1.0/24" - -PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.10.0.1" -DEMO2_NET1_GATEWAY="10.20.0.1" -DEMO2_NET2_GATEWAY="10.20.1.1" - -PUBLIC_NUM_VM=1 -DEMO1_NUM_VM=1 -DEMO2_NUM_VM=2 - -PUBLIC_VM1_NET='admin-net1' -DEMO1_VM1_NET='demo1-net1' -# Multinic settings. 
But this is fail without nic setting in OS image -DEMO2_VM1_NET='demo2-net1' -DEMO2_VM2_NET='demo2-net2' - -PUBLIC_NUM_ROUTER=1 -DEMO1_NUM_ROUTER=1 -DEMO2_NUM_ROUTER=1 - -PUBLIC_ROUTER1_NET="admin-net1" -DEMO1_ROUTER1_NET="demo1-net1" -DEMO2_ROUTER1_NET="demo2-net1" - -# Various functions -# ----------------- - -function foreach_project { - COMMAND=$1 - for PROJECT in ${PROJECTS//,/ };do - eval ${COMMAND//%PROJECT%/$PROJECT} - done -} - -function foreach_project_resource { - COMMAND=$1 - RESOURCE=$2 - for PROJECT in ${PROJECTS//,/ };do - eval 'NUM=$'"${PROJECT}_NUM_$RESOURCE" - for i in `seq $NUM`;do - local COMMAND_LOCAL=${COMMAND//%PROJECT%/$PROJECT} - COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i} - eval $COMMAND_LOCAL - done - done -} - -function foreach_project_vm { - COMMAND=$1 - foreach_project_resource "$COMMAND" 'VM' -} - -function foreach_project_net { - COMMAND=$1 - foreach_project_resource "$COMMAND" 'NET' -} - -function get_image_id { - local IMAGE_ID - IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) - die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" - echo "$IMAGE_ID" -} - -function get_project_id { - local PROJECT_NAME=$1 - local PROJECT_ID - PROJECT_ID=`openstack project list | grep " $PROJECT_NAME " | head -n 1 | get_field 1` - die_if_not_set $LINENO PROJECT_ID "Failure retrieving PROJECT_ID for $PROJECT_NAME" - echo "$PROJECT_ID" -} - -function get_user_id { - local USER_NAME=$1 - local USER_ID - USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'` - die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" - echo "$USER_ID" -} - -function get_role_id { - local ROLE_NAME=$1 - local ROLE_ID - ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'` - die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" - echo "$ROLE_ID" -} - -function get_network_id { - local NETWORK_NAME="$1" - local NETWORK_ID - NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME` - echo $NETWORK_ID -} - -function get_flavor_id { - local INSTANCE_TYPE=$1 - local FLAVOR_ID - FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` - die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" - echo "$FLAVOR_ID" -} - -function confirm_server_active { - local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!" 
- false - fi -} - -function neutron_debug_admin { - local os_username=$OS_USERNAME - local os_project_id=$OS_PROJECT_ID - source $TOP_DIR/openrc admin admin - neutron-debug $@ - source $TOP_DIR/openrc $os_username $os_project_id -} - -function add_project { - openstack project create $1 - openstack user create $2 --password ${ADMIN_PASSWORD} --project $1 - openstack role add Member --project $1 --user $2 -} - -function remove_project { - local PROJECT=$1 - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - openstack project delete $PROJECT_ID -} - -function remove_user { - local USER=$1 - local USER_ID - USER_ID=$(get_user_id $USER) - openstack user delete $USER_ID -} - -function create_projects { - source $TOP_DIR/openrc admin admin - add_project demo1 demo1 demo1 - add_project demo2 demo2 demo2 - source $TOP_DIR/openrc demo demo -} - -function delete_projects_and_users { - source $TOP_DIR/openrc admin admin - remove_user demo1 - remove_project demo1 - remove_user demo2 - remove_project demo2 - echo "removed all projects" - source $TOP_DIR/openrc demo demo -} - -function create_network { - local PROJECT=$1 - local GATEWAY=$2 - local CIDR=$3 - local NUM=$4 - local EXTRA=$5 - local NET_NAME="${PROJECT}-net$NUM" - local ROUTER_NAME="${PROJECT}-router${NUM}" - source $TOP_DIR/openrc admin admin - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - source $TOP_DIR/openrc $PROJECT $PROJECT - local NET_ID - NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA" - openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet" - neutron_debug_admin probe-create --device-owner compute $NET_ID - source $TOP_DIR/openrc demo demo -} - -function create_networks { - foreach_project_net 'create_network ${%PROJECT%_NAME} ${%PROJECT%_NET%NUM%_GATEWAY} ${%PROJECT%_NET%NUM%_CIDR} %NUM% ${%PROJECT%_NET%NUM%_EXTRA}' - #TODO(nati) test security group function - # allow ICMP for both project's security groups - #source $TOP_DIR/openrc demo1 demo1 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 - #source $TOP_DIR/openrc demo2 demo2 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_vm { - local PROJECT=$1 - local NUM=$2 - local NET_NAMES=$3 - source $TOP_DIR/openrc $PROJECT $PROJECT - local NIC="" - for NET_NAME in ${NET_NAMES//,/ };do - NIC="$NIC --nic net-id="`get_network_id $NET_NAME` - done - #TODO (nati) Add multi-nic test - #TODO (nati) Add public-net test - local VM_UUID - VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - $NIC \ - $PROJECT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set $LINENO VM_UUID "Failure launching $PROJECT-server$NUM" - confirm_server_active $VM_UUID -} - -function create_vms { - foreach_project_vm 'create_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}' -} - -function ping_ip { - # Test agent connection. 
Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $IP $BOOT_TIMEOUT $NET_NAME -} - -function check_vm { - local PROJECT=$1 - local NUM=$2 - local VM_NAME="$PROJECT-server$NUM" - local NET_NAME=$3 - source $TOP_DIR/openrc $PROJECT $PROJECT - ping_ip $VM_NAME $NET_NAME - # TODO (nati) test ssh connection - # TODO (nati) test inter connection between vm - # TODO (nati) test dhcp host routes - # TODO (nati) test multi-nic -} - -function check_vms { - foreach_project_vm 'check_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}' -} - -function shutdown_vm { - local PROJECT=$1 - local NUM=$2 - source $TOP_DIR/openrc $PROJECT $PROJECT - VM_NAME=${PROJECT}-server$NUM - nova delete $VM_NAME -} - -function shutdown_vms { - foreach_project_vm 'shutdown_vm ${%PROJECT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "Some VMs failed to shutdown" - fi -} - -function delete_network { - local PROJECT=$1 - local NUM=$2 - local NET_NAME="${PROJECT}-net$NUM" - source $TOP_DIR/openrc admin admin - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - #TODO(nati) comment out until l3-agent merged - #for res in port subnet net router;do - for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do - delete_probe $net_id - openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete % - openstack network delete $net_id - done - source $TOP_DIR/openrc demo demo -} - -function delete_networks { - foreach_project_net 'delete_network ${%PROJECT%_NAME} %NUM%' - # TODO(nati) add secuirty group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_all { - create_projects - create_networks - create_vms -} - -function delete_all { - shutdown_vms - delete_networks - delete_projects_and_users -} - -function all { - create_all - check_vms - delete_all -} - -# Test functions -# -------------- - -function test_functions { - IMAGE=$(get_image_id) - echo $IMAGE - - PROJECT_ID=$(get_project_id demo) - echo $PROJECT_ID - - FLAVOR_ID=$(get_flavor_id m1.tiny) - echo $FLAVOR_ID - - NETWORK_ID=$(get_network_id admin) - echo $NETWORK_ID -} - -# Usage and main -# -------------- - -function usage { - echo "$0: [-h]" - echo " -h, --help Display help message" - echo " -t, --project Create projects" - echo " -n, --net Create networks" - echo " -v, --vm Create vms" - echo " -c, --check Check connection" - echo " -x, --delete-projects Delete projects" - echo " -y, --delete-nets Delete networks" - echo " -z, --delete-vms Delete vms" - echo " -T, --test Test functions" -} - -function main { - - echo Description - - if [ $# -eq 0 ] ; then - # if no args are provided, run all tests - all - else - - while [ "$1" != "" ]; do - case $1 in - -h | --help ) usage - exit - ;; - -n | --net ) create_networks - exit - ;; - -v | --vm ) create_vms - exit - ;; - -t | --project ) create_projects - exit - ;; - -c | --check ) check_vms - exit - ;; - -T | --test ) test_functions - exit - ;; - -x | --delete-projects ) delete_projects_and_users - exit - ;; - -y | --delete-nets ) delete_networks - exit - ;; - -z | --delete-vms ) shutdown_vms - exit - ;; - -a | --all ) all - exit - ;; - * ) usage - exit 1 - esac - 
shift - done - fi -} - -trap failed ERR -function failed { - local r=$? - set +o errtrace - set +o xtrace - echo "Failed to execute" - echo "Starting cleanup..." - delete_all - echo "Finished cleanup" - exit $r -} - -# Kick off script -# --------------- - -echo $* -main $* - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh deleted file mode 100755 index 2f78e393eb..0000000000 --- a/exercises/sec_groups.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -# **sec_groups.sh** - -# Test security groups via the command line - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - - -# Testing Security Groups -# ======================= - -# List security groups -nova secgroup-list - -# Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" -nova secgroup-create $SEC_GROUP_NAME 'a test security group' - -# Add some rules to the secgroup -RULES_TO_ADD=( 22 3389 5900 ) - -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Check to make sure rules were added -SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) -die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME" -for i in "${RULES_TO_ADD[@]}"; do - skip= - for j in "${SEC_GROUP_RULES[@]}"; do - [[ $i == $j ]] && { skip=1; break; } - done - [[ -n $skip ]] || exit 1 -done - -# Delete rules and secgroup -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Delete secgroup -nova secgroup-delete $SEC_GROUP_NAME || \ - die $LINENO "Failure deleting security group $SEC_GROUP_NAME" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh deleted file mode 100755 index 8aa376b8a7..0000000000 --- a/exercises/swift.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -# **swift.sh** - -# Test swift via the ``python-openstackclient`` command line - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# 
This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If swift is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled s-proxy || exit 55 - -# Container name -CONTAINER=ex-swift -OBJECT=/etc/issue - - -# Testing Swift -# ============= - -# Check if we have to swift via keystone -openstack object store account show || die $LINENO "Failure getting account status" - -# We start by creating a test container -openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER" - -# add a file into it. -openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER" - -# list the objects -openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER" - -# delete the object first -openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER" - -# delete the container -openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/volumes.sh b/exercises/volumes.sh deleted file mode 100755 index e7c3560894..0000000000 --- a/exercises/volumes.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash - -# **volumes.sh** - -# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled cinder || exit 55 - -# Ironic does not currently support volume attachment. 
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-vol_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-vol-inst} -VOL_NAME="ex-vol-$(openssl rand -hex 4)" - - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Volumes -# ------- - -# Verify it doesn't exist -if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then - die $LINENO "Volume $VOL_NAME already exists" -fi - -# Create a new volume -start_time=$(date +%s) -cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not created" -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Attach to server -DEVICE=/dev/vdb -start_time=$(date +%s) -nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not attached to $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-attach in $((end_time - start_time)) seconds" - -VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) -die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status" -if [[ "$VOL_ATTACH" != $VM_UUID ]]; then - die $LINENO "Volume not attached to correct instance" -fi - -# Clean up -# -------- - -# Detach volume -start_time=$(date +%s) -nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not detached from $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-detach in $((end_time - start_time)) seconds" - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not deleted" -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" From 3cdff78fbac6a88c4864d4effa8126610bcce2fd Mon Sep 17 00:00:00 2001 From: Felipe Monteiro Date: Mon, 9 Jul 2018 20:25:08 +0100 Subject: [PATCH 0920/1936] Rename "Member" role in tempest_roles to "member" Recently, Keystone renamed "Member" role to "member" (case-sensitive) with https://review.openstack.org/#/c/572243/14 Case-sensitivity role requirement in Keystone was recently formalized with https://review.openstack.org/#/c/576640/ From the above reference: "Role names are case-insensitive. for example, when keystone bootstraps default roles, it creates `admin`, `member`, and `reader`. If another role `Member` (note the upper case 'M') is created, keystone will return a `409` Conflict since it considers the name "Member" == "member". Note that case is preserved in these cases." It follows that Tempest should use "member" role by default. 
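For illustration only (not part of this change): with the one-line edit below, the
generated tempest.conf ends up carrying roughly

    [auth]
    tempest_roles = member

and a deployment that still needs the pre-rename capitalization could override the
value from local.conf, assuming DevStack's usual test-config meta-section:

    [[test-config|$TEMPEST_CONFIG]]
    [auth]
    tempest_roles = Member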
Change-Id: Iebf04fdb4c195b6779c74f66da3f7822cf174494 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 60f571ceb3..a0f9dd8778 100644 --- a/lib/tempest +++ b/lib/tempest @@ -581,7 +581,7 @@ function configure_tempest { tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt # Auth: - iniset $TEMPEST_CONFIG auth tempest_roles "Member" + iniset $TEMPEST_CONFIG auth tempest_roles "member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml From 31c7d5c796fac2d3d67e893388166f7cf26c6a33 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 23 Jul 2018 09:25:08 -0400 Subject: [PATCH 0921/1936] cinder: configure backup_swift_url in subnodes The tempest-multinode-full job is running the c-bak service on the subnode where swift isn't running, and because of the "is_enabled_service swift" check, cinder on the subnode wasn't getting configured to talk to swift so the c-bak service was down. Since chances are good that we're running swift, just configure cinder to always use it. Change-Id: I86b090967dadeeefc017ff0311beeea9441b6ba6 Closes-Bug: #1783128 --- lib/cinder | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 92d0295b5b..664f423c73 100644 --- a/lib/cinder +++ b/lib/cinder @@ -268,7 +268,12 @@ function configure_cinder { configure_cinder_image_volume_cache fi - if is_service_enabled swift; then + if is_service_enabled c-bak; then + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. If we want to configure the backup service to use + # a non-swift driver, we'll likely need environment variables. iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" fi From 1481322d7466c2b53a6144963663173ff0404422 Mon Sep 17 00:00:00 2001 From: Matthew Edmonds Date: Fri, 27 Jul 2018 15:48:38 -0400 Subject: [PATCH 0922/1936] Correct neutron docs The neutron guide refers to ENABLE_PROJECT_VLANS and PROJECT_VLAN_RANGE but these are not present/checked in the code, which uses ENABLE_TENANT_VLANS and TENANT_VLAN_RANGE. This corrects the documentation to match. Change-Id: I204356c861157e9fab357bb4dde55185bf18a707 --- doc/source/guides/neutron.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 7f360c63cc..12c6d6902d 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -376,8 +376,8 @@ controller node. ## Neutron options Q_USE_SECGROUP=True - ENABLE_PROJECT_VLANS=True - PROJECT_VLAN_RANGE=3001:4000 + ENABLE_TENANT_VLANS=True + TENANT_VLAN_RANGE=3001:4000 PHYSICAL_NETWORK=default OVS_PHYSICAL_BRIDGE=br-ex From 66b361b53884ef1755d18cbab1b6898ebd78caa1 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 13 Jun 2017 00:31:01 -0700 Subject: [PATCH 0923/1936] WSGI Neutron integration This patch provides a new mechanism to deploy Neutron using WSGI script. This also starts a Neutron RPC server process when the Neutron API is loaded via a WSGI entry point to serve the agents. 
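For anyone who wants to exercise the new code path, the opt-in knob is the
NEUTRON_DEPLOY_MOD_WSGI flag introduced below; a minimal local.conf sketch
(illustrative, not part of this patch) would be

    [[local|localrc]]
    NEUTRON_DEPLOY_MOD_WSGI=True

while the default of False keeps the existing eventlet-based neutron-server
behaviour unchanged.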
Co-Authored-By: Victor Morales Co-Authored-By: Nguyen Phuong An Change-Id: I16a199b04858bfc03ef50d9883154dba8b0d66ea Depends-On: https://review.openstack.org/#/c/580049/ Partially-implements: blueprint run-in-wsgi-server --- .zuul.yaml | 13 ++++++++ files/apache-neutron.template | 36 ++++++++++++++++++++ lib/neutron | 63 ++++++++++++++++++++++++++++------- lib/neutron-legacy | 51 ++++++++++++++++++++++------ 4 files changed, 140 insertions(+), 23 deletions(-) create mode 100644 files/apache-neutron.template diff --git a/.zuul.yaml b/.zuul.yaml index 57cbf88e1d..67ffe0805b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -511,6 +511,16 @@ # changes to devstack w/o gating on it for all devstack changes. # * nova-next: maintained by nova for unreleased/undefaulted # things like cellsv2 and placement-api + # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test + # when neutron-api is served by uwsgi, it's in exprimental for testing. + # the next cycle we can remove this job if things turn out to be + # stable enough. + # * neutron-functional-with-uwsgi: maintained by neutron for functional + # test. Next cycle we can remove this one if things turn out to be + # stable engouh with uwsgi. + # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. + # Next cycle we can remove this if everything run out stable enough. + experimental: jobs: - nova-cells-v1: @@ -518,3 +528,6 @@ - ^.*\.rst$ - ^doc/.*$ - nova-next + - neutron-fullstack-with-uwsgi + - neutron-functional-with-uwsgi + - neutron-tempest-with-uwsgi \ No newline at end of file diff --git a/files/apache-neutron.template b/files/apache-neutron.template new file mode 100644 index 0000000000..c7796b93bf --- /dev/null +++ b/files/apache-neutron.template @@ -0,0 +1,36 @@ +Listen %PUBLICPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined + + + Require all granted + + + + WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup neutron-server + WSGIScriptAlias / %NEUTRON_BIN%/neutron-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/neutron.log + CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% + +Alias /networking %NEUTRON_BIN%/neutron-api + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup neutron-server + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/lib/neutron b/lib/neutron index 9f9b132377..4847e87f2f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -28,6 +28,12 @@ set +o xtrace # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient +# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: +# - False (default) : Run neutron under Eventlet +# - True : Run neutron under uwsgi +# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable +# enough +NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False} NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} @@ -58,6 +64,8 @@ NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} 
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini + # By default, use the ML2 plugin NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} @@ -286,7 +294,7 @@ function configure_neutron_new { # Format logging setup_logging $NEUTRON_CONF - if is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True @@ -357,6 +365,15 @@ function configure_neutron_nova_new { # create_neutron_accounts() - Create required service accounts function create_neutron_accounts_new { + local neutron_url + + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/ + else + neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ + fi + + if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then create_service_user "neutron" @@ -364,8 +381,7 @@ function create_neutron_accounts_new { neutron_service=$(get_or_create_service "neutron" \ "network" "Neutron Service") get_or_create_endpoint $neutron_service \ - "$REGION_NAME" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" + "$REGION_NAME" "$neutron_url" fi } @@ -427,6 +443,7 @@ function install_neutronclient { function start_neutron_api { local service_port=$NEUTRON_SERVICE_PORT local service_protocol=$NEUTRON_SERVICE_PROTOCOL + local neutron_url if is_service_enabled tls-proxy; then service_port=$NEUTRON_SERVICE_PORT_INT service_protocol="http" @@ -440,17 +457,24 @@ function start_neutron_api { opts+=" --config-file $cfg_file" done - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then - die $LINENO "neutron-api did not start" + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ + enable_service neutron-rpc-server + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" + else + # Start the Neutron service + # TODO(sc68cal) Stop hard coding this + run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port + # Start proxy if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT + fi fi - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT + if ! 
wait_for_service $SERVICE_TIMEOUT $neutron_url; then + die $LINENO "neutron-api did not start" fi } @@ -497,6 +521,10 @@ function stop_neutron_new { stop_process $serv done + if is_service_enabled neutron-rpc-server; then + stop_process neutron-rpc-server + fi + if is_service_enabled neutron-dhcp; then stop_process neutron-dhcp pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') @@ -551,6 +579,13 @@ function neutron_deploy_rootwrap_filters_new { # neutron-legacy is removed. # TODO(sc68cal) Remove when neutron-legacy is no more. function cleanup_neutron { + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + stop_process neutron-api + stop_process neutron-rpc-server + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + fi + if is_neutron_legacy_enabled; then # Call back to old function cleanup_mutnauq "$@" @@ -566,6 +601,10 @@ function configure_neutron { else configure_neutron_new "$@" fi + + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + fi } function configure_neutron_nova { diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 15bcfe36b9..9330b23802 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -86,6 +86,15 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} +# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: +# - False (default) : Run neutron under Eventlet +# - True : Run neutron under uwsgi +# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable +# enough +NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False} + +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini + # Agent binaries. 
Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -402,6 +411,13 @@ function create_nova_conf_neutron { # Migrated from keystone_data.sh function create_mutnauq_accounts { + local neutron_url + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/ + else + neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ + fi + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then create_service_user "neutron" @@ -409,8 +425,7 @@ function create_mutnauq_accounts { get_or_create_service "neutron" "network" "Neutron Service" get_or_create_endpoint \ "network" \ - "$REGION_NAME" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" + "$REGION_NAME" "$neutron_url" fi } @@ -460,6 +475,7 @@ function start_neutron_service_and_check { local service_port=$Q_PORT local service_protocol=$Q_PROTOCOL local cfg_file_options + local neutron_url cfg_file_options="$(determine_config_files neutron-server)" @@ -468,16 +484,24 @@ function start_neutron_service_and_check { service_protocol="http" fi # Start the Neutron service - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + enable_service neutron-api + run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ + enable_service neutron-rpc-server + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + else + run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + neutron_url=$service_protocol://$Q_HOST:$service_port + # Start proxy if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT + fi + fi echo "Waiting for Neutron to start..." - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port" + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT - - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi } # Control of the l2 agent is separated out to make it easier to test partial @@ -532,7 +556,12 @@ function stop_mutnauq_other { [ ! -z "$pid" ] && sudo kill -9 $pid fi - stop_process q-svc + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + stop_process neutron-rpc-server + stop_process neutron-api + else + stop_process q-svc + fi if is_service_enabled q-l3; then sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" @@ -715,7 +744,7 @@ function _configure_neutron_common { # Format logging setup_logging $NEUTRON_CONF - if is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True From ed6e1d0996a910eca42a202f17dfeee53d250c00 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Wed, 1 Aug 2018 18:03:44 +0000 Subject: [PATCH 0924/1936] Set transport_url in proper keystone config section The RPC transport_url for keystone was being set in the DEFAULT section, even though keystone doesn't do anything with it. Instead, keystone leans on the [oslo_messaging_notification] section from oslo.messaging to register the transport_url option. 
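For illustration only, the end state this aims for is roughly the following, a minimal sketch using devstack's iniset helper (the rabbit URL is an example value, not taken from this change; the real code derives it via iniset_rpc_backend as the diff below shows):

    iniset $KEYSTONE_CONF oslo_messaging_notifications transport_url "rabbit://stackrabbit:secret@127.0.0.1:5672/"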
This change sets the transport_url in the proper section instead of
using the DEFAULT section.

Change-Id: I11590d0175da7ea310d5529f2d7c0bf8d7fb25b3
---
 lib/keystone | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/keystone b/lib/keystone
index 57cb24d4cb..02e28222b7 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -217,7 +217,7 @@ function configure_keystone {
 iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
 iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
- iniset_rpc_backend keystone $KEYSTONE_CONF
+ iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
 local service_port=$KEYSTONE_SERVICE_PORT
 local auth_port=$KEYSTONE_AUTH_PORT

From b73fb370511563b5607db149dae66e33333dd445 Mon Sep 17 00:00:00 2001
From: Paul Belanger
Date: Thu, 2 Aug 2018 16:15:52 -0400
Subject: [PATCH 0925/1936] Remove devstack-single-node-fedora-27

There are no projects using this nodeset, and removing it allows
openstack-infra to delete fedora-27 images.

Change-Id: I37d482dd2b5e099c370ab693ff430cb9c56360f8
Depends-On: https://review.openstack.org/588369
Signed-off-by: Paul Belanger
---
 .zuul.yaml | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/.zuul.yaml b/.zuul.yaml
index 57cbf88e1d..99ab20d3d0 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -38,16 +38,6 @@
 nodes:
 - controller

-- nodeset:
- name: devstack-single-node-fedora-27
- nodes:
- - name: controller
- label: fedora-27
- groups:
- - name: tempest
- nodes:
- - controller
-
- nodeset:
 name: devstack-single-node-fedora-latest
 nodes:

From 11b8649e839f55b1d7e17fa3636f7b160e6c5d22 Mon Sep 17 00:00:00 2001
From: ghanshyam
Date: Tue, 7 Aug 2018 08:37:24 +0000
Subject: [PATCH 0926/1936] Fix TEMPEST_AUTH_VERSION comparison condition for identity v2

TEMPEST_AUTH_VERSION should be 'v3' or 'v2', not 'v2.0'. To disable the
identity v2 admin tests, TEMPEST_AUTH_VERSION is being compared with
'v2.0', which is incorrect.

Change-Id: I5f7e3bcf733edbbee06016bcad4845dda552815e
---
 lib/tempest | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/tempest b/lib/tempest
index 60f571ceb3..f83476d42f 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -279,8 +279,8 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
 fi
 iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
- if [[ "$TEMPEST_AUTH_VERSION" != "v2.0" ]]; then
- # we're going to disable v2 admin unless we're using v2.0 by default.
+ if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
+ # we're going to disable v2 admin unless we're using v2 by default.
 iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
 fi

From 59f50c7967de9274d4c125e40fee8b2bfbe9cfc7 Mon Sep 17 00:00:00 2001
From: zhubx007
Date: Mon, 23 Jul 2018 11:42:07 +0800
Subject: [PATCH 0927/1936] BUG Fix: add sudo to run command arping

When 'PUBLIC_INTERFACE' is set in local.conf, the code enters
_move_neutron_addresses_route in neutron-legacy. If arping is then run
without sudo, it fails with "arping: socket: Operation not permitted".
So add 'sudo' to 'ARP_CMD' in lib/neutron-legacy.
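For illustration, the failing and working invocations look roughly like this (the interface and address are example values; the flags match the ARP_CMD set in the diff below):

    $ arping -A -c 3 -w 4.5 -I br-ex 172.24.4.1
    arping: socket: Operation not permitted
    $ sudo arping -A -c 3 -w 4.5 -I br-ex 172.24.4.1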
Change-Id: I8ac8a9bc2bbba049c45b28bf9b93d9a10e398fe6
Closes-Bug: #1783046
---
 lib/neutron-legacy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 9330b23802..be5b73ffa6 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -633,7 +633,7 @@ function _move_neutron_addresses_route {
 IP_UP="sudo ip link set $to_intf up"
 if [[ "$af" == "inet" ]]; then
 IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
- ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
+ ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP "
 fi
 fi

From 70be0d14a62680f415daf6e15af6dd01f38749fe Mon Sep 17 00:00:00 2001
From: Michal Rostecki
Date: Thu, 9 Aug 2018 15:19:17 +0200
Subject: [PATCH 0928/1936] gitignore: Ignore all log files

Before this change, only *.log and *.log.[0-9] patterns were ignored,
which was not enough. Examples of file names which were not ignored:

devstack.log.2018-08-09-100547
wget-log
wget-log.1

Patterns *.log.* and *-log.* work for every log file generated by
devstack.

Change-Id: I6f0de5de74f196ab9df66cf3f2f969e53da01c22
Signed-off-by: Michal Rostecki
---
 .gitignore | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 8553b3f849..e5e1f6aba0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
 *~
 .*.sw?
 *.log
-*.log.[1-9]
+*-log
+*.log.*
+*-log.*
 *.pem
 *.pyc
 .localrc.auto

From 2c42fd09d555981234fd88b9522c366e42e4e7c6 Mon Sep 17 00:00:00 2001
From: Chang Liu
Date: Fri, 3 Aug 2018 16:15:20 +0800
Subject: [PATCH 0929/1936] Modified the description of the doc directory

In the latest version of the Contributing to DevStack manual there are
a few words saying that "tools/build_docs.sh is used to generate the
HTML versions of the DevStack scripts". But build_docs.sh has not been
there since the Newton release, so I delete it for good.

Change-Id: I69f7aa23e1efd8f8a63aa79628e67378d524e173
---
 HACKING.rst | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/HACKING.rst b/HACKING.rst
index d5d6fbcf02..ce1c1b5416 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -47,8 +47,7 @@ The DevStack repo generally keeps all of the primary scripts at the root level.
 ``doc`` - Contains the Sphinx source for the documentation.
-``tools/build_docs.sh`` is used to generate the HTML versions of the
-DevStack scripts. A complete doc build can be run with ``tox -edocs``.
+A complete doc build can be run with ``tox -edocs``.
 ``exercises`` - Contains the test scripts used to sanity-check and
 demonstrate some OpenStack functions.
These scripts know how to exit From 7711d7f196e946b0cf9a091fd6be0cf35e48fd31 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 Aug 2018 06:13:29 +0000 Subject: [PATCH 0930/1936] Updated from generate-devstack-plugins-list Change-Id: I610cf0be64cfcfba754e629e2a62e8dc2cb99ddf --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 7ad65f741b..9ef2a32c88 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -158,6 +158,7 @@ panko `git://git.openstack.org/openstack/panko patrole `git://git.openstack.org/openstack/patrole `__ picasso `git://git.openstack.org/openstack/picasso `__ qinling `git://git.openstack.org/openstack/qinling `__ +qinling-dashboard `git://git.openstack.org/openstack/qinling-dashboard `__ rally `git://git.openstack.org/openstack/rally `__ rally-openstack `git://git.openstack.org/openstack/rally-openstack `__ sahara `git://git.openstack.org/openstack/sahara `__ From c617d476c2f6d7b86553e9a0cb5c9b229dc5e3af Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 23 Aug 2018 06:11:41 +0000 Subject: [PATCH 0931/1936] Updated from generate-devstack-plugins-list Change-Id: Ice486739c1dfbcbf61db178be64075164e9f7ed5 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9ef2a32c88..f0c6238e0b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -171,6 +171,7 @@ slogging `git://git.openstack.org/openstack/sloggi solum `git://git.openstack.org/openstack/solum `__ stackube `git://git.openstack.org/openstack/stackube `__ storlets `git://git.openstack.org/openstack/storlets `__ +stx-config `git://git.openstack.org/openstack/stx-config `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ From e5cac49b04084407432b60c5670c25961bd7302a Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Fri, 24 Aug 2018 10:38:01 +0000 Subject: [PATCH 0932/1936] Update branches for stable/rocky Change-Id: Ia6de4b83f56c5ac0af19b0ca4f12aa4d47fd963a --- stackrc | 2 +- tests/test_libs_from_pypi.sh | 12 ------------ 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/stackrc b/stackrc index 2088bf45a3..2c464bd6be 100644 --- a/stackrc +++ b/stackrc @@ -15,7 +15,7 @@ source $RC_DIR/functions # Set the target branch. This is used so that stable branching # does not need to update each repo below. -TARGET_BRANCH=master +TARGET_BRANCH=stable/rocky # Cycle trailing projects need to branch later than the others. 
TRAILING_TARGET_BRANCH=master diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index c3b4457171..da2d970482 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -95,19 +95,7 @@ function test_libs_exist { echo "test_libs_exist PASSED" } -function test_branch_master { - for lib in $ALL_LIBS; do - if [[ ${GITBRANCH[$lib]} != "master" ]]; then - echo "GITBRANCH for $lib not master (${GITBRANCH[$lib]})" - exit 1 - fi - done - - echo "test_branch_master PASSED" -} - set -o errexit test_libs_exist -test_branch_master test_all_libs_upto_date From ef72d1ea77e2d5b8f210f222bcaf41b0fc7520e2 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Fri, 24 Aug 2018 10:44:54 +0000 Subject: [PATCH 0933/1936] Update DEVSTACK_SERIES to stein stable/rocky branch has been created now and current master is for stein. Change-Id: Id5d75e7a8a38a749f387f3ba670e3d2c10cb9719 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 2088bf45a3..34bd6771a6 100644 --- a/stackrc +++ b/stackrc @@ -258,7 +258,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="rocky" +DEVSTACK_SERIES="stein" ############## # From 57bc01b3202e14ce8b722f7fcebddc8c8ae2159e Mon Sep 17 00:00:00 2001 From: "Jens Harbott (frickler)" Date: Mon, 27 Aug 2018 13:23:18 +0000 Subject: [PATCH 0934/1936] Revert "Update branches for stable/rocky" This reverts commit e5cac49b04084407432b60c5670c25961bd7302a. Change-Id: Ieceb5030a6c21378adcb9bf9c51cd862e0b0d01a --- stackrc | 2 +- tests/test_libs_from_pypi.sh | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 2c464bd6be..2088bf45a3 100644 --- a/stackrc +++ b/stackrc @@ -15,7 +15,7 @@ source $RC_DIR/functions # Set the target branch. This is used so that stable branching # does not need to update each repo below. -TARGET_BRANCH=stable/rocky +TARGET_BRANCH=master # Cycle trailing projects need to branch later than the others. TRAILING_TARGET_BRANCH=master diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index da2d970482..c3b4457171 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -95,7 +95,19 @@ function test_libs_exist { echo "test_libs_exist PASSED" } +function test_branch_master { + for lib in $ALL_LIBS; do + if [[ ${GITBRANCH[$lib]} != "master" ]]; then + echo "GITBRANCH for $lib not master (${GITBRANCH[$lib]})" + exit 1 + fi + done + + echo "test_branch_master PASSED" +} + set -o errexit test_libs_exist +test_branch_master test_all_libs_upto_date From 6465219ec39ea3ec75e796b3bd80b96550470546 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Wed, 29 Aug 2018 16:41:04 +0300 Subject: [PATCH 0935/1936] Fixed git show for upper-constraints.txt in lib/tempest Change-Id: Id133108e0436018be81fca74a15ff29a4fe2e796 Closes-Bug: #1789643 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 0605ffb082..6da2330738 100644 --- a/lib/tempest +++ b/lib/tempest @@ -560,7 +560,7 @@ function configure_tempest { fi # The requirements might be on a different branch, while tempest needs master requirements. 
- (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt # Auth: From 41fe3ebd4b1570c83ddf45cb8c240528aa06a56c Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sat, 8 Sep 2018 18:32:25 +0000 Subject: [PATCH 0936/1936] Update horizon wsgi wrapper to the recommended one During Rocky cycle, horizon updates the path of the wsgi wrapper to the one recommended by Django [1]. The old path will be dropped in the T release. [1] https://review.openstack.org/#/c/561802/ Related-Bug: #1763204 Change-Id: Ie942518b587d193a7de55ffcc0a2848406146eb2 --- files/apache-horizon.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index bfd75678e3..efcfc0360b 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -1,5 +1,5 @@ - WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi + WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP} WSGIApplicationGroup %{GLOBAL} From a0d1016e5540791d63a09a80a49181c08725f011 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Sat, 8 Sep 2018 22:50:06 -0400 Subject: [PATCH 0937/1936] import zuul job settings from project-config This is a mechanically generated patch to complete step 1 of moving the zuul job settings out of project-config and into each project repository. Because there will be a separate patch on each branch, the branch specifiers for branch-specific jobs have been removed. Because this patch is generated by a script, there may be some cosmetic changes to the layout of the YAML file(s) as the contents are normalized. See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: I9169d41d790ae874af29c8ceccf0c55ab0df7727 Story: #2002586 Task: #24327 --- .zuul.yaml | 165 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 121 insertions(+), 44 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index feafa0c2e2..68b20930ea 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -120,46 +120,46 @@ # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false zuul_copy_output: - '{{ devstack_conf_dir }}/local.conf': 'logs' - '{{ devstack_conf_dir }}/localrc': 'logs' - '{{ devstack_conf_dir }}/.localrc.auto': 'logs' - '{{ devstack_conf_dir }}/.stackenv': 'logs' - '{{ devstack_log_dir }}/dstat-csv.log': 'logs' - '{{ devstack_log_dir }}/devstacklog.txt': 'logs' - '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs' - '{{ devstack_full_log}}': 'logs' - '{{ stage_dir }}/verify_tempest_conf.log': 'logs' - '{{ stage_dir }}/apache': 'logs' - '{{ stage_dir }}/apache_config': 'logs' - '{{ stage_dir }}/etc': 'logs' - '/var/log/rabbitmq': 'logs' - '/var/log/postgresql': 'logs' - '/var/log/mysql.err': 'logs' - '/var/log/mysql.log': 'logs' - '/var/log/libvirt': 'logs' - '/etc/sudoers': 'logs' - '/etc/sudoers.d': 'logs' - '{{ stage_dir }}/iptables.txt': 'logs' - '{{ stage_dir }}/df.txt': 'logs' - '{{ stage_dir }}/pip2-freeze.txt': 'logs' - '{{ stage_dir }}/pip3-freeze.txt': 'logs' - '{{ stage_dir }}/dpkg-l.txt': 'logs' - '{{ stage_dir }}/rpm-qa.txt': 'logs' - '{{ stage_dir }}/core': 'logs' - '{{ stage_dir }}/listen53.txt': 'logs' - '{{ stage_dir }}/deprecations.log': 'logs' - '/var/log/ceph': 'logs' - '/var/log/openvswitch': 'logs' - '/var/log/glusterfs': 'logs' - '/etc/glusterfs/glusterd.vol': 'logs' - '/etc/resolv.conf': 'logs' - '/var/log/unbound.log': 'logs' + '{{ devstack_conf_dir }}/local.conf': logs + '{{ devstack_conf_dir }}/localrc': logs + '{{ devstack_conf_dir }}/.localrc.auto': logs + '{{ devstack_conf_dir }}/.stackenv': logs + '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/devstacklog.txt': logs + '{{ devstack_log_dir }}/devstacklog.txt.summary': logs + '{{ devstack_full_log}}': logs + '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/apache': logs + '{{ stage_dir }}/apache_config': logs + '{{ stage_dir }}/etc': logs + /var/log/rabbitmq: logs + /var/log/postgresql: logs + /var/log/mysql.err: logs + /var/log/mysql.log: logs + /var/log/libvirt: logs + /etc/sudoers: logs + /etc/sudoers.d: logs + '{{ stage_dir }}/iptables.txt': logs + '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/pip2-freeze.txt': logs + '{{ stage_dir }}/pip3-freeze.txt': logs + '{{ stage_dir }}/dpkg-l.txt': logs + '{{ stage_dir }}/rpm-qa.txt': logs + '{{ stage_dir }}/core': logs + '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/deprecations.log': logs + /var/log/ceph: logs + /var/log/openvswitch: logs + /var/log/glusterfs: logs + /etc/glusterfs/glusterd.vol: logs + /etc/resolv.conf: logs + /var/log/unbound.log: logs extensions_to_txt: - conf: True - log: True - localrc: True - stackenv: True - auto: True + conf: true + log: true + localrc: true + stackenv: true + auto: true group-vars: subnode: devstack_localrc: @@ -207,7 +207,7 @@ # Multinode specific settings SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' devstack_services: # Shared services dstat: true @@ -225,7 +225,7 @@ # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" - PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' # Subnode specific settings DATABASE_TYPE: mysql RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" @@ -274,7 +274,7 @@ SWIFT_START_ALL_SERVICES: false SWIFT_HASH: 
1234123412341234 CINDER_PERIODIC_INTERVAL: 10 - DEBUG_LIBVIRT_COREDUMPS: True + DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true VNCSERVER_LISTEN: 0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP @@ -282,7 +282,7 @@ post-config: $NEUTRON_CONF: DEFAULT: - global_physnet_mtu: "{{ external_bridge_mtu }}" + global_physnet_mtu: '{{ external_bridge_mtu }}' devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. @@ -479,6 +479,10 @@ run: playbooks/unit-tests/run.yaml - project: + templates: + - integrated-gate + - integrated-gate-py35 + - publish-openstack-sphinx-docs check: jobs: - devstack @@ -488,10 +492,48 @@ - devstack-platform-fedora-latest - devstack-multinode - devstack-unit-tests + - openstack-tox-bashate + - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa: + voting: false + - legacy-swift-dsvm-functional: + voting: false + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-grenade: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-grenade-multinode: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-tempest-linuxbridge: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - tempest-multinode-full: + voting: false + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ gate: jobs: - devstack - devstack-unit-tests + - openstack-tox-bashate + - neutron-grenade-multinode: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-tempest-linuxbridge: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-grenade: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. @@ -520,4 +562,39 @@ - nova-next - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi - - neutron-tempest-with-uwsgi \ No newline at end of file + - neutron-tempest-with-uwsgi + - legacy-tempest-dsvm-full-devstack-plugin-ceph: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - legacy-tempest-dsvm-py35-full-devstack-plugin-ceph: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-tempest-dvr: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - legacy-tempest-dsvm-neutron-dvr-multinode-full: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - neutron-tempest-dvr-ha-multinode-full: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - legacy-tempest-dsvm-lvm-multibackend: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + - legacy-tempest-dsvm-neutron-pg-full: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + periodic: + jobs: + - legacy-periodic-tempest-dsvm-oslo-latest-full-master: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ + From 53db72c6c64229db62649fc31965e024fbfec365 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Sat, 8 Sep 2018 22:53:59 -0400 Subject: [PATCH 0938/1936] switch documentation job to new PTI This is a mechanically generated patch to switch the documentation jobs to use the new PTI versions of the jobs as part of the python3-first goal. 
See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: I338fc71919a41ec890bcb5edd0552ec7eb680eb5 Story: #2002586 Task: #24327 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 68b20930ea..88cb7a18c4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -482,7 +482,7 @@ templates: - integrated-gate - integrated-gate-py35 - - publish-openstack-sphinx-docs + - publish-openstack-docs-pti check: jobs: - devstack From 770690eda1c4578dcca5c5cdfed6688e7e084b24 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Mon, 10 Sep 2018 16:17:54 -0600 Subject: [PATCH 0939/1936] Remove cgroup natty or less block Given that Natty and other releases that don't use cgroups have been out of support in Ubuntu for years now, it's high time we removed the special case code block that sets up the cgroup mount. Change-Id: I5403a4b1b64a95236b4dfcb66c35c594a3460cca --- lib/nova | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/nova b/lib/nova index 5e157c5f11..8609956400 100644 --- a/lib/nova +++ b/lib/nova @@ -303,17 +303,6 @@ function configure_nova { # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then if is_ubuntu; then - if [[ ! "$DISTRO" > natty ]]; then - local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup - fi - fi - # enable nbd for lxc unless you're using an lvm backend # otherwise you can't boot instances if [[ "$NOVA_BACKEND" != "LVM" ]]; then From b4b6789848b485c6bbdc8cabab5134f0cc2a3828 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 10 Sep 2018 18:40:41 +0200 Subject: [PATCH 0940/1936] Follow job renames the swift and devstack-plugin-ceph jobs have been renamed, follow rename and use in-repo jobs. Depends-On: https://review.openstack.org/543048 Change-Id: Idccc21e47b2cc04e5eeab4db7f7fb7cf156f8049 --- .zuul.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 88cb7a18c4..7fa8d2a4e9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -495,7 +495,7 @@ - openstack-tox-bashate - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false - - legacy-swift-dsvm-functional: + - swift-dsvm-functional: voting: false irrelevant-files: - ^.*\.rst$ @@ -563,11 +563,11 @@ - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi - neutron-tempest-with-uwsgi - - legacy-tempest-dsvm-full-devstack-plugin-ceph: + - devstack-plugin-ceph-tempest: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - legacy-tempest-dsvm-py35-full-devstack-plugin-ceph: + - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 6403b1447fd9e1bcb6fc6f3ebb77d9abd9b776fc Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Wed, 12 Sep 2018 11:37:48 +0200 Subject: [PATCH 0941/1936] Remove master only job legacy-periodic-tempest-dsvm-oslo-latest-full-master runs only on master, remove it. This needs to stay in project-config. 
Change-Id: I81e66ddb0976bb4bb7a7cd8efbbae3bda551191d --- .zuul.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 88cb7a18c4..36e25fc681 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -591,10 +591,3 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - periodic: - jobs: - - legacy-periodic-tempest-dsvm-oslo-latest-full-master: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - From 147388466daef8e6589013d9bae684f9f8b8c57a Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 13 Sep 2018 17:50:29 +0200 Subject: [PATCH 0942/1936] Setup branch in setup-devstack-source-dirs Allow the setup-devstack-source-dirs role to accept a target role to be setup - when available - for the repos. Change-Id: Iebcba0d4be6d9d71b783e10a82c35a406afbd6bf --- roles/setup-devstack-source-dirs/README.rst | 5 ++++ .../tasks/main.yaml | 29 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst index 4ebf8399c2..4129eae383 100644 --- a/roles/setup-devstack-source-dirs/README.rst +++ b/roles/setup-devstack-source-dirs/README.rst @@ -9,3 +9,8 @@ into it. :default: /opt/stack The devstack base directory. + + .. zuul:rolevar:: devstack_branch + :default: None + + The target branch to be setup (where available). diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index e6bbae23b7..c196c37e09 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -12,6 +12,35 @@ with_items: '{{ found_repos.files }}' become: yes +- name: Setup refspec for repos into devstack working directory + shell: + # Copied almost "as-is" from devstack-gate setup-workspace function + # but removing the dependency on functions.sh + # TODO this should be rewritten as a python module. + cmd: | + cd {{ devstack_base_dir }}/{{ item.path | basename }} + base_branch={{ devstack_sources_branch }} + if git branch -a | grep "$base_branch" > /dev/null ; then + git checkout $base_branch + elif [[ "$base_branch" == stable/* ]]; then + # Look for an eol tag for the stable branch. + eol_tag=${base_branch#stable/}-eol + if git tag -l |grep $eol_tag >/dev/null; then + git checkout $eol_tag + git reset --hard $eol_tag + if ! 
git clean -x -f -d -q ; then
+ sleep 1
+ git clean -x -f -d -q
+ fi
+ fi
+ else
+ git checkout master
+ fi
+ args:
+ executable: /bin/bash
+ with_items: '{{ found_repos.files }}'
+ when: devstack_sources_branch is defined
+
- name: Set ownership of repos
 file:
 path: '{{ devstack_base_dir }}'

From 5ed05394ffddcd7db278db7c30b1315dcd37f724 Mon Sep 17 00:00:00 2001
From: melissaml
Date: Thu, 20 Sep 2018 10:45:10 +0800
Subject: [PATCH 0943/1936] fix a typo

Change-Id: I3cc4633b125ef3c5be40dd444cad78a888f832e4
---
 lib/neutron_plugins/bigswitch_floodlight | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 52c6ad58b5..d3f5bd5752 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Neuton Big Switch/FloodLight plugin
+# Neutron Big Switch/FloodLight plugin
 # ------------------------------------
 # Save trace setting

From dae1041c9412e76927614dcd1e1b3bc5600692d3 Mon Sep 17 00:00:00 2001
From: Lajos Katona
Date: Fri, 21 Sep 2018 10:51:47 +0200
Subject: [PATCH 0944/1936] doc: Add USE_PYTHON3 to configuration guide

Change-Id: I8b6743c1dbdc00001691b2727bd0c344fb6ccd51
---
 doc/source/configuration.rst | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 46e50df6bb..022e6ba529 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -446,6 +446,16 @@ Python bindings added when they are enabled.
 ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"

+Use python3
+------------
+
+By default ``stack.sh`` uses python2 (the exact version set by
+``PYTHON2_VERSION``). This can be overridden so devstack will run
+python3 (the exact version set by ``PYTHON3_VERSION``).
+
+ ::
+
+ USE_PYTHON3=True

 A clean install every time
 --------------------------

From b38cb6d084ea2c38bc0de1b2d385935552b2bbfd Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Mon, 24 Sep 2018 06:19:33 +0000
Subject: [PATCH 0945/1936] Updated from generate-devstack-plugins-list

Change-Id: I4dba3ebf78333524185e84f94a2e7d52ad05a968
---
 doc/source/plugin-registry.rst | 1 -
 1 file changed, 1 deletion(-)

diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index f0c6238e0b..2361b4b54c 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -62,7 +62,6 @@ freezer `git://git.openstack.org/openstack/freeze
 freezer-api `git://git.openstack.org/openstack/freezer-api `__
 freezer-tempest-plugin `git://git.openstack.org/openstack/freezer-tempest-plugin `__
 freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__
-fuxi `git://git.openstack.org/openstack/fuxi `__
 gce-api `git://git.openstack.org/openstack/gce-api `__
 glare `git://git.openstack.org/openstack/glare `__
 group-based-policy `git://git.openstack.org/openstack/group-based-policy `__

From 61f6cafed7030a70e76b1836fba2390a22b052dd Mon Sep 17 00:00:00 2001
From: aojeagarcia
Date: Mon, 24 Sep 2018 12:34:15 +0200
Subject: [PATCH 0946/1936] Switch to lioadm in openSUSE distros

This patch switches the CINDER_ISCSI_HELPER from tgtadm to lioadm in
openSUSE distros, as it increases performance and reduces flakiness in
some tests.
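As a rough illustration, a deployer pins the helper explicitly in local.conf like this (a minimal sketch; per the diff below, lioadm is the only value accepted on these platforms after this change):

    [[local|localrc]]
    CINDER_ISCSI_HELPER=lioadm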
Change-Id: Ic3ee9c6baabe20f8f4d14246f6e29808796a5db9 Signed-off-by: aojeagarcia --- lib/cinder | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 664f423c73..76bf928413 100644 --- a/lib/cinder +++ b/lib/cinder @@ -96,9 +96,9 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # https://bugs.launchpad.net/cinder/+bug/1180976 CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} -# Centos7 switched to using LIO and that's all that's supported, -# although the tgt bits are in EPEL we don't want that for CI -if is_fedora; then +# Centos7 and OpenSUSE switched to using LIO and that's all that's supported, +# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI +if is_fedora || is_suse; then CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then die "lioadm is the only valid Cinder target_helper config on this platform" From 4e16c3dd5f4c5740ab92facf2083282440df9ac0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Suder?= Date: Wed, 26 Sep 2018 15:52:13 +0200 Subject: [PATCH 0947/1936] Fix common systemd pitfalls die msg That change introduces correct way of generating msg for die in common systemd pitfalls. Co-Authored-By: Szymon Datko Co-Authored-By: Piotr Bielak Change-Id: I28aebffce6c5561360a9e44c1abc44b709054c30 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index fae936a3d0..d83cd4c3bb 100644 --- a/functions-common +++ b/functions-common @@ -1439,7 +1439,7 @@ function _common_systemd_pitfalls { # do some sanity checks on $cmd to see things we don't expect to work if [[ "$cmd" =~ "sudo" ]]; then - local msg=< Date: Thu, 20 Sep 2018 17:16:23 +0000 Subject: [PATCH 0948/1936] remove external_network_bridge option The external_network_bridge option is deprecated/legacy and being removed from neutron (see I07474713206c218710544ad98c08caaa37dbf53a). This patch removes the external_network_bridge option iniset from devstack scripts. 
Change-Id: I4d9641cc9bb83719c9af1edabb89a63c4c2b1d96 --- lib/neutron | 3 --- lib/neutron_plugins/ovs_base | 4 ---- 2 files changed, 7 deletions(-) diff --git a/lib/neutron b/lib/neutron index 4847e87f2f..4e9691dfc0 100644 --- a/lib/neutron +++ b/lib/neutron @@ -95,9 +95,6 @@ NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" -# This is needed because _neutron_ovs_base_configure_l3_agent will set -# external_network_bridge -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} # This is needed because _neutron_ovs_base_configure_l3_agent uses it to create # an external network bridge PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 523024e2fe..558a7d5d07 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -96,10 +96,6 @@ function _neutron_ovs_base_configure_firewall_driver { } function _neutron_ovs_base_configure_l3_agent { - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - fi - neutron-ovs-cleanup --config-file $NEUTRON_CONF if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || From d543ecb737507cae19d5e9d4b4c9fe1265ffbb6f Mon Sep 17 00:00:00 2001 From: Goutham Pacha Ravi Date: Thu, 27 Sep 2018 09:59:24 -0700 Subject: [PATCH 0949/1936] Add a Bionic Beaver nodeset I'm switching tempest/dsvm jobs to run on Ubuntu 18.04 LTS (Bionic Beaver) on openstack/manila, and I believe this nodeset can be here so other projects can use it too. Change-Id: Ib8279cde3e14d5378f27254188ee14dbb0800428 Needed-By: https://review.openstack.org/#/c/604929/ --- .zuul.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 9d73bede54..94da654193 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,6 +8,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-centos-7 nodes: From 991b1f13f0aed578dccec2e761be69005357444f Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 27 Sep 2018 18:30:38 +0000 Subject: [PATCH 0950/1936] Update cinder backup_driver to full class name Legacy backup service support was recently dropped from cinder in change I3ada2dee1857074746b1893b82dd5f6641c6e579 and we need to adjust how we set the config option in devstack accordingly. This updates the backup_driver option to specify a full class name instead of only the module name. 
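For illustration, the before/after shape of the option as devstack writes it with iniset (values taken from the ceph backup backend touched below):

    # before: module path only
    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
    # after: full class path
    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"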
Closes-Bug: #1794859 Change-Id: I3a72f38b564b8b83b233fccba7685833b6394d45 --- lib/cinder_backends/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 00a0bb3b3d..33c9706d3d 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -65,7 +65,7 @@ function configure_cinder_backend_ceph { sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph" + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" From 866efef17af8f0d04240bf05714cb12452185822 Mon Sep 17 00:00:00 2001 From: aojeagarcia Date: Fri, 28 Sep 2018 10:43:46 +0200 Subject: [PATCH 0951/1936] Allow ipv6 ECMP in devstack It turns out that a host can have multiple valid default gateways, something that's not common in ipv4. This patches add supports for multiple default gateways in ipv6 environments. Closes-Bug: #1786259 Change-Id: I30bf655f7160dd19c427ee79acdf145671a3e520 Signed-off-by: aojeagarcia --- lib/neutron_plugins/services/l3 | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 9be32b79aa..ec289f6656 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -103,7 +103,7 @@ SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices" -default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}') +default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') function _determine_config_l3 { local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" @@ -395,6 +395,10 @@ function _neutron_configure_router_v6 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # Ensure IPv6 forwarding is enabled on the host + sudo sysctl -w net.ipv6.conf.all.forwarding=1 + # if the Linux host considers itself to be a router then it will + # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. # This is needed for neutron-based devstack clouds to work in # IPv6-only clouds in the gate. Please do not remove this without @@ -405,8 +409,6 @@ function _neutron_configure_router_v6 { # device name would be reinterpreted as a slash, causing an error. 
sudo sysctl -w net/ipv6/conf/$d/accept_ra=2 done - # Ensure IPv6 forwarding is enabled on the host - sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') From e991f7da457e0f1343ca13e8adeb0f6334b04990 Mon Sep 17 00:00:00 2001 From: imacdonn Date: Thu, 4 Oct 2018 19:41:59 +0000 Subject: [PATCH 0952/1936] Use bash-style test for Fedora version Old-style test fails on Ubuntu when python3 enabled, with: .../devstack/inc/python: line 52: [: 16.04: integer expression expected Use bash-style test, which doesn't attempt to evaluate the RHS if the LHS evaluates to false Change-Id: If18031ab98c9060e5825c3a8d3c647bd3705cd9c Closes-Bug: #1796174 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 96be107dfa..d8b8169fa1 100644 --- a/inc/python +++ b/inc/python @@ -49,7 +49,7 @@ function get_python_exec_prefix { fi $xtrace - if python3_enabled && [ "$os_VENDOR" = "Fedora" -a $os_RELEASE -gt 26 ]; then + if python3_enabled && [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then # Default Python 3 install prefix changed to /usr/local in Fedora 27: # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe echo "/usr/local/bin" From 9a543a81acb808e4275765da7ff0f613109b6603 Mon Sep 17 00:00:00 2001 From: aojeagarcia Date: Fri, 28 Sep 2018 08:55:49 +0200 Subject: [PATCH 0953/1936] Don't use ipv6 for DNS SAN fields with python3 Python2 match routines for x509 fields are broken and have to use the DNS field for ip addresses. The problem is that if you use ipv6 addresses in the DNS field, urllib3 fails when trying to encode it. Since python3 match routines for x509 fields are correct, this patch disables the hack for python3, encoding the ip address in the corresponding field only of the certificate. Partial-Bug: #1794929 Depends-On: https://review.openstack.org/#/c/608468 Change-Id: I7b9cb15ccfa181648afb12be51ee48bed14f9156 Signed-off-by: aojeagarcia --- lib/tls | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/tls b/lib/tls index e3ed3cc2ac..217f40e3a5 100644 --- a/lib/tls +++ b/lib/tls @@ -227,9 +227,13 @@ function init_CA { function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then - # Lie to let incomplete match routines work - # see https://bugs.python.org/issue23239 - TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" + if python3_enabled; then + TLS_IP="IP:$TLS_IP" + else + # Lie to let incomplete match routines work with python2 + # see https://bugs.python.org/issue23239 + TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" + fi fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" From e8a6a0261c8118e8f89b51f1e31015a236a0fba3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 8 Oct 2018 15:20:34 +1100 Subject: [PATCH 0954/1936] Quote error messages As a follow-on to I28aebffce6c5561360a9e44c1abc44b709054c30; make sure we quote the error messages on the way through so they retain their newlines. 
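A minimal sketch of the shell behaviour motivating this (the variable name is illustrative):

    msg=$'first line\nsecond line'
    echo $msg      # unquoted: word splitting collapses the newline into a space
    echo "$msg"    # quoted: the newline is preserved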
Change-Id: I493317948264941b4788b100a0b0bc13d2698acf --- functions-common | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index d83cd4c3bb..038e9adf3d 100644 --- a/functions-common +++ b/functions-common @@ -228,9 +228,9 @@ function err { xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; + echo "$msg" 1>&2; if [[ -n ${LOGDIR} ]]; then - echo $msg >> "${LOGDIR}/error.log" + echo "$msg" >> "${LOGDIR}/error.log" fi $xtrace return $exitcode @@ -283,7 +283,7 @@ function warn { xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg + echo "$msg" $xtrace return $exitcode } @@ -1442,11 +1442,11 @@ function _common_systemd_pitfalls { read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here You are trying to use run_process with sudo, this is not going to work under systemd. -If you need to run a service as a user other than $STACK_USER call it with: +If you need to run a service as a user other than \$STACK_USER call it with: run_process \$name \$cmd \$group \$user EOF - die $LINENO $msg + die $LINENO "$msg" fi if [[ ! "$cmd" =~ ^/ ]]; then @@ -1456,7 +1456,7 @@ start under systemd. Please update your run_process stanza to have an absolute path. EOF - die $LINENO $msg + die $LINENO "$msg" fi } From eb7d1ad198276821c3037cb6a23f995537572dcd Mon Sep 17 00:00:00 2001 From: aojeagarcia Date: Mon, 24 Sep 2018 10:17:16 +0200 Subject: [PATCH 0955/1936] Disable apparmor for openSUSE distros Dnsmasq and haproxy are used frequently by neutron and nova, apparmor profiles can block some operations and the deployed cloud can't work properly so some tests are going to fail. Some openSUSE distros has apparmor enabled by default so we need to disable it. Change-Id: I30fda684effb09810643e58bf0b31a73d7d9b378 Signed-off-by: aojeagarcia --- tools/fixup_stuff.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 914793245e..a939e30b02 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -205,6 +205,19 @@ function fixup_fedora { fi } +function fixup_suse { + if ! is_suse; then + return + fi + + # Disable apparmor profiles in openSUSE distros + # to avoid issues with haproxy and dnsmasq + if [ -x /usr/sbin/aa-enabled ] && sudo /usr/sbin/aa-enabled -q; then + sudo systemctl disable apparmor + sudo /usr/sbin/aa-teardown + fi +} + # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has # connection issues under proxy so re-install the latest version using # pip. To avoid having pip's virtualenv overwritten by the distro's @@ -239,5 +252,6 @@ function fixup_all { fixup_uca fixup_python_packages fixup_fedora + fixup_suse fixup_virtualenv } From 297a50ac86e597cb31c7f4347925ad0e5984960f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 20 Jun 2018 11:08:54 +0200 Subject: [PATCH 0956/1936] Convert to openSUSE Leap 15.0 platform testing Leap 15.0 has been released May 25th, 2018 (see https://en.opensuse.org/Portal:15.0 ) and we'd like to transition devstack against it and remove Leap 42.3 from the testing matrix. Leap 15.0 is newer than Leap 42.3 as the numbering schema of openSUSE was changed. 
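For illustration, the release string is what lets the scripts tell Leap and Tumbleweed apart; a simplified sketch of the release check extended in the diff below (example values only, and the real check also looks at os_CODENAME):

    # Leap releases keep a dot (e.g. 15.0); Tumbleweed uses a dot-less datestamp (e.g. 20180218)
    if [ "$os_RELEASE" = "${os_RELEASE/\./}" ]; then
        echo "no dot in release: treat as Tumbleweed"
    else
        echo "dotted release: treat as Leap"
    fi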
Co-Authored-By: Antonio Ojea Change-Id: I078f9a2580160c564c33e575008516f5e92239d6 --- .zuul.yaml | 12 ++++++------ functions-common | 8 +++++--- lib/databases/mysql | 2 +- stack.sh | 3 ++- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9d73bede54..87b8049403 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -19,10 +19,10 @@ - controller - nodeset: - name: devstack-single-node-opensuse-423 + name: devstack-single-node-opensuse-150 nodes: - name: controller - label: opensuse-423 + label: opensuse-150 groups: - name: tempest nodes: @@ -395,10 +395,10 @@ voting: false - job: - name: devstack-platform-opensuse-423 + name: devstack-platform-opensuse-150 parent: tempest-full - description: openSUSE 43.2 platform test - nodeset: devstack-single-node-opensuse-423 + description: openSUSE 15.0 platform test + nodeset: devstack-single-node-opensuse-150 voting: false - job: @@ -487,7 +487,7 @@ jobs: - devstack - devstack-platform-centos-7 - - devstack-platform-opensuse-423 + - devstack-platform-opensuse-150 - devstack-platform-opensuse-tumbleweed - devstack-platform-fedora-latest - devstack-multinode diff --git a/functions-common b/functions-common index fae936a3d0..fa571aecfd 100644 --- a/functions-common +++ b/functions-common @@ -374,8 +374,10 @@ function GetDistro { elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then DISTRO="opensuse-$os_RELEASE" # Tumbleweed uses "n/a" as a codename, and the release is a datestring - # like 20180218, so not very useful. - [ "$os_CODENAME" = "n/a" ] && DISTRO="opensuse-tumbleweed" + # like 20180218, so not very useful. Leap however uses a release + # with a "dot", so for example 15.0 + [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ + DISTRO="opensuse-tumbleweed" elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then # just use major release DISTRO="sle${os_RELEASE%.*}" @@ -1376,7 +1378,7 @@ function zypper_install { [[ "$(id -u)" = "0" ]] && sudo="env" $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ - zypper --non-interactive install --auto-agree-with-licenses "$@" + zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@" } function write_user_unit_file { diff --git a/lib/databases/mysql b/lib/databases/mysql index cf61056389..ac0c083c91 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -16,7 +16,7 @@ MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} register_database mysql MYSQL_SERVICE_NAME=mysql -if is_fedora && ! is_oraclelinux; then +if is_suse || is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb fi diff --git a/stack.sh b/stack.sh index 56e00bfb67..be3c4be147 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! 
${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -1137,6 +1137,7 @@ if is_service_enabled neutron; then echo_summary "Configuring Neutron" configure_neutron + # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then init_neutron From 35485ca21f13788958a636829e135ee639881a76 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Tue, 9 Oct 2018 07:39:07 +0000 Subject: [PATCH 0957/1936] Remove setting of unnecessary tempest config options Tempest have removed the volume-feature-enabled.api_v1 config options[1] and modified the default value of volume-feature-enabled.api_v3 to True. These config options not needed to be set from devstack side. [1] https://review.openstack.org/#/c/573135/ Change-Id: Ic35cf4482ab4d3c2e69348ec92568e68f6ea74ee --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 3fefa5b320..fba8826a2d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -456,9 +456,6 @@ function configure_tempest { TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) - # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. - iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True - iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} # Reset microversions to None where v2 is running which does not support microversion. From da863fab1d54f285fdf964c7d09602e4801b627d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 10 Oct 2018 06:20:08 +0000 Subject: [PATCH 0958/1936] Updated from generate-devstack-plugins-list Change-Id: I53db3e407e9495cbff758e823ea95485c888e706 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2361b4b54c..0ebd0c32a1 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -171,6 +171,7 @@ solum `git://git.openstack.org/openstack/solum stackube `git://git.openstack.org/openstack/stackube `__ storlets `git://git.openstack.org/openstack/storlets `__ stx-config `git://git.openstack.org/openstack/stx-config `__ +stx-fault `git://git.openstack.org/openstack/stx-fault `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ From 0d91c29d9e363ac09d98d17bae90da1acd5ee66a Mon Sep 17 00:00:00 2001 From: aojeagarcia Date: Mon, 8 Oct 2018 16:43:19 +0200 Subject: [PATCH 0959/1936] Add devstack ipv6 jobs We can see that there is more demand on using ipv6 as the underlay infrastructure to deploy new services, and OpenStack should be ready for that. 
These devstack ipv6 jobs are based on the work started by Jens Harbott in https://review.openstack.org/#/c/608168/ Change-Id: I55bd067487665e5026e82a0737cb0f38a69499fb Signed-off-by: aojeagarcia --- .zuul.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 9d73bede54..6e6d458d0c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -375,6 +375,19 @@ VNCSERVER_LISTEN: 0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP +- job: + name: devstack-ipv6 + parent: devstack + description: | + Devstack single node job for integration gate with IPv6. + vars: + devstack_localrc: + SERVICE_IP_VERSION: 6 + SERVICE_HOST: "" + # IPv6 and certificates known issue with python2 + # https://bugs.launchpad.net/devstack/+bug/1794929 + USE_PYTHON3: true + - job: name: devstack-multinode parent: devstack @@ -486,6 +499,8 @@ check: jobs: - devstack + - devstack-ipv6: + voting: false - devstack-platform-centos-7 - devstack-platform-opensuse-423 - devstack-platform-opensuse-tumbleweed From e9f3988a57498f8593ff51055be212402fec60ce Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 11 Oct 2018 07:02:16 +0000 Subject: [PATCH 0960/1936] Updated from generate-devstack-plugins-list Change-Id: Iae7700901060efa7687138dc6adf93604afa3ef6 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0ebd0c32a1..b6b4058223 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -65,6 +65,7 @@ freezer-web-ui `git://git.openstack.org/openstack/freeze gce-api `git://git.openstack.org/openstack/gce-api `__ glare `git://git.openstack.org/openstack/glare `__ group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ +gyan `git://git.openstack.org/openstack/gyan `__ heat `git://git.openstack.org/openstack/heat `__ heat-dashboard `git://git.openstack.org/openstack/heat-dashboard `__ horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ From 7d0003ef7ed2c2609ffe6782e0ce6147f9c9a3bf Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 11 Oct 2018 08:59:26 -0700 Subject: [PATCH 0961/1936] Make sure nova-compute is not configured with database access Apparently we're inheriting some database config from the main file, which should not be set for nova-compute. If we're properly in superconductor mode where we have a dedicated config for compute, remove those lines if present. Closes-Bug: #1797413 Change-Id: I4820abe57a023050dd8d067c77e26028801ff288 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index 5e157c5f11..8b49116147 100644 --- a/lib/nova +++ b/lib/nova @@ -922,6 +922,9 @@ function start_nova_compute { # RPC, we also disable track_instance_changes. iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + # Make sure we nuke any database config + inidelete $NOVA_CPU_CONF database connection + inidelete $NOVA_CPU_CONF api_database connection fi # Console proxies were configured earlier in create_nova_conf. Now that the From 2c90239acc840659d8ae08e66be01917a2395bac Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Fri, 12 Oct 2018 10:09:17 +0200 Subject: [PATCH 0962/1936] Use tempest-pg-full The legacy job legacy-tempest-dsvm-neutron-pg-full is now named tempest-pg-full - using the new tempest and Zuul v3 frameworks. Change experimental job to use new job. 
Change-Id: If16397724fb4facd2a0db8148bdf7ba427ca10b6 Depends-On: https://review.openstack.org/609530 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index c21fc9e2bd..8520f21353 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -597,7 +597,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - legacy-tempest-dsvm-neutron-pg-full: + - tempest-pg-full: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 1c75d2f06c03b0ed0517b499674d36052bcc3793 Mon Sep 17 00:00:00 2001 From: Laura Sofia Enriquez Date: Thu, 11 Oct 2018 19:13:17 -0300 Subject: [PATCH 0963/1936] Fedora version updated in Devstack doc Docs say that you require Fedora 24/25 to run Devstack, but Devstack is working in newer versions. Update document to say that Fedora 28 can be used instead. Closes-Bug: #1797239 Change-Id: Ie5227db9943e5ddb93cd37440165eabbae22f4fc Signed-off-by: Laura Sofia Enriquez --- doc/source/index.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 2ff4ff088a..fcf1e82d34 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -39,8 +39,9 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. Devstack -attempts to support Ubuntu 16.04/17.04, Fedora 24/25, CentOS/RHEL 7, -as well as Debian and OpenSUSE. +attempts to support the two latest LTS releases of Ubuntu, the +latest/current Fedora version, CentOS/RHEL 7, as well as Debian and +OpenSUSE. If you do not have a preference, Ubuntu 16.04 is the most tested, and will probably go the smoothest. From 99cd16574604fc36e4fa9a8264100fe2ef17c808 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 18 Oct 2018 06:18:46 +0000 Subject: [PATCH 0964/1936] Updated from generate-devstack-plugins-list Change-Id: I2ba736a97377ba1823b3d48983ebf6e4640c72aa --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b6b4058223..00c1255e2a 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -173,6 +173,7 @@ stackube `git://git.openstack.org/openstack/stacku storlets `git://git.openstack.org/openstack/storlets `__ stx-config `git://git.openstack.org/openstack/stx-config `__ stx-fault `git://git.openstack.org/openstack/stx-fault `__ +stx-update `git://git.openstack.org/openstack/stx-update `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ From 606e6bc446a421646b84ae7c143dbfb76cb416da Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 25 Oct 2018 06:29:11 +0000 Subject: [PATCH 0965/1936] Updated from generate-devstack-plugins-list Change-Id: Ie132b6898b44d24e71aa2f3534931ffac2a9efa1 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 00c1255e2a..b02061ed50 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -27,7 +27,6 @@ Plugin Name URL almanach `git://git.openstack.org/openstack/almanach `__ aodh `git://git.openstack.org/openstack/aodh `__ apmec `git://git.openstack.org/openstack/apmec `__ -astara `git://git.openstack.org/openstack/astara `__ barbican `git://git.openstack.org/openstack/barbican `__ bilean `git://git.openstack.org/openstack/bilean `__ blazar 
`git://git.openstack.org/openstack/blazar `__ From 7ddd733b7251259ddd67b93be6305380582f7ce7 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 9 May 2018 06:36:38 +0000 Subject: [PATCH 0966/1936] Drop devstack-platform-opensuse-tumbleweed Builds of opensuse-tumbleweed nodes are currently failing, so these jobs are receiving NODE_FAILURE. Change-Id: I3c2d73a150df009e7dadc76277be36eb72e0dfa7 --- .zuul.yaml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6682142ed2..7ac30d8284 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -38,16 +38,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-opensuse-tumbleweed - nodes: - - name: controller - label: opensuse-tumbleweed - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-fedora-latest nodes: @@ -424,13 +414,6 @@ nodeset: devstack-single-node-opensuse-150 voting: false -- job: - name: devstack-platform-opensuse-tumbleweed - parent: tempest-full - description: openSUSE Tumbleweed platform test - nodeset: devstack-single-node-opensuse-tumbleweed - voting: false - - job: name: devstack-platform-fedora-latest parent: tempest-full @@ -513,7 +496,6 @@ voting: false - devstack-platform-centos-7 - devstack-platform-opensuse-150 - - devstack-platform-opensuse-tumbleweed - devstack-platform-fedora-latest - devstack-multinode - devstack-unit-tests From 0d4c9c9fa37c6f47523359044e5f65629321ff6d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 10 Sep 2018 15:52:46 -0600 Subject: [PATCH 0967/1936] Have lib/tempest to create shared network In order to make sure not possible to introduce a change in tempest which breaks the shared network compatibility. Depends-On: I6e3e53c4ac26b4fef09fefb9c590dfa91f577565 Change-Id: Ib2e7096175c991acf35de04e840ac188752d3c17 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index fba8826a2d..84177e82f2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -242,6 +242,9 @@ function configure_tempest { # and the public_network_id should not be set. if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) + # make sure shared network presence does not confuses the tempest tests + openstack network create --share shared + openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG From 994e82e57f78a4c302d620e44bcdda36c4c0d5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Fri, 19 Oct 2018 12:08:12 +0200 Subject: [PATCH 0968/1936] Update ETCD_DOWNLOAD_URL Looks like https://github.com/coreos/etcd is redirecting to https://github.com/etcd-io/etcd, so let's use that official address as the download URL. 
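stackrc only supplies this as a default, so an environment that mirrors the etcd release
tarballs can still point elsewhere; a minimal sketch, assuming a hypothetical mirror host:

    # [[local|localrc]] override; mirror.example.org is a placeholder
    ETCD_DOWNLOAD_URL=https://mirror.example.org/etcd/releases/download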
Change-Id: I39355b4deb17ae11927a5339e73fb8ec3a274cf7 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 34bd6771a6..5dd5602be6 100644 --- a/stackrc +++ b/stackrc @@ -763,7 +763,7 @@ else fi ETCD_PORT=${ETCD_PORT:-2379} ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380} -ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download} +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download} ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE From e3e9ea299601665a295e31a98e90dd9587165850 Mon Sep 17 00:00:00 2001 From: Derek Higgins Date: Fri, 9 Nov 2018 15:43:13 +0000 Subject: [PATCH 0969/1936] Revert "remove external_network_bridge option" This reverts commit faaf96bfb15c5f4c45a72b149dc6fe1e1f907a71. Ironic jobs were still using this option, it needs to be switched to an alternative first. Change-Id: I1683d7cfa81f5fe2497cc7045e87f8b20fed4968 --- lib/neutron | 3 +++ lib/neutron_plugins/ovs_base | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/lib/neutron b/lib/neutron index 4e9691dfc0..4847e87f2f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -95,6 +95,9 @@ NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# This is needed because _neutron_ovs_base_configure_l3_agent will set +# external_network_bridge +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} # This is needed because _neutron_ovs_base_configure_l3_agent uses it to create # an external network bridge PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 558a7d5d07..523024e2fe 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -96,6 +96,10 @@ function _neutron_ovs_base_configure_firewall_driver { } function _neutron_ovs_base_configure_l3_agent { + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + fi + neutron-ovs-cleanup --config-file $NEUTRON_CONF if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || From 0f5a20fb79380806b7fd81d9e6d9c8afcd7dfbed Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 22 Nov 2018 06:19:15 +0000 Subject: [PATCH 0970/1936] Updated from generate-devstack-plugins-list Change-Id: I992065186ed4a4c5811a7c82ac09a0bc85ca11e8 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b02061ed50..7ed24638fd 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -172,6 +172,8 @@ stackube `git://git.openstack.org/openstack/stacku storlets `git://git.openstack.org/openstack/storlets `__ stx-config `git://git.openstack.org/openstack/stx-config `__ stx-fault `git://git.openstack.org/openstack/stx-fault `__ +stx-integ `git://git.openstack.org/openstack/stx-integ `__ +stx-nfv `git://git.openstack.org/openstack/stx-nfv `__ stx-update `git://git.openstack.org/openstack/stx-update `__ tacker `git://git.openstack.org/openstack/tacker `__ tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ From 8d1b20b4c2334dc3045cea93441568657869f6d1 Mon Sep 17 00:00:00 2001 From: Jens Harbott 
Date: Thu, 22 Nov 2018 13:17:01 +0000 Subject: [PATCH 0971/1936] Enable universe repository on Ubuntu The universe repository is not enabled when installing Ubuntu from an ISO (at least for Bionic). This leads to some errors during the devstack run that are not seen when running based on a cloud image which has that repo enabled by default. Enable that repository unconditionally, the operation is idempotent. Change-Id: Ifcb7ecd78fb25ca2136f5848c19b74500e520873 Closes-Bug: 1792936 --- tools/fixup_stuff.sh | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a939e30b02..4513af0a64 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -69,21 +69,29 @@ function fixup_keystone { fi } -# Ubuntu Cloud Archive -#--------------------- +# Ubuntu Repositories +#-------------------- # We've found that Libvirt on Xenial is flaky and crashes enough to be # a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to # get newer Libvirt. # Make it possible to switch this based on an environment variable as # libvirt 2.5.0 doesn't handle nested virtualization quite well and this # is required for the trove development environment. -function fixup_uca { - if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then +# Also enable universe since it is missing when installing from ISO. +function fixup_ubuntu { + if [[ "$DISTRO" != "xenial" && "$DISTRO" != "bionic" ]]; then return fi # This pulls in apt-add-repository install_package "software-properties-common" + + # Enable universe + sudo add-apt-repository -y universe + + if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then + return + fi # Use UCA for newer libvirt. if [[ -f /etc/ci/mirror_info.sh ]] ; then # If we are on a nodepool provided host and it has told us about where @@ -249,7 +257,7 @@ function fixup_virtualenv { function fixup_all { fixup_keystone - fixup_uca + fixup_ubuntu fixup_python_packages fixup_fedora fixup_suse From 80769c5714770c02c300ac9f3e9c06a44791dbfc Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Fri, 23 Nov 2018 05:18:40 +0900 Subject: [PATCH 0972/1936] Migration logic for neutron policy-in-code Neutron is in a process to migrate to policy-in-code. DevStack needs to be able to handle both cases with and without policy.json in the neutron repo. Note that nova assumes neutron API access with admin so user_name:neutron needs to be included in context_is_admin to make DevStack work properly. Hopefully this can be cleanup but this is a separate topic from policy-in-code. Needed-By: https://review.openstack.org/#/c/585037/ Change-Id: Id1b0600d92e839ade1790a15c372e82e8e16ee9f --- lib/neutron | 9 +++++++-- lib/neutron-legacy | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index 4847e87f2f..62f7366e7e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -183,9 +183,14 @@ function configure_neutron_new { # Neutron API server & Neutron plugin if is_service_enabled neutron-api; then local policy_file=$NEUTRON_CONF_DIR/policy.json - cp $NEUTRON_DIR/etc/policy.json $policy_file # Allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file + # NOTE(amotoki): This is required for nova works correctly with neutron. 
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $policy_file + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file + else + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file + fi cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini diff --git a/lib/neutron-legacy b/lib/neutron-legacy index be5b73ffa6..2fdb6db6f6 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -699,10 +699,15 @@ function _configure_neutron_common { cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE # allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + # NOTE(amotoki): This is required for nova works correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + else + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. From 67394b02417802842021fdef95f68edbf723eb71 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Sat, 24 Nov 2018 10:14:46 +0100 Subject: [PATCH 0973/1936] Remove is_in_projects_txt This function is nowhere used as confirmed by codesearch: http://codesearch.openstack.org/?q=is_in_projects_txt&i=nope&files=&repos= We can remove the function. Note that usage of this function was removed in change I843208e2e982eb04931b76f5cb4bd219fbcd70de in 2015. This came up in context of https://review.openstack.org/619089 where requirements/projects.txt is getting removed. Change-Id: I487d3f9e340bd45e83245b9ca91e8e3b5ac3ae02 --- inc/python | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/inc/python b/inc/python index d8b8169fa1..5fb7245623 100644 --- a/inc/python +++ b/inc/python @@ -490,17 +490,6 @@ function setup_develop { _setup_package_with_constraints_edit $project_dir -e $extras } -# determine if a project as specified by directory is in -# projects.txt. This will not be an exact match because we throw away -# the namespacing when we clone, but it should be good enough in all -# practical ways. -function is_in_projects_txt { - local project_dir=$1 - local project_name - project_name=$(basename $project_dir) - grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt -} - # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` # From 23d33a8b5bc945bd98c8fffe86b38f97247afbd9 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 21 Nov 2018 12:10:32 -0500 Subject: [PATCH 0974/1936] Set non-0 disk sizes for tempest flavors Nova change https://review.openstack.org/603910/ is going to change the default rule on policy os_compute_api:servers:create:zero_disk_flavor to admin-only, which will prevent non-admins from creating image-backed servers with a flavor that has disk=0 since it's a potential security exposure. Therefore we need the test flavors that are created for tempest to use non-0 disk values. 
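To keep working under that policy, the flavor disk has to be derived from the image size and
rounded up to a whole GiB; a rough shell sketch of the arithmetic (the byte count is a made-up
value in the ballpark of a Cirros image):

    # Ceiling division: truncating would give 0 GiB and trip the new policy
    size=13287936
    echo $(( (size + 1024**3 - 1) / 1024**3 ))    # prints 1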
Since the flavor_ref and flavor_ref_alt can be aligned to the image_ref and image_ref_alt in tempest.conf, we get the image sizes from glance (in bytes) and convert those to GiB disk sizes for each flavor, respectively. Since we're using Cirros images by default, we need to make sure to round up otherwise we'd still have a 0-disk flavor. There are lots of ways the math could be done here using numfmt, bash, awk, bc, etc, but it's simplest to write and probably easiest to read by using python for the size conversion code. Change-Id: I537c299b0cd400982189f35b31df74755422737e Related-Bug: #1739646 --- lib/tempest | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index fba8826a2d..7526d3bb4e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -102,6 +102,14 @@ function remove_disabled_extensions { remove_disabled_services "$extensions_list" "$disabled_exts" } +# image_size_in_gib - converts an image size from bytes to GiB, rounded up +# Takes an image ID parameter as input +function image_size_in_gib { + local size + size=$(openstack image show $1 -c size -f value) + echo $size | python -c "import math; print int(math.ceil(float(int(raw_input()) / 1024.0 ** 3)))" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -125,6 +133,7 @@ function configure_tempest { local public_network_id local public_router_id local ssh_connect_method="floating" + local disk # Save IFS ifs=$IFS @@ -190,11 +199,15 @@ function configure_tempest { available_flavors=$(nova flavor-list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then - openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano + # Determine the flavor disk size based on the image size. + disk=$(image_size_in_gib $image_uuid) + openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro + # Determine the alt flavor disk size based on the alt image size. + disk=$(image_size_in_gib $image_uuid_alt) + openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 m1.micro fi flavor_ref_alt=84 else From 78a564bb0304b6f930e1491e7e116a0a0f6d9ab6 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Fri, 5 Oct 2018 10:17:56 +0100 Subject: [PATCH 0975/1936] Use openstack/placement instead of placement-in-nova We introduce and set PLACEMENT_REPO, add a placement-manage command to sync database tables (see one of the commits on which this depends), use /etc/placement/placement.conf for config, and put the uwsgi config file (pointing to placement-api instead of nova-placement-api) in /etc/placement. openstack/placement is also added to the required-projects in the devstack zuul job. Change-Id: I0b217e7a8c68a637b7a3445f6c44b7574117e320 --- .zuul.yaml | 5 ++-- lib/placement | 81 ++++++++++++++++++++++++++------------------------- stack.sh | 2 -- stackrc | 4 +++ 4 files changed, 48 insertions(+), 44 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 7ac30d8284..38e663e933 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -241,8 +241,8 @@ This base job can be used for single node and multinode devstack jobs. With a single node nodeset, this job sets up an "all-in-one" (aio) - devstack with the six OpenStack services included in the devstack tree: - keystone, glance, cinder, neutron, nova and swift. 
+ devstack with the seven OpenStack services included in the devstack tree: + keystone, glance, cinder, neutron, nova, placement, and swift. With a two node nodeset, this job sets up an aio + compute node. The controller can be customised using host-vars.controller, the @@ -265,6 +265,7 @@ - git.openstack.org/openstack/keystone - git.openstack.org/openstack/neutron - git.openstack.org/openstack/nova + - git.openstack.org/openstack/placement - git.openstack.org/openstack/swift timeout: 7200 vars: diff --git a/lib/placement b/lib/placement index a1602bab90..409ebecc4a 100644 --- a/lib/placement +++ b/lib/placement @@ -3,9 +3,6 @@ # lib/placement # Functions to control the configuration and operation of the **Placement** service # -# Currently the placement service is embedded in nova. Eventually we -# expect this to change so this file is started as a separate entity -# despite making use of some *NOVA* variables and files. # Dependencies: # @@ -29,23 +26,21 @@ set +o xtrace # Defaults # -------- -PLACEMENT_CONF_DIR=/etc/nova -PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf -PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement} -# Nova virtual environment +PLACEMENT_DIR=$DEST/placement +PLACEMENT_CONF_DIR=/etc/placement +PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf +PLACEMENT_AUTH_CACHE_DIR=${PLACEMENT_AUTH_CACHE_DIR:-/var/cache/placement} +PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone} +# Placement virtual environment if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["nova"]=${NOVA_DIR}.venv - PLACEMENT_BIN_DIR=${PROJECT_VENV["nova"]}/bin + PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv + PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin else PLACEMENT_BIN_DIR=$(get_python_exec_prefix) fi -PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/nova-placement-api +PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini -# The placement service can optionally use a separate database -# connection. Set PLACEMENT_DB_ENABLED to True to use it. 
-PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED) - if is_service_enabled tls-proxy; then PLACEMENT_SERVICE_PROTOCOL="https" fi @@ -70,27 +65,26 @@ function cleanup_placement { sudo rm -f $(apache_site_config_for nova-placement-api) sudo rm -f $(apache_site_config_for placement-api) remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" + sudo rm -f $PLACEMENT_AUTH_CACHE_DIR/* } # _config_placement_apache_wsgi() - Set WSGI config files function _config_placement_apache_wsgi { local placement_api_apache_conf local venv_path="" - local nova_bin_dir="" - nova_bin_dir=$(get_python_exec_prefix) + local placement_bin_dir="" + placement_bin_dir=$(get_python_exec_prefix) placement_api_apache_conf=$(apache_site_config_for placement-api) - # reuse nova's venv if there is one as placement code lives - # there if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" - nova_bin_dir=${PROJECT_VENV["nova"]}/bin + venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages" + placement_bin_dir=${PROJECT_VENV["placement"]}/bin fi sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf sudo sed -e " s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g; + s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g; s|%SSLENGINE%|$placement_ssl|g; s|%SSLCERTFILE%|$placement_certfile|g; s|%SSLKEYFILE%|$placement_keyfile|g; @@ -110,19 +104,23 @@ function configure_placement_nova_compute { iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf placement project_name "$SERVICE_TENANT_NAME" iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" - # TODO(cdent): auth_strategy, which is common to see in these - # blocks is not currently used here. For the time being the - # placement api uses the auth_strategy configuration setting - # established by the nova api. This avoids, for the time, being, - # creating redundant configuration items that are just used for - # testing. + iniset $conf placement auth_strategy $PLACEMENT_AUTH_STRATEGY +} + +# create_placement_conf() - Write confg +function create_placement_conf { + rm -f $PLACEMENT_CONF + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY + configure_auth_token_middleware $PLACEMENT_CONF placement $PLACEMENT_AUTH_CACHE_DIR + setup_logging $PLACEMENT_CONF } # configure_placement() - Set config files, create data dirs, etc function configure_placement { - if [ "$PLACEMENT_DB_ENABLED" != False ]; then - iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` - fi + sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR + create_placement_conf if [[ "$WSGI_MODE" == "uwsgi" ]]; then write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" @@ -143,25 +141,28 @@ function create_placement_accounts { "$placement_api_url" } +# create_placement_cache_dir() - Create directories for keystone cache +function create_placement_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $PLACEMENT_AUTH_CACHE_DIR + rm -f $PLACEMENT_AUTH_CACHE_DIR/* +} + # init_placement() - Create service user and endpoints -# If PLACEMENT_DB_ENABLED is true, create the separate placement db -# using, for now, the api_db migrations. 
function init_placement { - if [ "$PLACEMENT_DB_ENABLED" != False ]; then - recreate_database placement - # Database migration will be handled when nova does an api_db sync - # TODO(cdent): When placement is extracted we'll do our own sync - # here. - fi + recreate_database placement + $PLACEMENT_BIN_DIR/placement-manage db sync create_placement_accounts + create_placement_cache_dir } # install_placement() - Collect source and prepare function install_placement { install_apache_wsgi # Install the openstackclient placement client plugin for CLI - # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r. - pip_install osc-placement + pip_install_gr osc-placement + git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH + setup_develop $PLACEMENT_DIR } # start_placement_api() - Start the API processes ahead of other things diff --git a/stack.sh b/stack.sh index be3c4be147..497c8bc479 100755 --- a/stack.sh +++ b/stack.sh @@ -894,8 +894,6 @@ if is_service_enabled neutron; then stack_install_service neutron fi -# Nova configuration is used by placement so we need to create nova.conf -# first. if is_service_enabled nova; then # Compute service stack_install_service nova diff --git a/stackrc b/stackrc index 34bd6771a6..746372d123 100644 --- a/stackrc +++ b/stackrc @@ -298,6 +298,10 @@ NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH} +# placement service +PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git} +PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH} + ############## # # Testing Components From 3027c20545688a39c1db84a83e0e8252d238603c Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 20 Nov 2018 22:18:26 +0000 Subject: [PATCH 0976/1936] Address nits on lib/placement for extracted placement Earlier review [1] suggested some cleanups which have been done here: * Removing a redundant call from cleanup_placement * Fixing a typo in a comment [1] https://review.openstack.org/#/c/600162/15/lib/placement Change-Id: I8abd2f02b123c6c1937c026ff13eb4e600de3202 --- lib/placement | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/placement b/lib/placement index 409ebecc4a..da69e39264 100644 --- a/lib/placement +++ b/lib/placement @@ -62,7 +62,6 @@ function is_placement_enabled { # cleanup_placement() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_placement { - sudo rm -f $(apache_site_config_for nova-placement-api) sudo rm -f $(apache_site_config_for placement-api) remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" sudo rm -f $PLACEMENT_AUTH_CACHE_DIR/* @@ -107,7 +106,7 @@ function configure_placement_nova_compute { iniset $conf placement auth_strategy $PLACEMENT_AUTH_STRATEGY } -# create_placement_conf() - Write confg +# create_placement_conf() - Write config function create_placement_conf { rm -f $PLACEMENT_CONF iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` From a6017b6eec743981fdbc9b50bf6b6752d1c86c0d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 16 Oct 2018 13:57:23 +0000 Subject: [PATCH 0977/1936] Add openstack-single-node-xenial nodeset Allow other jobs to explicitly require a node running Xenial. This seems clearer than having a generic openstack-single-node nodeset which implicitly uses Xenial. 
Change-Id: I013fb8abd4e6ab6539bd9410acbc8446e57ec70c --- .zuul.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 7ac30d8284..f3fc87f23f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -18,6 +18,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-xenial + nodes: + - name: controller + label: ubuntu-xenial + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-centos-7 nodes: From 4727aaa742e142427cc5138fe09b3c69c559b5e5 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 16 Oct 2018 13:57:23 +0000 Subject: [PATCH 0978/1936] Switch devstack jobs to run on Bionic Switch the nodesets that devstack job run on from Xenial to Bionic, i.e. the latest Ubuntu LTS release. Keep variants running on Xenial in order to make sure that we stay backwards compatible while we keep running Xenial jobs on the older stable branches. Change-Id: I8749ed24d5f451d29f767ebb2761abd743b7d306 --- .zuul.yaml | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index f3fc87f23f..a1c570b78f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -88,6 +88,66 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-xenial + nodes: + - name: controller + label: ubuntu-xenial + - name: compute1 + label: ubuntu-xenial + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base parent: multinode @@ -209,7 +269,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node + nodeset: openstack-single-node-bionic required-projects: - git.openstack.org/openstack/requirements vars: @@ -268,7 +328,6 @@ The run playbook consists of a single role, so it can be easily rewritten and extended. - nodeset: openstack-single-node required-projects: - git.openstack.org/openstack/cinder - git.openstack.org/openstack/glance @@ -398,14 +457,31 @@ # https://bugs.launchpad.net/devstack/+bug/1794929 USE_PYTHON3: true +- job: + name: devstack-xenial + parent: devstack + nodeset: openstack-single-node-xenial + description: | + Simple singlenode test to verify functionality on devstack + side running on Xenial. + - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node + nodeset: openstack-two-node-bionic description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. 
+- job: + name: devstack-multinode-xenial + parent: devstack + nodeset: openstack-two-node-xenial + description: | + Simple multinode test to verify multinode functionality on devstack + side running on Xenial. + This is not meant to be used as a parent job. + # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid @@ -431,6 +507,13 @@ nodeset: devstack-single-node-fedora-latest voting: false +- job: + name: devstack-platform-xenial + parent: tempest-full + description: Ubuntu Xenial platform test + nodeset: openstack-single-node-xenial + voting: false + - job: name: devstack-tox-base parent: devstack @@ -502,12 +585,15 @@ check: jobs: - devstack + - devstack-xenial - devstack-ipv6: voting: false - devstack-platform-centos-7 - devstack-platform-opensuse-150 - devstack-platform-fedora-latest + - devstack-platform-xenial - devstack-multinode + - devstack-multinode-xenial - devstack-unit-tests - openstack-tox-bashate - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa: @@ -537,6 +623,9 @@ gate: jobs: - devstack + - devstack-xenial + - devstack-multinode + - devstack-multinode-xenial - devstack-unit-tests - openstack-tox-bashate - neutron-grenade-multinode: From 3492feeedda7accb58a179c64932d4b6d154473a Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 30 Nov 2018 13:57:17 +0000 Subject: [PATCH 0979/1936] Use trueorfalse for NEUTRON_DEPLOY_MOD_WSGI Current code assumes the variable is being set to either "True" or "False", which will lead to weird errors if it is being set to something like "true" instead. Change-Id: I88983c9150efad882cd867c2d14d86ba6b2522c9 --- lib/neutron | 2 +- lib/neutron-legacy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 4847e87f2f..5ef9280981 100644 --- a/lib/neutron +++ b/lib/neutron @@ -33,7 +33,7 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient # - True : Run neutron under uwsgi # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable # enough -NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False} +NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} diff --git a/lib/neutron-legacy b/lib/neutron-legacy index be5b73ffa6..9513667dfe 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -91,7 +91,7 @@ export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/d # - True : Run neutron under uwsgi # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable # enough -NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False} +NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini From 0137703825ea5f493e7486e19c2d83b328ca2998 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 30 Nov 2018 14:40:12 -0800 Subject: [PATCH 0980/1936] Allow IP-based subject alt names ... 
even when no other subject alt names provided Previously, a non-voting job in barbican's gate would fail with something like X509 V3 routines:X509V3_parse_list:invalid null name:v3_utl.c:319: X509 V3 routines:DO_EXT_NCONF:invalid extension string:v3_conf.c:140:name=subjectAltName,section=DNS:pykmip-server,,IP:198.72.124.103 X509 V3 routines:X509V3_EXT_nconf:error in extension:v3_conf.c:95:name=subjectAltName, value=DNS:pykmip-server,,IP:198.72.124.103 because we'd have an invalid empty string. Change-Id: I5459b8976539924cd6cc6c1e681b6753a76b804c --- lib/tls | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index 217f40e3a5..9b55099e43 100644 --- a/lib/tls +++ b/lib/tls @@ -252,7 +252,11 @@ function make_cert { if [ "$common_name" != "$SERVICE_HOST" ]; then if is_ipv4_address "$SERVICE_HOST" ; then - alt_names="$alt_names,IP:$SERVICE_HOST" + if [[ -z "$alt_names" ]]; then + alt_names="IP:$SERVICE_HOST" + else + alt_names="$alt_names,IP:$SERVICE_HOST" + fi fi fi From d1fe5ad507c6dcb6955d66fab0b6bc9fb59a80f2 Mon Sep 17 00:00:00 2001 From: qingszhao Date: Tue, 4 Dec 2018 13:03:28 +0000 Subject: [PATCH 0981/1936] Change openstack-dev to openstack-discuss Mailinglists have been updated. Openstack-discuss replaces openstack-dev. Change-Id: Id639a45680b149ca1ffe8e91fcdea713576b355d --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index fcd2b13f41..825d386026 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,7 +4,7 @@ summary = OpenStack DevStack description-file = README.rst author = OpenStack -author-email = openstack-dev@lists.openstack.org +author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/devstack/latest classifier = Intended Audience :: Developers From d2a927264aa71e3103a9b265f1e0d9911fd4653e Mon Sep 17 00:00:00 2001 From: Carlos Goncalves Date: Tue, 4 Dec 2018 21:59:55 +0100 Subject: [PATCH 0982/1936] Enable the SCL repository for CentOS Enable the Software Collections (SCL) repository for CentOS. This repository includes useful software (e.g. the Go Toolset) which is not present in the main repository. For example, Octavia uses a Go based testing tool and its CentOS-based jobs got broken now with the update to CentOS 7.6 which no longer provides golang. Change-Id: Ic68a6d6cd7da41510e624b6bea7976d9a960af98 --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 497c8bc479..144c233f05 100755 --- a/stack.sh +++ b/stack.sh @@ -335,6 +335,13 @@ function _install_rdo { # no-op on other platforms. sudo yum-config-manager --enable rhel-7-server-optional-rpms + # Enable the Software Collections (SCL) repository for CentOS. + # This repository includes useful software (e.g. the Go Toolset) + # which is not present in the main repository. 
+ if [[ "$os_VENDOR" =~ (CentOS) ]]; then + yum_install centos-release-scl + fi + if is_oraclelinux; then sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56 fi From 14a22dff3429b9bf237dcd6d92572f2e53b76d9a Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Thu, 6 Dec 2018 10:23:51 +0000 Subject: [PATCH 0983/1936] Fix README for rolevar name 'devstack_sources_branch' This commit fixes the name of role in README file - https://review.openstack.org/#/c/549517 Change-Id: I0b7ada56339e5e3ff461c2b62e27f226720bb52f --- roles/setup-devstack-source-dirs/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst index 4129eae383..49d22c3c64 100644 --- a/roles/setup-devstack-source-dirs/README.rst +++ b/roles/setup-devstack-source-dirs/README.rst @@ -10,7 +10,7 @@ into it. The devstack base directory. - .. zuul:rolevar:: devstack_branch + .. zuul:rolevar:: devstack_sources_branch :default: None The target branch to be setup (where available). From 5a9e32afeb723dac3f0111b96cea7da77cdc722e Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 7 Dec 2018 10:26:18 -0500 Subject: [PATCH 0984/1936] Update comment about nova-cells-v1 job Nova is moving nova-cells-v1 to its experimental queue set of jobs so the comment in devstack should be updated. Depends-On: https://review.openstack.org/623538 Change-Id: Iefbaa9b809d1426640cbd47a42213f28c9ec5ff3 Related-Bug: #1807407 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 21d1a97046..4f91b5b997 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -557,7 +557,7 @@ # pruned. # # * nova-cells-v1: maintained by nova for cells v1 (nova-cells service); - # nova gates on this job, it's in experimental for testing cells v1 + # it's in experimental here (and in nova) for testing cells v1 # changes to devstack w/o gating on it for all devstack changes. # * nova-next: maintained by nova for unreleased/undefaulted # things like cellsv2 and placement-api From e344c97c0eb93e1d96ca8ebe250bb08d227ef5ac Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 7 Dec 2018 14:49:15 -0800 Subject: [PATCH 0985/1936] Set apache proxy-initial-not-pooled env var We've run into what appears to be a race with apache trying to reuse a pooled connection to a backend when that pool connection is closing. This leads to errors like: [Fri Dec 07 21:44:10.752362 2018] [proxy_http:error] [pid 19073:tid 139654393218816] (20014)Internal error (specific information not available): [client 104.130.127.213:45408] AH01102: error reading status line from remote server 127.0.0.1:60999 [Fri Dec 07 21:44:10.752405 2018] [proxy:error] [pid 19073:tid 139654393218816] [client 104.130.127.213:45408] AH00898: Error reading from remote server returned by /image/v2/images/ec31a4fd-e22b-4e97-8c6c-1ef330823fc1/file According to the internets this can be addressed (at the cost of some performance) by setting the proxy-initial-not-pooled env var for mod proxy. From the mod_proxy docs: If this variable is set, no pooled connection will be reused if the client request is the initial request on the frontend connection. This avoids the "proxy: error reading status line from remote server" error message caused by the race condition that the backend server closed the pooled connection after the connection check by the proxy and before data sent by the proxy reached the backend. 
It has to be kept in mind that setting this variable downgrades performance, especially with HTTP/1.0 clients. Closes-Bug: #1807518 Change-Id: I374deddefaa033de858b7bc15f893bf731ad7ff2 --- lib/tls | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tls b/lib/tls index 217f40e3a5..0032449e13 100644 --- a/lib/tls +++ b/lib/tls @@ -547,6 +547,9 @@ $listen_string LimitRequestFieldSize $f_header_size RequestHeader set X-Forwarded-Proto "https" + # Avoid races (at the cost of performance) to re-use a pooled connection + # where the connection is closed (bug 1807518). + SetEnv proxy-initial-not-pooled ProxyPass http://$b_host:$b_port/ retry=0 nocanon ProxyPassReverse http://$b_host:$b_port/ From 2dad33b4ba43c30b5c06bd52248fcdd49aeb2ad5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 11 Dec 2018 06:11:25 +0000 Subject: [PATCH 0986/1936] Updated from generate-devstack-plugins-list Change-Id: Ia5998eaec8f470ec584079f4639a84b2bf1430e1 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 7ed24638fd..02d6911d4b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -180,6 +180,7 @@ tap-as-a-service `git://git.openstack.org/openstack/tap-as tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ tatu `git://git.openstack.org/openstack/tatu `__ telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin `__ +tobiko `git://git.openstack.org/openstack/tobiko `__ tricircle `git://git.openstack.org/openstack/tricircle `__ trio2o `git://git.openstack.org/openstack/trio2o `__ trove `git://git.openstack.org/openstack/trove `__ From 0ce6ae813707678541697b2ffe24056f0f8e26b7 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 21 Nov 2017 11:55:57 +0000 Subject: [PATCH 0987/1936] Update cirros version This is the next release in the 0.3.x stable series, containing a fix for getting out of disk errors when cirros reads metadata from a config-drive[0]. 
[0] https://bugs.launchpad.net/cirros/+bug/1808119 Change-Id: Id2f20ebafdd78c2dadf81b8f80f22e7bd6db7755 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index ffe405012d..d520f2f2e9 100644 --- a/stackrc +++ b/stackrc @@ -668,7 +668,7 @@ ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE) #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.5"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.3.6"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 584979ce2eeb0c2e590a996657e601bfd5543eee Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 13 Dec 2018 08:22:12 -0500 Subject: [PATCH 0988/1936] docs: cleanup install docs This does a few things to the home page and all-in-one single machine install guide: * Uses code blocks for formatting * Adds the customary "$" to the console blocks in the all-in-one single machine install guide * Instructs to use "sudo su stack" and adds a note about "sudo visudo" in the all-in-one single machine doc * Creates a symbolic link to the sample local.conf and links to it from the install guide (note that local.conf might be old by now) * Fixes the .gitignore file to only ignore local.conf in the root of the repository, otherwise it would ignore local.conf everywhere including the samples and doc/source/assets directories. Change-Id: I50ae7bd32c4c1caa2ac8551fc54b31dd2dfae568 --- .gitignore | 2 +- doc/source/assets/local.conf | 1 + doc/source/guides/single-machine.rst | 35 +++++++++++++++++----------- doc/source/index.rst | 16 ++++++++----- 4 files changed, 34 insertions(+), 20 deletions(-) create mode 120000 doc/source/assets/local.conf diff --git a/.gitignore b/.gitignore index e5e1f6aba0..956e13e21c 100644 --- a/.gitignore +++ b/.gitignore @@ -28,7 +28,7 @@ files/get-pip.py* files/ir-deploy* files/ironic-inspector* files/etcd* -local.conf +^local.conf local.sh localrc proto diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf new file mode 120000 index 0000000000..cfc2a4e9d8 --- /dev/null +++ b/doc/source/assets/local.conf @@ -0,0 +1 @@ +../../../samples/local.conf \ No newline at end of file diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 515ea9a9ee..168172c630 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -45,31 +45,37 @@ We need to add a user to install DevStack. (if you created a user during install you can skip this step and just give the user sudo privileges below) -:: +.. code-block:: console - useradd -s /bin/bash -d /opt/stack -m stack + $ sudo useradd -s /bin/bash -d /opt/stack -m stack Since this user will be making many changes to your system, it will need to have sudo privileges: -:: +.. code-block:: console - apt-get install sudo -y || yum install -y sudo - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ apt-get install sudo -y || yum install -y sudo + $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +.. note:: On some systems you may need to use ``sudo visudo``. From here on you should use the user you created. **Logout** and -**login** as that user. +**login** as that user: + +.. 
code-block:: console + + $ sudo su stack && cd ~ Download DevStack ----------------- We'll grab the latest version of DevStack via https: -:: +.. code-block:: console - sudo apt-get install git -y || sudo yum install -y git - git clone https://git.openstack.org/openstack-dev/devstack - cd devstack + $ sudo apt-get install git -y || sudo yum install -y git + $ git clone https://git.openstack.org/openstack-dev/devstack + $ cd devstack Run DevStack ------------ @@ -97,7 +103,7 @@ do the following: ``local.conf`` should look something like this: -:: +.. code-block:: ini [[local|localrc]] FLOATING_RANGE=192.168.1.224/27 @@ -109,11 +115,14 @@ do the following: RABBIT_PASSWORD=flopsymopsy SERVICE_PASSWORD=iheartksl +.. note:: There is a sample :download:`local.conf ` file + under the *samples* directory in the devstack repository. + Run DevStack: -:: +.. code-block:: console - ./stack.sh + $ ./stack.sh A seemingly endless stream of activity ensues. When complete you will see a summary of ``stack.sh``'s work, including the relevant URLs, diff --git a/doc/source/index.rst b/doc/source/index.rst index fcf1e82d34..6c42a5b4e9 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -55,14 +55,14 @@ are usually fine). You can quickly create a separate `stack` user to run DevStack with -:: +.. code-block:: console $ sudo useradd -s /bin/bash -d /opt/stack -m stack Since this user will be making many changes to your system, it should have sudo privileges: -:: +.. code-block:: console $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack $ sudo su - stack @@ -70,7 +70,7 @@ have sudo privileges: Download DevStack ----------------- -:: +.. code-block:: console $ git clone https://git.openstack.org/openstack-dev/devstack $ cd devstack @@ -83,7 +83,8 @@ Create a local.conf Create a ``local.conf`` file with 4 passwords preset at the root of the devstack git repo. -:: + +.. code-block:: ini [[local|localrc]] ADMIN_PASSWORD=secret @@ -93,12 +94,15 @@ devstack git repo. This is the minimum required config to get started with DevStack. +.. note:: There is a sample :download:`local.conf ` file + under the *samples* directory in the devstack repository. + Start the install ----------------- -:: +.. code-block:: console - ./stack.sh + $ ./stack.sh This will take a 15 - 20 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be From e0b375c8ce77ca256d108b2c558d57e17efab4f7 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Fri, 14 Dec 2018 17:29:27 +0900 Subject: [PATCH 0989/1936] Use volume v3 API in clouds.yaml by default Volume API v2 has been deprecated for a long time. There is no reason to use volume v2 in clouds.yaml by default. This commit also drops "--os-identity-api-version 3" from write_clouds_yaml in functions -common as "3" is the default value of tools/update_clouds_yaml.py. They are hardcoded in DevStack so there is no reason to pass it. 
Change-Id: Ie84026a3d19f7711fc781b7012355096c7ff6b5a --- functions-common | 3 --- tools/update_clouds_yaml.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/functions-common b/functions-common index af95bfb879..9be1513b5d 100644 --- a/functions-common +++ b/functions-common @@ -92,7 +92,6 @@ function write_clouds_yaml { --file $CLOUDS_YAML \ --os-cloud devstack \ --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username demo \ @@ -104,7 +103,6 @@ function write_clouds_yaml { --file $CLOUDS_YAML \ --os-cloud devstack-alt \ --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username alt_demo \ @@ -116,7 +114,6 @@ function write_clouds_yaml { --file $CLOUDS_YAML \ --os-cloud devstack-admin \ --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username admin \ diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index eb7265f76c..3aad0e0a29 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -83,7 +83,7 @@ def main(): parser.add_argument('--os-cloud', required=True) parser.add_argument('--os-region-name', default='RegionOne') parser.add_argument('--os-identity-api-version', default='3') - parser.add_argument('--os-volume-api-version', default='2') + parser.add_argument('--os-volume-api-version', default='3') parser.add_argument('--os-cacert') parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) From 93a94f503b058a1452cc8b1eef389cbec5975863 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 14 Dec 2018 06:39:59 -0800 Subject: [PATCH 0990/1936] Enable direct-io on LVM loop devices This enables direct-io on the loop devices that we create for LVM backing stores. The goal here is to reduce the buffer cache overhead involved with loop mounting a very large file on a filesystem, as well as potentially providing a little more block-device-like behavior for things that expect them. We are hoping this will address some of the very long LVM calls that cinder does, which randomly take a very long time, causing timeouts. The loop direct-io support was added in kernel 4.4.0, which was xenial, but the losetup binary does not have the required flag. Thus, this patch checks the "losetup -h" output for the flag before deciding to enable it. Change-Id: Idc69cf3598d6ed6646c0145733c90ad0b1b60883 --- lib/lvm | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/lvm b/lib/lvm index f0471816bf..d9e78a016f 100644 --- a/lib/lvm +++ b/lib/lvm @@ -99,8 +99,15 @@ function _create_lvm_volume_group { if ! sudo vgs $vg; then # Only create if the file doesn't already exists [[ -f $backing_file ]] || truncate -s $size $backing_file + + local directio="" + # Check to see if we can do direct-io + if losetup -h | grep -q direct-io; then + directio="--direct-io=on" + fi + local vg_dev - vg_dev=`sudo losetup -f --show $backing_file` + vg_dev=$(sudo losetup -f --show $directio $backing_file) # Only create volume group if it doesn't already exist if ! sudo vgs $vg; then From 16fe9646486d0b621c7fb2c15ffec0c004508f73 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Tue, 18 Dec 2018 09:01:36 +0000 Subject: [PATCH 0991/1936] Ignore local.conf in root of repo gitignore is not parsing regex, only shell globs, so '^' has no meaning, and local.conf is being thus tracked. 
This patch properly ignores only local.conf in root of repo but still tracks samples/local.conf and others. Change-Id: I93ef778f1f3ee8101ce21cce377f7b527b7153f3 --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 956e13e21c..8fe56ad6ab 100644 --- a/.gitignore +++ b/.gitignore @@ -28,7 +28,7 @@ files/get-pip.py* files/ir-deploy* files/ironic-inspector* files/etcd* -^local.conf +/local.conf local.sh localrc proto From b3ee6f46153c46e46493f187b3b246e2229ce208 Mon Sep 17 00:00:00 2001 From: Riccardo Pittau Date: Tue, 18 Dec 2018 11:19:59 +0100 Subject: [PATCH 0992/1936] Correcting typo in plugins section Fixing the example of the plugin's name as the last component of the git repo path should be foo. Change-Id: I2f12d4d26993ec192517de7f5541c4219ee59ed9 --- doc/source/plugins.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 89b9381813..c69472955c 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -58,7 +58,7 @@ directory. Inside this directory there can be 3 files. plugin's name, which is the name that should be used by users on "enable_plugin" lines. It should generally be the last component of the git repo path (e.g., if the plugin's repo is - openstack/devstack-foo, then the name here should be "foo") :: + openstack/foo, then the name here should be "foo") :: define_plugin @@ -148,7 +148,7 @@ An example plugin would look something as follows. ``devstack/settings``:: - # settings file for template + # settings file for template enable_service template From dc33485ff28989db389b57a7d1a9bce97259ad12 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Wed, 19 Dec 2018 04:23:30 +0000 Subject: [PATCH 0993/1936] Use renamed template 'integrated-gate-py3' 'integrated-gate-py35' template is going to be renamed to 'integrated-gate-py3' in https://review.openstack.org/#/c/626078/ Integrated jobs are running on Bionic now where python 3.6 is available. Which means gate jobs in 'integrated-gate-py35' template are running on python 3.6 not on 3.5 which makes this template name confusing. depends on commit rename the 'integrated-gate-py35' to 'integrated-gate-py3' so that it can convey that template will use available python 3 version in used distro. For example: 3.5 in xenial and 3.6 in bionic and so on. This commit starts using the new template name so that old template name can be removed. 
Depends-On: https://review.openstack.org/#/c/626078/ Change-Id: I07048817eb826337dd5bd89a97711bb9d43495cf --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9aafcdbb84..02d3df9b9c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -581,7 +581,7 @@ - project: templates: - integrated-gate - - integrated-gate-py35 + - integrated-gate-py3 - publish-openstack-docs-pti check: jobs: From 7bb5fff85c0f7b4131bc669abe99c04391682a91 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 19 Dec 2018 14:16:41 +1100 Subject: [PATCH 0994/1936] setup-devstack-source-dirs: also copy github libraries This will help us use standard mechanisms to install libraries that Zuul has cloned from github Change-Id: I8ecfeeba9133ce175fb72fc59be696879013d927 --- .../tasks/main.yaml | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index c196c37e09..dfa934f68b 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -1,4 +1,4 @@ -- name: Find all source repos used by this job +- name: Find all OpenStack source repos used by this job find: paths: - src/git.openstack.org/openstack @@ -12,6 +12,30 @@ with_items: '{{ found_repos.files }}' become: yes +# Github projects are github.com/username/repo (username might be a +# top-level project too), so we have to do a two-step swizzle to just +# get the full repo path (ansible's find module doesn't help with this +# :/) +- name: Find top level github projects + find: + paths: + - src/github.com + file_type: directory + register: found_github_projects + +- name: Find actual github repos + find: + paths: '{{ found_github_projects.files | map(attribute="path") | list }}' + file_type: directory + register: found_github_repos + when: found_github_projects.files + +- name: Copy github repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_github_repos.files }}' + become: yes + when: found_github_projects.files + - name: Setup refspec for repos into devstack working directory shell: # Copied almost "as-is" from devstack-gate setup-workspace function From 15b6ac98cb932488bce77b267f0f27903f0d47d0 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 21 Nov 2017 11:55:57 +0000 Subject: [PATCH 0995/1936] Update default cirros version Cirros has a new release 0.4.0, which fixes a couple of issues. 
Change-Id: I419348f1784600e3989c8e86a99c04b24f3610c4 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8463313625..4b94386521 100644 --- a/stackrc +++ b/stackrc @@ -685,7 +685,7 @@ ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE) #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.6"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 6a7e3ec6479097f4918eb66d25b52cfa46953dec Mon Sep 17 00:00:00 2001 From: npraveen35 Date: Thu, 20 Dec 2018 18:25:36 +0530 Subject: [PATCH 0996/1936] typo fixed Change-Id: Id777576d8876d7ba257f0243f3b4ce5756dd9b58 --- clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index a29ebd94f0..d6c6b40608 100755 --- a/clean.sh +++ b/clean.sh @@ -123,7 +123,7 @@ if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then sudo rm -rf $LOGDIR fi -# Clean out the sytemd user unit files if systemd was used. +# Clean out the systemd user unit files if systemd was used. if [[ "$USE_SYSTEMD" = "True" ]]; then sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete # Make systemd aware of the deletion. From 119ee66cded7d6e1ed04419a00e59c269eea1c54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Thu, 20 Dec 2018 18:55:29 +0100 Subject: [PATCH 0997/1936] Log debug on etcd if $ENABLE_DEBUG_LOG_LEVEL is on Change-Id: I452a2e4882377d165e3c28fcec18e237e45db8a4 --- lib/etcd3 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/etcd3 b/lib/etcd3 index 26d07fd19e..c65a522267 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -46,6 +46,9 @@ function start_etcd3 { cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT " fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then + cmd+=" --debug" + fi local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" From 8e802da4069349a2f6ccdef348999304669a6cbe Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Fri, 4 Jan 2019 15:21:43 +0000 Subject: [PATCH 0998/1936] Cleanup LDAP integration guide This commit fixes a grammar issue in the LDAP integration guide and it adds prompts to the command-line examples to be more explicit about where or how commands are being run. Change-Id: Ic6a5adfbcf2841656929e6c3875889a31d314089 --- doc/source/guides/devstack-with-ldap.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst index ec411419b5..4c54723c71 100644 --- a/doc/source/guides/devstack-with-ldap.rst +++ b/doc/source/guides/devstack-with-ldap.rst @@ -12,14 +12,14 @@ Introduction LDAP support in keystone is read-only. You can use it to back an entire OpenStack deployment to a single LDAP server, or you can use it to back separate LDAP servers to specific keystone domains. Users within those domains -will can authenticate against keystone, assume role assignments, and interact -with other OpenStack services. +can authenticate against keystone, assume role assignments, and interact with +other OpenStack services. 
Configuration ============= To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of -``ENABLED_SERVICES``:: +``ENABLED_SERVICES`` in the ``local.conf`` file:: enable_service ldap @@ -35,9 +35,9 @@ Devstack will prompt you for a password when running ``stack.sh`` if At this point, devstack should have everything it needs to deploy OpenLDAP, bootstrap it with a minimal set of users, and configure it to back to a domain -in keystone:: +in keystone. You can do this by running the ``stack.sh`` script:: - ./stack.sh + $ ./stack.sh Once ``stack.sh`` completes, you should have a running keystone deployment with a basic set of users. It is important to note that not all users will live @@ -63,7 +63,7 @@ Listing Users To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP user bootstrapped by devstack:: - ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ -H ldap://localhost -b dc=openstack,dc=org As you can see, devstack creates an OpenStack domain called ``openstack.org`` @@ -93,7 +93,7 @@ example LDIF that can be used to create a new LDAP user, let's call it Now, we use the ``Manager`` user to create a user for Peter in LDAP:: - ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ -H ldap://localhost -c -f peter.ldif.in We should be able to assign Peter roles on projects. After Peter has some level @@ -125,7 +125,7 @@ Deleting Users We can use the same basic steps to remove users from LDAP, but instead of using LDIFs, we can just pass the ``dn`` of the user we want to delete:: - ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org Group Management @@ -153,7 +153,7 @@ Let's define a specific group with the following LDIF:: We can create the group using the same ``ldapadd`` command as we did with users:: - ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ -H ldap://localhost -c -f guardian-group.ldif.in If we check the group membership in Horizon, we'll see that only Peter is a @@ -167,7 +167,7 @@ Deleting Groups Just like users, groups can be deleted using the ``dn``:: - ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org Note that this operation will not remove users within that group. It will only From 5d7d891380f569deaf403aae46a9354eb0243999 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 3 Dec 2018 14:21:06 +0000 Subject: [PATCH 0999/1936] Drop the deprecated and now removed barrier XFS mount options Both barrier and nobarrier were deprecated with the 4.10 kernel [1] and then removed [2] with the 4.19 kernel as now used by Fedora >= 28. Both should be safe to drop at this point. 
[1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4cf4573 [2] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1c02d50 Change-Id: I6871a7765e3e04122d8d546f43d36bb8415383fc --- functions | 2 +- lib/swift | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 051c8160f7..187ad2311d 100644 --- a/functions +++ b/functions @@ -739,7 +739,7 @@ function create_disk { # Mount the disk with mount options to make it as efficient as possible if ! egrep -q ${storage_data_dir} /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ ${disk_image} ${storage_data_dir} fi } diff --git a/lib/swift b/lib/swift index 3b3e608e80..e2ee0cb470 100644 --- a/lib/swift +++ b/lib/swift @@ -607,7 +607,7 @@ function create_swift_disk { # Mount the disk with mount options to make it as efficient as possible mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 fi From 77866259e4204eb08afd55e8a29994ad49c58e0b Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 7 Dec 2018 18:52:16 +0000 Subject: [PATCH 1000/1936] Update supported Fedora releases Fedora 27 has now hit EOL [1] while Fedora 29 has been released [2]. [1] https://fedoramagazine.org/fedora-27-end-of-life/ [2] https://fedoramagazine.org/announcing-fedora-29/ Change-Id: I12e287e36f01581f1c7145545ab05be527ed15c6 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 144c233f05..54a4f985bd 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f28|f29|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 5690582073756bdc156550b90a8e284a8d4e0e5f Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 8 Jan 2019 15:29:16 +0000 Subject: [PATCH 1001/1936] Add devstack-system-admin for system scoped actions Keystone is moving more things to require a system scoped token to work. Getting one of those requires that domain and project information are not set. 
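As a rough usage sketch (not part of this change; it assumes the clouds.yaml entry generated below and the default devstack admin password), the new cloud can be exercised by name with the OpenStack client:

    openstack --os-cloud devstack-system-admin endpoint list
    openstack --os-cloud devstack-system-admin user list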
Change-Id: I2e1640e9f9ef6cdf56bef49d1ae8f0591570c3e6 --- functions-common | 11 +++++++++++ tools/update_clouds_yaml.py | 14 +++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 9be1513b5d..e5962db42b 100644 --- a/functions-common +++ b/functions-common @@ -120,6 +120,17 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name admin + # admin with a system-scoped token -> devstack-system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + # CLean up any old clouds.yaml files we had laying around rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml } diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 3aad0e0a29..9187c664d0 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -41,12 +41,19 @@ def __init__(self, args): 'auth_url': args.os_auth_url, 'username': args.os_username, 'password': args.os_password, - 'project_name': args.os_project_name, }, } - if args.os_identity_api_version == '3': + if args.os_project_name and args.os_system_scope: + print( + "WARNING: os_project_name and os_system_scope were both" + " given. os_system_scope will take priority.") + if args.os_project_name and not args.os_system_scope: + self._cloud_data['auth']['project_name'] = args.os_project_name + if args.os_identity_api_version == '3' and not args.os_system_scope: self._cloud_data['auth']['user_domain_id'] = 'default' self._cloud_data['auth']['project_domain_id'] = 'default' + if args.os_system_scope: + self._cloud_data['auth']['system_scope'] = args.os_system_scope if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert @@ -88,7 +95,8 @@ def main(): parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) parser.add_argument('--os-password', required=True) - parser.add_argument('--os-project-name', required=True) + parser.add_argument('--os-project-name') + parser.add_argument('--os-system-scope') args = parser.parse_args() From 6d103a7ff845076da984afbd317cd8cf4504fb7a Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 19 Dec 2018 11:53:16 +0000 Subject: [PATCH 1002/1936] Fix testing for the write-devstack-local-conf role The test_plugin_deps function in the test code for the write-devstack-local-conf role was missing the import part of actually executing the code under test and asserting the expected result. 
Change-Id: I125870b13d2581cdec0dede11157b19b702565cd --- roles/write-devstack-local-conf/library/test.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 791552d1ad..65d327b267 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -104,6 +104,22 @@ def test_plugin_deps(self): plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project')) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['foo', 'bar'], plugins) def test_libs_from_git(self): "Test that LIBS_FROM_GIT is auto-generated" From 0b855007f8d44662ad4fd52bda7df85f94b241c2 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 19 Dec 2018 12:20:51 +0000 Subject: [PATCH 1003/1936] Allow plugin names to contain non-letter characters There are already devstack plugins that contain a hyphen in the name, like `networking-baremetal`. In order to allow ordering for these to work properly, amend the regexes we are using to match any non-whitespace characters instead of only alphanumerics. Amend the test to cover this use case. Change-Id: I91093a424f8d5e8007f140083e1ea36a81fe849f Closes-Bug: 1809016 --- .../library/devstack_local_conf.py | 4 ++-- roles/write-devstack-local-conf/library/test.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index bba7e31f96..b1ad2dd4b4 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -155,8 +155,8 @@ def loadPluginNames(self, base_dir): continue self.loadDevstackPluginInfo(settings) - define_re = re.compile(r'^define_plugin\s+(\w+).*') - require_re = re.compile(r'^plugin_requires\s+(\w+)\s+(\w+).*') + define_re = re.compile(r'^define_plugin\s+(\S+).*') + require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*') def loadDevstackPluginInfo(self, fn): name = None reqs = set() diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 65d327b267..88d404b856 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -78,12 +78,12 @@ def test_plugin_deps(self): with open(os.path.join( self.tmpdir, 'foo-plugin', 'devstack', 'settings'), 'w') as f: - f.write('define_plugin foo\n') + f.write('define_plugin foo-plugin\n') with open(os.path.join( self.tmpdir, 'bar-plugin', 'devstack', 'settings'), 'w') as f: - f.write('define_plugin bar\n') - f.write('plugin_requires bar foo\n') + f.write('define_plugin bar-plugin\n') + f.write('plugin_requires bar-plugin foo-plugin\n') localrc = {'test_localrc': '1'} local_conf = {'install': @@ -94,8 +94,8 @@ def test_plugin_deps(self): # We use ordereddict here to make sure the plugins are in the # *wrong* order for testing. 
plugins = OrderedDict([ - ('bar', 'git://git.openstack.org/openstack/bar-plugin'), - ('foo', 'git://git.openstack.org/openstack/foo-plugin'), + ('bar-plugin', 'git://git.openstack.org/openstack/bar-plugin'), + ('foo-plugin', 'git://git.openstack.org/openstack/foo-plugin'), ]) p = dict(localrc=localrc, local_conf=local_conf, @@ -119,7 +119,7 @@ def test_plugin_deps(self): for line in f: if line.startswith('enable_plugin'): plugins.append(line.split()[1]) - self.assertEqual(['foo', 'bar'], plugins) + self.assertEqual(['foo-plugin', 'bar-plugin'], plugins) def test_libs_from_git(self): "Test that LIBS_FROM_GIT is auto-generated" From 36377f63e348200cd091b702c74350062a69fff9 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 4 Dec 2018 11:33:03 -0500 Subject: [PATCH 1004/1936] install under python3 by default when enabled Remove the requirement that services explicitly enable python3 support in order to be tested under python3 when running with python3 enabled. Keep the enable_python3_package() function for backwards compatibility, for now, since it is called in some devstack plugins. Explicitly add swift to the set of packages that should not be installed using python3 by default until full support is available. Change-Id: I8ab0a7c242bbf5bf3f091f5a85a98e2f4543f856 Signed-off-by: Doug Hellmann --- inc/python | 39 +++++++++++---------------------------- stackrc | 8 +------- tests/test_python.sh | 7 +------ 3 files changed, 13 insertions(+), 41 deletions(-) diff --git a/inc/python b/inc/python index 5fb7245623..f1df101939 100644 --- a/inc/python +++ b/inc/python @@ -115,13 +115,12 @@ function check_python3_support_for_package_remote { echo $classifier } -# python3_enabled_for() checks if the service(s) specified as arguments are -# enabled by the user in ``ENABLED_PYTHON3_PACKAGES``. +# python3_enabled_for() assumes the service(s) specified as arguments are +# enabled for python 3 unless explicitly disabled. See python3_disabled_for(). # # Multiple services specified as arguments are ``OR``'ed together; the test # is a short-circuit boolean, i.e it returns on the first match. # -# Uses global ``ENABLED_PYTHON3_PACKAGES`` # python3_enabled_for dir [dir ...] function python3_enabled_for { local xtrace @@ -132,7 +131,9 @@ function python3_enabled_for { local dirs=$@ local dir for dir in ${dirs}; do - [[ ,${ENABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0 + if ! python3_disabled_for "${dir}"; then + enabled=0 + fi done $xtrace @@ -163,42 +164,29 @@ function python3_disabled_for { return $enabled } -# enable_python3_package() adds the repositories passed as argument to the -# ``ENABLED_PYTHON3_PACKAGES`` list, if they are not already present. +# enable_python3_package() -- no-op for backwards compatibility # # For example: # enable_python3_package nova # -# Uses global ``ENABLED_PYTHON3_PACKAGES`` # enable_python3_package dir [dir ...] function enable_python3_package { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace - local tmpsvcs="${ENABLED_PYTHON3_PACKAGES}" - local python3 - for dir in $@; do - if [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]]; then - warn $LINENO "Attempt to enable_python3_package ${dir} when it has been disabled" - continue - fi - if ! python3_enabled_for $dir; then - tmpsvcs+=",$dir" - fi - done - ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$tmpsvcs") + echo "It is no longer necessary to call enable_python3_package()." 
$xtrace } -# disable_python3_package() prepares the services passed as argument to be -# removed from the ``ENABLED_PYTHON3_PACKAGES`` list, if they are present. +# disable_python3_package() adds the services passed as argument to +# the ``DISABLED_PYTHON3_PACKAGES`` list. # # For example: # disable_python3_package swift # -# Uses globals ``ENABLED_PYTHON3_PACKAGES`` and ``DISABLED_PYTHON3_PACKAGES`` +# Uses global ``DISABLED_PYTHON3_PACKAGES`` # disable_python3_package dir [dir ...] function disable_python3_package { local xtrace @@ -206,16 +194,11 @@ function disable_python3_package { set +o xtrace local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}" - local enabled_svcs=",${ENABLED_PYTHON3_PACKAGES}," local dir for dir in $@; do disabled_svcs+=",$dir" - if python3_enabled_for $dir; then - enabled_svcs=${enabled_svcs//,$dir,/,} - fi done DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs") - ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$enabled_svcs") $xtrace } @@ -295,7 +278,7 @@ function pip_install { if python3_disabled_for ${package_dir##*/}; then echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" elif python3_enabled_for ${package_dir##*/}; then - echo "Explicitly using $PYTHON3_VERSION version to install $package_dir based on ENABLED_PYTHON3_PACKAGES" + echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) elif [[ -d "$package_dir" ]]; then diff --git a/stackrc b/stackrc index 8463313625..8f66ba7975 100644 --- a/stackrc +++ b/stackrc @@ -129,15 +129,9 @@ fi # Control whether Python 3 should be used at all. export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) -# Control whether Python 3 is enabled for specific services by the -# base name of the directory from which they are installed. See -# enable_python3_package to edit this variable and use_python3_for to -# test membership. -export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,openstacksdk" - # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. -export DISABLED_PYTHON3_PACKAGES="" +export DISABLED_PYTHON3_PACKAGES="swift" # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that diff --git a/tests/test_python.sh b/tests/test_python.sh index 8652798778..1f5453c4c7 100755 --- a/tests/test_python.sh +++ b/tests/test_python.sh @@ -12,14 +12,9 @@ source $TOP/tests/unittest.sh echo "Testing Python 3 functions" # Initialize variables manipulated by functions under test. 
-export ENABLED_PYTHON3_PACKAGES="" export DISABLED_PYTHON3_PACKAGES="" -assert_false "should not be enabled yet" python3_enabled_for testpackage1 - -enable_python3_package testpackage1 -assert_equal "$ENABLED_PYTHON3_PACKAGES" "testpackage1" "unexpected result" -assert_true "should be enabled" python3_enabled_for testpackage1 +assert_true "should be enabled by default" python3_enabled_for testpackage1 assert_false "should not be disabled yet" python3_disabled_for testpackage2 From bab27cbff1431ebd9f72e595e86b5a1e902092f0 Mon Sep 17 00:00:00 2001 From: Iury Gregory Melo Ferreira Date: Wed, 9 Jan 2019 15:55:47 +0100 Subject: [PATCH 1005/1936] Rename ironic job name to match zuulv3 Depends-On: https://review.openstack.org/#/c/629173/ Change-Id: Ifa8d075729c6347ecda41c79e4de09c71483b4c6 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 02d3df9b9c..afe400e1a4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -597,7 +597,7 @@ - devstack-multinode-xenial - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa: + - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false - swift-dsvm-functional: voting: false From 772ade5368ff124d219f9e55b61bb9bc763e47a7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 11 Jan 2019 06:04:41 +0000 Subject: [PATCH 1006/1936] Updated from generate-devstack-plugins-list Change-Id: I3b4c7e73b0f84f0604fa5e06e612a5bd775d7b02 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 02d6911d4b..9901c1ca39 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -150,6 +150,7 @@ octavia `git://git.openstack.org/openstack/octavi octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard `__ omni `git://git.openstack.org/openstack/omni `__ openstacksdk `git://git.openstack.org/openstack/openstacksdk `__ +os-faults `git://git.openstack.org/openstack/os-faults `__ os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ osprofiler `git://git.openstack.org/openstack/osprofiler `__ oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin `__ @@ -173,6 +174,7 @@ storlets `git://git.openstack.org/openstack/storle stx-config `git://git.openstack.org/openstack/stx-config `__ stx-fault `git://git.openstack.org/openstack/stx-fault `__ stx-integ `git://git.openstack.org/openstack/stx-integ `__ +stx-metal `git://git.openstack.org/openstack/stx-metal `__ stx-nfv `git://git.openstack.org/openstack/stx-nfv `__ stx-update `git://git.openstack.org/openstack/stx-update `__ tacker `git://git.openstack.org/openstack/tacker `__ From 698796f1aeb0d9a559488bad9f1d03e4941b061e Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 14 Dec 2018 10:35:26 +0800 Subject: [PATCH 1007/1936] Fix an issue in iniset function Given the file to be configured, if user "stack" even doesn't have read access, the result of configuration is not expected. iniset with "-sudo" option will always create the section and the option which we want to configure for each calling, no matter whether this section and this option exist in the file or not. The root cause is the calling of grep and ini_has_option in iniset don't use the "sudo" option. 
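A minimal illustration (the file path here is hypothetical, not taken from this change), using a root-owned config that the "stack" user cannot read:

    iniset -sudo /etc/sample/sample.conf DEFAULT debug True
    iniset -sudo /etc/sample/sample.conf DEFAULT debug False

Before this fix, the unreadable file made the existence checks silently fail, so each call appended another [DEFAULT] section header and another "debug" line; with sudo passed through to grep and ini_has_option, the second call updates the existing option in place.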
Change-Id: I9d21322046b7be411c4c7c28fefc24894fa2e131 Signed-off-by: Yi Wang --- inc/ini-config | 17 ++++++++++++----- tests/test_ini_config.sh | 4 ++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index 6fe7788158..79936823d2 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -88,17 +88,22 @@ function iniget_multiline { } # Determinate is the given option present in the INI file -# ini_has_option config-file section option +# ini_has_option [-sudo] config-file section option function ini_has_option { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") $xtrace [ -n "$line" ] } @@ -173,8 +178,10 @@ function iniset { xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" + local sudo_option="" if [ $1 == "-sudo" ]; then sudo="sudo " + sudo_option="-sudo " shift fi local file=$1 @@ -187,11 +194,11 @@ function iniset { return fi - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null fi - if ! ini_has_option "$file" "$section" "$option"; then + if ! ini_has_option $sudo_option "$file" "$section" "$option"; then # Add it $sudo sed -i -e "/^\[$section\]/ a\\ $option = $value @@ -228,7 +235,7 @@ function iniset_multiline { # the reverse order. Do a reverse here to keep the original order. values="$v ${values}" done - if ! grep -q "^\[$section\]" "$file"; then + if ! $sudo grep -q "^\[$section\]" "$file"; then # Add section at the end echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null else diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index f7dc89a28d..6ed1647f34 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -125,14 +125,14 @@ VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" # test empty option -if ini_has_option ${TEST_INI} ddd empty; then +if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" else failed "ini_has_option failed: ddd.empty not found" fi # test non-empty option -if ini_has_option ${TEST_INI} bbb handlers; then +if ini_has_option ${SUDO_ARG} ${TEST_INI} bbb handlers; then passed "ini_has_option: bbb.handlers present" else failed "ini_has_option failed: bbb.handlers not found" From 58243f6203f3385039fe7124b037d5abf7e1d59e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 13 Dec 2018 14:05:53 +1100 Subject: [PATCH 1008/1936] Add options for development bindep install This adds a -bindep option to the key development library install functions. With this option the bindep.txt file will be referenced and the relevant packages installed. 
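A rough usage sketch (the library path and profile names below are made up for illustration, not taken from this change):

    # install a source checkout in editable mode plus its bindep.txt packages
    setup_develop -bindep /opt/stack/mylib

    # or install only the packages for selected bindep profiles (comma separated)
    install_bindep /opt/stack/mylib/bindep.txt test,doc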
Change-Id: I856f1f59fca49b6020920d8f859b797f3b904300 --- functions-common | 29 +++++++++++++++++++++ inc/python | 67 ++++++++++++++++++++++++++++++++++++++---------- stack.sh | 5 ++++ stackrc | 7 +++++ 4 files changed, 95 insertions(+), 13 deletions(-) diff --git a/functions-common b/functions-common index af95bfb879..6651cc2661 100644 --- a/functions-common +++ b/functions-common @@ -1381,6 +1381,35 @@ function zypper_install { zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@" } +# Run bindep and install packages it outputs +# +# Usage: +# install_bindep [profile,profile] +# +# Note unlike the bindep command itself, profile(s) specified should +# be a single, comma-separated string, no spaces. +function install_bindep { + local file=$1 + local profiles=${2:-""} + local pkgs + + if [[ ! -f $file ]]; then + die $LINENO "Can not find bindep file: $file" + fi + + # converting here makes it much easier to work with passing + # arguments + profiles=${profiles/,/ /} + + # Note bindep returns 1 when packages need to be installed, so we + # have to ignore it's return for "-e" + pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true) + + if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} + fi +} + function write_user_unit_file { local service=$1 local command="$2" diff --git a/inc/python b/inc/python index 5fb7245623..d4237674ed 100644 --- a/inc/python +++ b/inc/python @@ -445,7 +445,14 @@ function setup_lib { # another project. # # use this for non namespaced libraries +# +# setup_dev_lib [-bindep] function setup_dev_lib { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local name=$1 local dir=${GITDIR[$name]} if python3_enabled; then @@ -455,10 +462,10 @@ function setup_dev_lib { # of Python. echo "Installing $name again without Python 3 enabled" USE_PYTHON3=False - setup_develop $dir + setup_develop $bindep $dir USE_PYTHON3=True fi - setup_develop $dir + setup_develop $bindep $dir } # this should be used if you want to install globally, all libraries should @@ -469,11 +476,17 @@ function setup_dev_lib { # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). 
# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +# bindep: Set "-bindep" as first argument to install bindep.txt packages # The command is like "pip install []" function setup_install { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local extras=$2 - _setup_package_with_constraints_edit $project_dir "" $extras + _setup_package_with_constraints_edit $bindep $project_dir "" $extras } # this should be used for projects which run services, like all services @@ -485,9 +498,14 @@ function setup_install { # See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install -e []" function setup_develop { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local extras=$2 - _setup_package_with_constraints_edit $project_dir -e $extras + _setup_package_with_constraints_edit $bindep $project_dir -e $extras } # ``pip install -e`` the package, which processes the dependencies @@ -506,6 +524,11 @@ function setup_develop { # See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install []" function _setup_package_with_constraints_edit { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local flags=$2 local extras=$3 @@ -526,7 +549,7 @@ function _setup_package_with_constraints_edit { "$flags file://$project_dir#egg=$name" fi - setup_package $project_dir "$flags" $extras + setup_package $bindep $project_dir "$flags" $extras # If this project is in LIBS_FROM_GIT, verify it was actually installed # correctly. This helps catch errors caused by constraints mismatches. @@ -538,17 +561,30 @@ function _setup_package_with_constraints_edit { } # ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` +# using pip before running `setup.py develop`. The command is like +# "pip install []" # # Uses globals ``STACK_USER`` -# setup_package project_dir [flags] [extras] -# project_dir: directory of project repo (e.g., /opt/stack/keystone) -# flags: pip CLI options/flags -# extras: comma-separated list of optional dependencies to install -# (e.g., ldap,memcache). -# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements -# The command is like "pip install []" +# +# Usage: +# setup_package [-bindep[=profile,profile]] [extras] +# +# -bindep : Use bindep to install dependencies; select extra profiles +# as comma separated arguments after "=" +# project_dir : directory of project repo (e.g., /opt/stack/keystone) +# flags : pip CLI options/flags +# extras : comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). 
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements function setup_package { + local bindep=0 + local bindep_flag="" + local bindep_profiles="" + if [[ $1 == -bindep* ]]; then + bindep=1 + IFS="=" read bindep_flag bindep_profiles <<< ${1} + shift + fi local project_dir=$1 local flags=$2 local extras=$3 @@ -564,6 +600,11 @@ function setup_package { extras="[$extras]" fi + # install any bindep packages + if [[ $bindep == 1 ]]; then + install_bindep $project_dir/bindep.txt $bindep_profiles + fi + pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist if [[ "$flags" == "-e" ]]; then diff --git a/stack.sh b/stack.sh index 144c233f05..ad1359e926 100755 --- a/stack.sh +++ b/stack.sh @@ -801,6 +801,11 @@ fi # Install required infra support libraries install_infra +# Install bindep +$VIRTUALENV_CMD $DEST/bindep-venv +# TODO(ianw) : optionally install from zuul checkout? +$DEST/bindep-venv/bin/pip install bindep + # Extras Pre-install # ------------------ # Phase: pre-install diff --git a/stackrc b/stackrc index 170d3b648f..c8e6c67909 100644 --- a/stackrc +++ b/stackrc @@ -149,6 +149,13 @@ export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}} _DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)" export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}} +# Create a virtualenv with this +if [[ ${USE_PYTHON3} == True ]]; then + export VIRTUALENV_CMD="python3 -m venv" +else + export VIRTUALENV_CMD="virtualenv " +fi + # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then # Old-style user-supplied config From 16a10d7435be54c97982b93bbc07686ebeb536ac Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Mon, 14 Jan 2019 17:16:01 +0000 Subject: [PATCH 1009/1936] Configure nova's use of placement more correctly This change addresses a few inconsistencies in how nova processes are configured to speak to the placement service. The initial inspiration was that region_name was not being set in the [placement] section, despite $REGION_NAME being used when setting the endpoint in the catalog. That's fixed. While fixing that two other issues became clear: * Configuring nova process to use placement should happen in lib/nova not lib/placement so the function has been moved. * auth_strategy is not relevant in the [placement] section of a nova process The name of the function is maintained, in case there are plugins which call it, but a comment is added to indicate that other services besides nova compute (such as the cell conductor) may use the function. Change-Id: I4a46b6460596e9a445bd90de2d52dbb71fb963df --- lib/nova | 15 +++++++++++++++ lib/placement | 13 ------------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/lib/nova b/lib/nova index d1d0b3c16b..182ae1efef 100644 --- a/lib/nova +++ b/lib/nova @@ -597,6 +597,21 @@ function create_nova_conf { fi } +# Configure access to placement from a nova service, usually +# compute, but sometimes conductor. +function configure_placement_nova_compute { + # Use the provided config file path or default to $NOVA_CONF. 
+ local conf=${1:-$NOVA_CONF} + iniset $conf placement auth_type "password" + iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf placement username placement + iniset $conf placement password "$SERVICE_PASSWORD" + iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf placement project_name "$SERVICE_TENANT_NAME" + iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf placement region_name "$REGION_NAME" +} + function configure_console_compute { # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled diff --git a/lib/placement b/lib/placement index da69e39264..a89cd26939 100644 --- a/lib/placement +++ b/lib/placement @@ -93,19 +93,6 @@ function _config_placement_apache_wsgi { " -i $placement_api_apache_conf } -function configure_placement_nova_compute { - # Use the provided config file path or default to $NOVA_CONF. - local conf=${1:-$NOVA_CONF} - iniset $conf placement auth_type "password" - iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf placement username placement - iniset $conf placement password "$SERVICE_PASSWORD" - iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf placement project_name "$SERVICE_TENANT_NAME" - iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf placement auth_strategy $PLACEMENT_AUTH_STRATEGY -} - # create_placement_conf() - Write config function create_placement_conf { rm -f $PLACEMENT_CONF From fa9aadfdd8c5f67a47f5a4abafbae0671283affa Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 15 Jan 2019 18:31:05 +1100 Subject: [PATCH 1010/1936] Install from bindep.txt in plugins This allows plugins to specify their binary dependencies in bindep format. Some thinking on the implementation: this is in contrast to the files/[deb|rpm] installation, which is called from the external install_prereqs.sh script. This script being an externally callable entry-point is really an artifact of the days when we would build snapshot images for CI and wanted to pre-cache downloads. These days we use the mirror system to keep packages close to CI nodes. Thus rather than expand install_prereqs.sh to also be installing virtualenvs and python dependencies, this seems to fit better as a separate internal phase of stack.sh. Documentation is updated Change-Id: Icbdfbf97c17c906a7ae86f43e80eb2c445816228 --- doc/source/plugins.rst | 56 ++++++++++++++++++++++++++++++++++++------ functions-common | 24 ++++++++++++++++++ stack.sh | 7 ++++++ 3 files changed, 80 insertions(+), 7 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 89b9381813..de7bdf22b0 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -222,14 +222,20 @@ dependency mechanism is beyond the scope of the current work. System Packages =============== -Devstack provides a framework for getting packages installed at an early -phase of its execution. These packages may be defined in a plugin as files -that contain new-line separated lists of packages required by the plugin -Supported packaging systems include apt and yum across multiple distributions. -To enable a plugin to hook into this and install package dependencies, packages -may be listed at the following locations in the top-level of the plugin -repository: + +Devstack based +-------------- + +Devstack provides a custom framework for getting packages installed at +an early phase of its execution. 
These packages may be defined in a +plugin as files that contain new-line separated lists of packages +required by the plugin + +Supported packaging systems include apt and yum across multiple +distributions. To enable a plugin to hook into this and install +package dependencies, packages may be listed at the following +locations in the top-level of the plugin repository: - ``./devstack/files/debs/$plugin_name`` - Packages to install when running on Ubuntu, Debian or Linux Mint. @@ -240,6 +246,42 @@ repository: - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when running on SUSE Linux or openSUSE. +Although there are no plans to remove this method of installing +packages, plugins should consider it deprecated in favor of the ``bindep`` support +described below. + +bindep +------ + +The `bindep `__ project has +become the de facto standard for OpenStack projects to specify binary +dependencies. + +A plugin may provide a ``./devstack/files/bindep.txt`` file, which +will be called with the *default* profile to install packages. For +details on the syntax, etc. see the bindep documentation. + +It is also possible to use the ``bindep.txt`` of projects that are +being installed from source with the ``-bindep`` flag available in +install functions. For example + +.. code-block:: bash + + if use_library_from_git "diskimage-builder"; then + GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL + GITDIR["diskimage-builder"]=$DEST/diskimage-builder + GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF + git_clone_by_name "diskimage-builder" + setup_dev_lib -bindep "diskimage-builder" + fi + +will result in any packages required by the ``bindep.txt`` of the +``diskimage-builder`` project being installed. Note however that jobs +that switch projects between source and released/pypi installs +(e.g. with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both +released dependencies and master versions) will have to deal with +``bindep.txt`` being unavailable without the source directory. + Using Plugins in the OpenStack Gate =================================== diff --git a/functions-common b/functions-common index 6651cc2661..2a5d13998a 100644 --- a/functions-common +++ b/functions-common @@ -1248,6 +1248,30 @@ function get_plugin_packages { $xtrace } +# Search plugins for a bindep.txt file +# +# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS`` +# +# Note this is only valid after BINDEP_CMD is setup in stack.sh, and +# is thus not really intended to be called externally. +function _get_plugin_bindep_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local bindep_file + local packages + + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt + if [[ -f ${bindep_file} ]]; then + packages+=$($BINDEP_CMD -b --file ${bindep_file} || true) + fi + done + echo "${packages}" + $xtrace +} + # Distro-agnostic package installer # Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE`` # install_package package [package ...] diff --git a/stack.sh b/stack.sh index ad1359e926..95e4df4277 100755 --- a/stack.sh +++ b/stack.sh @@ -805,6 +805,13 @@ install_infra $VIRTUALENV_CMD $DEST/bindep-venv # TODO(ianw) : optionally install from zuul checkout?
$DEST/bindep-venv/bin/pip install bindep +export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep + +# Install packages as defined in plugin bindep.txt files +pkgs="$( _get_plugin_bindep_packages )" +if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} +fi # Extras Pre-install # ------------------ From 82537871376afe98a286e1ba424cf192ae60869a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 18 Jan 2019 10:42:13 -0500 Subject: [PATCH 1011/1936] Restrict database access to nova-compute in singleconductor mode Change I4820abe57a023050dd8d067c77e26028801ff288 removed access to the database for the nova-compute process but only in superconductor mode. Grenade runs in singleconductor mode though so we are getting tracebacks in nova-compute logs during grenade runs because nova-compute is running with nova.conf which is configured with access to the nova API database. This change handles removing database access for nova-compute generically to cover both the singleconductor and superconductor cases. Change-Id: I81301eeecc7669a169deeb1e2c5d298a595aab94 Closes-Bug: #1812398 --- lib/nova | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index d1d0b3c16b..199550bf03 100644 --- a/lib/nova +++ b/lib/nova @@ -897,25 +897,26 @@ function start_nova_compute { local compute_cell_conf=$NOVA_CONF fi + cp $compute_cell_conf $NOVA_CPU_CONF + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so # skip these bits and use the normal config. - NOVA_CPU_CONF=$compute_cell_conf echo "Skipping multi-cell conductor fleet setup" else # "${CELLSV2_SETUP}" is "superconductor" - cp $compute_cell_conf $NOVA_CPU_CONF # FIXME(danms): Should this be configurable? iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True # Since the nova-compute service cannot reach nova-scheduler over # RPC, we also disable track_instance_changes. iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" - # Make sure we nuke any database config - inidelete $NOVA_CPU_CONF database connection - inidelete $NOVA_CPU_CONF api_database connection fi + # Make sure we nuke any database config + inidelete $NOVA_CPU_CONF database connection + inidelete $NOVA_CPU_CONF api_database connection + # Console proxies were configured earlier in create_nova_conf. Now that the # nova-cpu.conf has been created, configure the console settings required # by the compute process. From 29ab9b8e3065488842a2477a8a8c48020d955094 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 22 Jan 2019 16:23:42 +0100 Subject: [PATCH 1012/1936] Always start iscsid for nova-compute Recently iscsid was disabled by default on Ubuntu 18.04 (bionic), and it may be on Xenial too, see: https://bugs.launchpad.net/ubuntu/+source/open-iscsi/+bug/1755858 On a local Bionic deployment with Python 3, the lack of iscsid makes nova-compute fail with an exception when trying to attach a volume: Invalid input received: Connector doesn't have required information: initiator Asking for the service to be started even if it is already running should not hurt, so remove the check for the distribution. This does not seem to be an issue on CentOS 7 (but Python 2) where the socket activation of iscsid seems to work, so maybe there is another way to make this working. Also, the service could be enabled, not just started. 
Change-Id: Ifa995dcf8eb930e959f54e96af6f5fce3eac28ae --- lib/nova | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 199550bf03..0b08c0ac6c 100644 --- a/lib/nova +++ b/lib/nova @@ -327,10 +327,8 @@ function configure_nova { sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi - if is_suse; then - # iscsid is not started by default - start_service iscsid - fi + # ensure that iscsid is started, even when disabled by default + start_service iscsid fi # Rebuild the config file from scratch From 6757a9c5dec32f39b984ceaaa0eb9e903602769a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Fri, 21 Dec 2018 13:17:05 +0100 Subject: [PATCH 1013/1936] Add option to place etcd data dir on RAM disk Seems like for etcd-heavy services like Kubernetes, the fsync performance of gate VM's are too low [1]. This commit implements an option to put etcd data directory on RAM disk (tmpfs) to work this around. [1] http://lists.openstack.org/pipermail/openstack-discuss/2019-January/001849.html Change-Id: I5a17099cb9d6941b1a009dc82daefd2c7946d892 --- lib/etcd3 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/etcd3 b/lib/etcd3 index c65a522267..0748ea01ee 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -27,6 +27,10 @@ set +o xtrace ETCD_DATA_DIR="$DATA_DIR/etcd" ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" +# Option below will mount ETCD_DATA_DIR as ramdisk, which is useful to run +# etcd-heavy services in the gate VM's, e.g. Kubernetes. +ETCD_USE_RAMDISK=$(trueorfalse False ETCD_USE_RAMDISK) +ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512} if is_ubuntu ; then UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` @@ -89,6 +93,9 @@ function cleanup_etcd3 { $SYSTEMCTL daemon-reload + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo umount $ETCD_DATA_DIR + fi sudo rm -rf $ETCD_DATA_DIR } @@ -98,6 +105,9 @@ function install_etcd3 { # Create the necessary directories sudo mkdir -p $ETCD_BIN_DIR sudo mkdir -p $ETCD_DATA_DIR + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR + fi # Download and cache the etcd tgz for subsequent use local etcd_file From 8b9864d7bf2eaad3c42030cf8e7a2a0b764dc552 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Thu, 24 Jan 2019 10:49:35 -0800 Subject: [PATCH 1014/1936] Update the devstack LBaaS guide for Octavia The existing devstack guide for load balancing is out of date. This patch updates the guide to reflect the current way to install devstack with the Octavia plugin(s). Change-Id: Id48b70b50e44ec7b965d969b2d93f77543d7364c --- doc/source/guides/devstack-with-lbaas-v2.rst | 113 +++++++++++++------ 1 file changed, 76 insertions(+), 37 deletions(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index df3c7ce2ac..b1d88cb35c 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -1,39 +1,54 @@ -Configure Load-Balancer Version 2 -================================= +Devstack with Octavia Load Balancing +==================================== -Starting in the OpenStack Liberty release, the -`neutron LBaaS v2 API `_ -is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference -driver is based on Octavia. +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. 
+This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://developer.openstack.org/api-ref/load-balancer/v2/index.html Phase 1: Create DevStack + 2 nova instances -------------------------------------------- First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find useful. +make sure it is updated. Install git and any other developer tools you find +useful. Install devstack :: git clone https://git.openstack.org/openstack-dev/devstack - cd devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. -Edit your ``local.conf`` to look like +Edit your ``/opt/stack/devstack/local.conf`` to look like :: [[local|localrc]] - # Load the external LBaaS plugin. - enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_plugin octavia https://git.openstack.org/openstack/octavia + # If you are enabling horizon, include the octavia dashboard + # enable_plugin octavia-dashboard https://git.openstack.org/openstack/octavia-dashboard.git + # If you are enabling barbican for TLS offload in Octavia, include it here. + # enable_plugin barbican https://github.com/openstack/barbican.git + + # If you have python3 available: + # USE_PYTHON3=True # ===== BEGIN localrc ===== DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password + SERVICE_TOKEN=password RABBIT_PASSWORD=password # Enable Logging LOGFILE=$DEST/logs/stack.sh.log @@ -41,27 +56,30 @@ Edit your ``local.conf`` to look like LOG_COLOR=True # Pre-requisite ENABLED_SERVICES=rabbit,mysql,key - # Horizon - ENABLED_SERVICES+=,horizon + # Horizon - enable for the OpenStack web GUI + # ENABLED_SERVICES+=,horizon # Nova - ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch + ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy + ENABLED_SERVICES+=,placement-api,placement-client # Glance ENABLED_SERVICES+=,g-api,g-reg # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta - # Enable LBaaS v2 - ENABLED_SERVICES+=,q-lbaasv2 + ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api # Cinder ENABLED_SERVICES+=,c-api,c-vol,c-sch # Tempest ENABLED_SERVICES+=,tempest + # Barbican - Optionally used for TLS offload in Octavia + # ENABLED_SERVICES+=,barbican # ===== END localrc ===== Run stack.sh and do some sanity checks :: + sudo su - stack + cd /opt/stack/devstack ./stack.sh . 
./openrc @@ -72,38 +90,59 @@ Create two nova instances that we can use as test http servers: :: #create nova instances on private network - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 - nova list # should show the nova instances just created + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created #add secgroup rules to allow ssh etc.. openstack security group rule create default --protocol icmp openstack security group rule create default --protocol tcp --dst-port 22:22 openstack security group rule create default --protocol tcp --dst-port 80:80 -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run +Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run :: MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& -Phase 2: Create your load balancers ------------------------------------- +Phase 2: Create your load balancer +---------------------------------- + +Make sure you have the 'openstack loadbalancer' commands: :: - neutron lbaas-loadbalancer-create --name lb1 private-subnet - neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE. - neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 - sleep 10 # Sleep since LBaaS actions can take a few seconds depending on the environment. - neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - sleep 10 - neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1 - sleep 10 - neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1 - -Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes -(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be -reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is -"curl that-lb-ip", which should alternate between showing the IPs of the two nodes. + + pip install python-octaviaclient + +Create your load balancer: + +:: + + openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. + openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. + openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+    openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+    openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: The <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note, using the API directly you can do all of the above commands in one
+API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+    openstack loadbalancer show lb1 # Note the vip_address
+    curl http://<vip_address>
+    curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.

From d6bbeabb5c5c35eb9ada92656c9b65fbef8aff07 Mon Sep 17 00:00:00 2001
From: Adam Spiers
Date: Wed, 23 Jan 2019 19:31:53 +0000
Subject: [PATCH 1015/1936] Fix installation of mkisofs on SUSE Linux
 Enterprise 12

It's provided by the cdrkit-cdrtools-compat package.

Change-Id: I4b57e03391d47bed777016ab1d735b8ba4aa5ceb
---
 files/rpms-suse/n-cpu | 3 ++-
 files/rpms-suse/nova  | 4 +++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index c11e9f0763..9c724cb9d8 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,8 +1,9 @@
+cdrkit-cdrtools-compat # dist:sle12
 cryptsetup
 dosfstools
 libosinfo
 lvm2
-mkisofs
+mkisofs # not:sle12
 open-iscsi
 sg3_utils
 # Stuff for diablo volumes
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 4103a407d2..1d5812146b 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,3 +1,4 @@
+cdrkit-cdrtools-compat # dist:sle12
 conntrack-tools
 curl
 dnsmasq
@@ -11,7 +12,8 @@ kvm # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
 mariadb # NOPRIME
-mkisofs # required for config_drive
+# mkisofs is required for config_drive
+mkisofs # not:sle12
 parted
 polkit
 # qemu as fallback if kvm cannot be used

From bc2a88d1f493f4f0784650e5ac959e8677495669 Mon Sep 17 00:00:00 2001
From: Adam Spiers
Date: Thu, 24 Jan 2019 18:57:33 +0000
Subject: [PATCH 1016/1936] On SUSE-based systems, check whether we have
 mariadb or mysql service

Older mariadb packages on SLES 12 provided mysql.service. The newer
ones on SLES 12 and 15 use mariadb.service; they also provide a
mysql.service symlink for backwards-compatibility, but let's not rely
on that.

Change-Id: Ife6bd007ba30af0b77d44832b19d518034bdb12b
---
 lib/databases/mysql | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lib/databases/mysql b/lib/databases/mysql
index ac0c083c91..4d0f5f3e45 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -16,7 +16,13 @@ MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
 register_database mysql

 MYSQL_SERVICE_NAME=mysql
-if is_suse || is_fedora && ! is_oraclelinux; then
+if is_fedora && ! is_oraclelinux; then
+    MYSQL_SERVICE_NAME=mariadb
+elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
+    # Older mariadb packages on SLES 12 provided mysql.service.  The
+    # newer ones on SLES 12 and 15 use mariadb.service; they also
+    # provide a mysql.service symlink for backwards-compatibility, but
+    # let's not rely on that.
MYSQL_SERVICE_NAME=mariadb fi From bbb6b0c2409278a4c9266c39bd1f5d91f7066bb8 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Fri, 25 Jan 2019 00:29:42 +0000 Subject: [PATCH 1017/1936] Fix version comparison for SUSE Linux Enterprise 12 The version comparison introduced in I5152f2585c3d4d18853988d6290039d6b1713b99 was broken, because it tried to use bash's -lt operator for floating point comparison, but bash only supports integer arithmetic. So instead use devstack's vercmp() function. Change-Id: I8aac71c5bb6c2e82479d62831ea0672ba6a9a534 --- lib/neutron_plugins/ovs_base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 523024e2fe..fbe4c92139 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -69,7 +69,7 @@ function _neutron_ovs_base_install_agent_packages { restart_service openvswitch sudo systemctl enable openvswitch elif is_suse; then - if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then + if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then restart_service openvswitch-switch else # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 From 96269d8e758e168fb857fca6e96cbd9a50628ac0 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Mon, 17 Dec 2018 10:38:42 -0500 Subject: [PATCH 1018/1936] Change config-file to config-dir for g-api start The glance-api service may use multiple config files, so tell oslo.config about the config dir instead of a specific config file when the service is started. Change-Id: Iad3602d209cbb31e10683c67e1fd6b465d19f560 Partial-bug: #1805765 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 94f6a22931..b1c8c4dacc 100644 --- a/lib/glance +++ b/lib/glance @@ -345,7 +345,7 @@ function start_glance { if [[ "$WSGI_MODE" == "uwsgi" ]]; then run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" fi echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." From ee4b6a0128bad7ae64e9ae7a9c79470585e93dc6 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 29 Jan 2019 18:17:30 +0000 Subject: [PATCH 1019/1936] set owner and group to no for fetch-devstack-log-dir As the user on the node under test may not exist on the zuul executor node we do not copy the log owner or group to avoid the rsync task failing when it tries to chown the files. Change-Id: I500cf3692a4d27b0c2a0a4f5586580d180a8778e --- roles/fetch-devstack-log-dir/tasks/main.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml index 5a198b21b4..276c4e0eb5 100644 --- a/roles/fetch-devstack-log-dir/tasks/main.yaml +++ b/roles/fetch-devstack-log-dir/tasks/main.yaml @@ -1,5 +1,10 @@ +# as the user in the guest may not exist on the executor +# we do not preserve the group or owner of the copied logs. 
+ - name: Collect devstack logs synchronize: dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}" mode: pull src: "{{ devstack_base_dir }}/logs" + group: no + owner: no From b6f04ca5c9c09db2d567ecbf7fad757158fd0aba Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Wed, 23 Jan 2019 18:55:16 +0000 Subject: [PATCH 1020/1936] Fix distro detection for SUSE Linux Enterprise On SUSE Linux Enterprise distributions, lsb_release -i typically returns "SUSE" not "SUSE LINUX" as the vendor string. To avoid duplication of the same regular expressions in multiple places, add is_opensuse() and is_sle() helper functions, and modify is_suse to invoke those. This may also be helpful in the future for distinguishing some corner cases where things are handled differently between openSUSE and SLE. Change-Id: I43bf163bc963758ddbb6289928837f5f6512f265 --- functions-common | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index e5962db42b..28b12b270b 100644 --- a/functions-common +++ b/functions-common @@ -379,14 +379,14 @@ function GetDistro { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + elif is_opensuse; then DISTRO="opensuse-$os_RELEASE" # Tumbleweed uses "n/a" as a codename, and the release is a datestring # like 20180218, so not very useful. Leap however uses a release # with a "dot", so for example 15.0 [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ DISTRO="opensuse-tumbleweed" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + elif is_suse_linux_enterprise; then # just use major release DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ @@ -460,11 +460,30 @@ function is_fedora { # (openSUSE, SLE). # is_suse function is_suse { + is_opensuse || is_suse_linux_enterprise +} + + +# Determine if current distribution is an openSUSE distribution +# is_opensuse +function is_opensuse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [[ "$os_VENDOR" =~ (openSUSE) ]] +} + + +# Determine if current distribution is a SUSE Linux Enterprise (SLE) +# distribution +# is_suse_linux_enterprise +function is_suse_linux_enterprise { if [[ -z "$os_VENDOR" ]]; then GetOSVersion fi - [[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]] + [[ "$os_VENDOR" =~ (^SUSE) ]] } From ec8285271e0c0b7c97fb6cd8816eca8ad844c5bd Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 30 Jan 2019 20:48:18 -0500 Subject: [PATCH 1021/1936] Change "Options pimp your stack" heading in multinode docs Uses a less offensive and arguably better understood section header in the multinode docs. 
Change-Id: Ie6fd58e9abd5c1ce88d88ac55419807790f61851 Closes-Bug: #1810317 --- doc/source/guides/multinode-lab.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index b4e2891c10..33820daec0 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -240,8 +240,8 @@ this when it runs but there are times it needs to still be done by hand: sudo rm -rf /etc/libvirt/qemu/inst* sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy -Options pimp your stack -======================= +Going further +============= Additional Users ---------------- From 1516e720ce1d6eb6eca38feb1ca18dc7fc8c105c Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 5 Feb 2019 11:21:39 -0500 Subject: [PATCH 1022/1936] Set ETCD_USE_RAMDISK=True by default Cinder and etcd are enabled by default and by default cinder uses etcd as a distributed lock manager with tooz as an intermediary. We see a lot of ToozConnectionErrors [1] in the cinder logs when etcd is backed up [2] which results in cinder operations timing out causing test failures, like when a volume is not deleted within a given time. This changes ETCD_USE_RAMDISK=True by default to try and alleviate some of the pressure. An alternative is if we know we're in a single-node job we could just not use a DLM for Cinder. [1] http://status.openstack.org/elastic-recheck/#1810526 [2] etcd[26824]: sync duration of 12.076762123s, expected less than 1s Change-Id: I5f82aa40e9d84114e7b7b5cf19ec4942d6552490 Partial-Bug: #1810526 --- lib/etcd3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index 0748ea01ee..4f3a7a4349 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -29,7 +29,7 @@ ETCD_SYSTEMD_SERVICE="devstack@etcd.service" ETCD_BIN_DIR="$DEST/bin" # Option below will mount ETCD_DATA_DIR as ramdisk, which is useful to run # etcd-heavy services in the gate VM's, e.g. Kubernetes. -ETCD_USE_RAMDISK=$(trueorfalse False ETCD_USE_RAMDISK) +ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK) ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512} if is_ubuntu ; then From 4a3cc1ce341eca821ddf004ca484bd827d060507 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 30 Jan 2019 20:50:47 -0500 Subject: [PATCH 1023/1936] Remove hard-coded VOLUME_BACKING_FILE_SIZE from multinode docs The default for VOLUME_BACKING_FILE_SIZE changes over time and the docs referencing it are clearly not keeping pace so rather than hard-code a default in the docs just remove it since the doc already mentions the variable used to set that size. Change-Id: I4242584d13250872250689863d1b70c68594eefe --- doc/source/guides/multinode-lab.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 33820daec0..7978cd86ea 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -302,10 +302,10 @@ Volumes DevStack will automatically use an existing LVM volume group named ``stack-volumes`` to store cloud-created volumes. If ``stack-volumes`` -doesn't exist, DevStack will set up a 10Gb loop-mounted file to contain -it. This obviously limits the number and size of volumes that can be -created inside OpenStack. The size can be overridden by setting -``VOLUME_BACKING_FILE_SIZE`` in ``local.conf``. +doesn't exist, DevStack will set up a loop-mounted file to contain +it. 
If the default size is insufficient for the number and size of volumes +required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in +``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``). ``stack-volumes`` can be pre-created on any physical volume supported by Linux's LVM. The name of the volume group can be changed by setting From b43810a36635f1d0fe9291b157506fcf20272adf Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Thu, 7 Feb 2019 16:46:49 -0500 Subject: [PATCH 1024/1936] Glance should not reference Cinder v1 Cinder v1 was removed over a year ago. Change the cinder template URLs devstack defines in the glance-api.conf to use cinder v3 instead. Change-Id: I4a68dc0b53631be0708e7411c37619dd6dfd4fa6 --- lib/glance | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index 94f6a22931..8001157e58 100644 --- a/lib/glance +++ b/lib/glance @@ -236,8 +236,8 @@ function configure_glance { CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} - iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" - iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" + iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi if [[ "$WSGI_MODE" == "uwsgi" ]]; then From bfd2a54c13b8f3235991e534d5aa822be2aeaf19 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 8 Feb 2019 14:49:16 -0500 Subject: [PATCH 1025/1936] Stop creating the cinderv1 endpoint The cinder v1 api is disabled by default, the catalog entry is confusing. 
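A quick, illustrative way to confirm the result on a running devstack (not part
of this change; assumes the usual devstack admin credentials) is to check that
the legacy ``volume`` (v1) service type no longer shows up:

    . openrc admin admin
    openstack catalog list
    openstack endpoint list | grep -i volume
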
Change-Id: Ifea283d8aff9f7a70b68d601c5225c3d4fe250e6 --- lib/cinder | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index 76bf928413..d69b21efd0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -349,18 +349,12 @@ function create_cinder_accounts { # block-storage is the official service type get_or_create_service "cinder" "block-storage" "Cinder Volume Service" - get_or_create_service "cinder" "volume" "Cinder Volume Service" if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - get_or_create_endpoint \ - "volume" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" get_or_create_endpoint \ "volumev2" \ @@ -378,11 +372,6 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - get_or_create_endpoint \ - "volume" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" get_or_create_endpoint \ "volumev2" \ From e729976c82791c513feedd51c9c2c45d79b09f61 Mon Sep 17 00:00:00 2001 From: Nguyen Hai Date: Wed, 13 Feb 2019 15:04:02 +0900 Subject: [PATCH 1026/1936] Add placement as default project in index webpage Change-Id: I101ade5368fbdf108906e89e1c3ba03a46cd6f35 --- doc/source/index.rst | 4 ++-- doc/source/overview.rst | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 6c42a5b4e9..9186f6dba7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -114,8 +114,8 @@ Profit! You now have a working DevStack! Congrats! Your devstack will have installed ``keystone``, ``glance``, ``nova``, -``cinder``, ``neutron``, and ``horizon``. Floating IPs will be -available, guests have access to the external world. +``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs +will be available, guests have access to the external world. You can access horizon to experience the web interface to OpenStack, and manage vms, networks, volumes, and images from diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 2479cd0bc8..a609333289 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -64,7 +64,8 @@ Services The default services configured by DevStack are Identity (keystone), Object Storage (swift), Image Service (glance), Block Storage -(cinder), Compute (nova), Networking (neutron), Dashboard (horizon) +(cinder), Compute (nova), Placement (placement), +Networking (neutron), Dashboard (horizon). Additional services not included directly in DevStack can be tied in to ``stack.sh`` using the :doc:`plugin mechanism ` to call From 72f632222f6d90d3545b5d7ca48297da4218e2ea Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 19 Feb 2019 14:06:18 +1100 Subject: [PATCH 1027/1936] Update cirros256 flavor to have some disk It seems nova has changed defaults on who can create zero-sized disk instances [1] and now some devstack jobs, like nodepool's, can't create cirros images using this flavor. It seems the easiest thing to do is just to bump it up. 
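For an existing environment that still carries the old zero-disk flavor, a
hedged sketch of bringing it in line with the new default (the id/name below
simply mirror the change further down):

    openstack flavor delete cirros256
    openstack flavor create --id c1 --ram 256 --disk 1 --vcpus 1 cirros256
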
[1] https://review.openstack.org/#/c/603910/ Change-Id: I1172d4775d608568ccbeb27e2975d83add892ea9 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6c9b944ba0..033ebf3697 100644 --- a/lib/nova +++ b/lib/nova @@ -1167,7 +1167,7 @@ function create_flavors { if is_service_enabled n-api; then if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then # Note that danms hates these flavors and apologizes for sdague - openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256 + openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 cirros256 openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G From f0dc93dcb4bc69442733056e88de9aff0f890664 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Apr 2018 10:42:07 +1000 Subject: [PATCH 1028/1936] Workaround pip10 uninstall of packages As noted in comments, this is a hack to get pip10 to overwrite some package installed libraries. Change-Id: Iea24a3ea915e13f7e0882144028ec5ff5bfdfae1 --- tools/fixup_stuff.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a939e30b02..1ff7bfa516 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -202,7 +202,19 @@ function fixup_fedora { # install requests with the bundled urllib3 to avoid conflicts pip_install --upgrade --force-reinstall requests fi + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python2.7/site-packages/enum34*.egg-info + sudo rm -rf /usr/lib/python2.7/site-packages/ipaddress*.egg-info + sudo rm -rf /usr/lib/python2.7/site-packages/ply-*.egg-info + sudo rm -rf /usr/lib/python2.7/site-packages/typing-*.egg-info } function fixup_suse { From 543aed422fd89875113861e6a554da7980f8dd4f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Apr 2018 15:22:47 +1000 Subject: [PATCH 1029/1936] Don't install numpy packages numpy is a python requirement of the websockify package (it appears there was some disucssion over *removing* this in [1], but did not happen). Possibly these packages were installed a long time ago before wheel support as it was taking a long time to build. But we have wheels today, and later versions than the distro provides are being dragged in anyway. Remove all distro installs. 
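A simple way to double-check that numpy still gets pulled in from PyPI as a
wheel once the distro packages are gone (illustrative only; versions will
vary):

    pip install websockify
    python -c 'import numpy; print(numpy.__version__)'
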
[1] https://github.com/novnc/websockify/pull/163 Change-Id: I322dd9e1a07d8ce03c26cf3fcccebd6e21282fe4 --- files/rpms-suse/n-novnc | 1 - files/rpms-suse/n-spice | 1 - files/rpms/n-novnc | 1 - files/rpms/n-spice | 1 - files/rpms/nova | 1 - 5 files changed, 5 deletions(-) delete mode 100644 files/rpms-suse/n-novnc delete mode 100644 files/rpms-suse/n-spice delete mode 100644 files/rpms/n-novnc delete mode 100644 files/rpms/n-spice diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-novnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-spice +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-novnc +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/n-spice b/files/rpms/n-spice deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-spice +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/nova b/files/rpms/nova index 4140cd7bae..8d73644025 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -13,7 +13,6 @@ libxml2-python m2crypto mysql-devel mysql-server # NOPRIME -numpy # needed by websockify for spice console parted polkit rabbitmq-server # NOPRIME From 29771c1c1e09e6bf2cad0f67cf4c176735f5ac0a Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Thu, 21 Feb 2019 10:36:18 +0100 Subject: [PATCH 1030/1936] Remove the usage of read_password from library files The read_password function is defined inside stack.sh and it cannot be used inside the "public library interface" provided by DevStack. Move the calls found inside library files to stack.sh, following the same pattern of the other calls to read_password. Change-Id: I8adc6723b677dfac2bef735f660e056c498bf773 --- lib/database | 2 -- lib/nova_plugins/hypervisor-xenserver | 1 - stack.sh | 19 ++++++++++++++++++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/lib/database b/lib/database index 0d720527df..7940cf2208 100644 --- a/lib/database +++ b/lib/database @@ -87,8 +87,6 @@ function initialize_database_backends { if [ -n "$MYSQL_PASSWORD" ]; then DATABASE_PASSWORD=$MYSQL_PASSWORD - else - read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." fi # We configure Nova, Horizon, Glance and Keystone to use MySQL as their diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index 6f79e4ff7c..cd4d3caa84 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -59,7 +59,6 @@ function configure_nova_hypervisor { die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf" fi - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL" iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER" diff --git a/stack.sh b/stack.sh index 022d5b9438..8d4042dc23 100755 --- a/stack.sh +++ b/stack.sh @@ -691,7 +691,14 @@ function read_password { # The available database backends are listed in ``DATABASE_BACKENDS`` after # ``lib/database`` is sourced. ``mysql`` is the default. 
-initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" +if initialize_database_backends; then + echo "Using $DATABASE_TYPE database backend" + # Last chance for the database password. This must be handled here + # because read_password is not a library function. + read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." +else + echo "No database enabled" +fi # Queue Configuration @@ -729,6 +736,16 @@ if is_service_enabled keystone; then fi +# Nova +# ----- + +if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then + # Look for the backend password here because read_password + # is not a library function. + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." +fi + + # Swift # ----- From cf7fc9c757a53113405e8f52af1020e5d21b04fe Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Mon, 25 Feb 2019 10:46:52 +0100 Subject: [PATCH 1031/1936] Add 3 nodes nodeset based on Ubuntu bionic It is needed for example in neutron-tempest-dvr-ha-multinode-full job. Change-Id: I1b9dbb256e338d7018adb8b2ee97999ebd8eda44 Related-Bug: #1804844 --- .zuul.yaml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index afe400e1a4..ce1a300237 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -148,6 +148,41 @@ nodes: - compute1 +- nodeset: + name: openstack-three-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + - name: compute2 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + - job: name: devstack-base parent: multinode From 0d83e09464551a8eb4ee5c66dc82de7053b14d70 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Wed, 27 Feb 2019 11:10:05 +0000 Subject: [PATCH 1032/1936] Use master upper-constraints when installing tempest plugins when tempst venv is build, it use the master upper_contraint[1] but when we install tempest plugin, it use branch upper_contraint. This leads to mismatch the dependency version between tempest and required tempest plugins setup. Current flow after this change is: 1. install tempest form master (until you explicitly change TEMPEST_BRANCH which is default to master in all stable branch). It applies the upper_constraint from the stable branch but that will be overridden in step2 2. configure tempest, here the created venv will install all dependency with master's upper_constraint. 3. install tempest plugins in same venv created above. Now tempest plugin will also use the master upper_constraint. With this tempest venv which has all enabled plugin will be contsraint with master. 
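Roughly, the manual equivalent of what the change below does (an illustrative
sketch; paths assume the standard /opt/stack layout):

    cd /opt/stack/requirements
    git show master:upper-constraints.txt > /tmp/u-c-m.txt
    cd /opt/stack/tempest
    tox -evenv-tempest -- pip install -c /tmp/u-c-m.txt <tempest-plugin>
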
[1] https://github.com/openstack-dev/devstack/blob/72f632222f6d90d3545b5d7ca48297da4218e2ea/lib/tempest#L590 Change-Id: I89314e8391e8f26c622fc090cbe27997b3cf049a Closes-Bug: #1816022 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 7526d3bb4e..1e344c8567 100644 --- a/lib/tempest +++ b/lib/tempest @@ -673,7 +673,9 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt $TEMPEST_PLUGINS + # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements. + (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt + tox -evenv-tempest -- pip install -c u-c-m.txt $TEMPEST_PLUGINS echo "Checking installed Tempest plugins:" tox -evenv-tempest -- tempest list-plugins fi From 55f172961a2b5422baa6ce3d9e46989c7902bbd1 Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Tue, 20 Nov 2018 19:22:01 +0000 Subject: [PATCH 1033/1936] Revert "Revert "remove external_network_bridge option"" This reverts commit e3e9ea299601665a295e31a98e90dd9587165850. Change-Id: I23e1b98bd2839b79226b55700ee404a8fda83f83 --- lib/neutron | 3 --- lib/neutron_plugins/ovs_base | 4 ---- 2 files changed, 7 deletions(-) diff --git a/lib/neutron b/lib/neutron index 62f7366e7e..b2414cfb1b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -95,9 +95,6 @@ NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" -# This is needed because _neutron_ovs_base_configure_l3_agent will set -# external_network_bridge -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} # This is needed because _neutron_ovs_base_configure_l3_agent uses it to create # an external network bridge PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index fbe4c92139..2e63fe3c7b 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -96,10 +96,6 @@ function _neutron_ovs_base_configure_firewall_driver { } function _neutron_ovs_base_configure_l3_agent { - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - fi - neutron-ovs-cleanup --config-file $NEUTRON_CONF if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || From 30d48ff88782347e5deb31369aa228f7345cfc6f Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Wed, 12 Dec 2018 19:41:36 +0000 Subject: [PATCH 1034/1936] Remove admin_domain_scope tempest setting Keystone is currently working through a bunch of changes to add proper system, domain, and project scope support for its API. This includes implementing ``admin``, ``member``, and ``reader`` roles for system, domain, and project assignments. 
More informaiton on those specific changes can be found here: https://review.openstack.org/#/q/(status:open+OR+status:closed)+project:openstack/keystone+branch:master+topic:implement-default-roles One thing that was uncovered in implementing that support for the project API was that setting tempest ``CONF.identity.admin_domain_scope = True`` meant domain admins of one domain would be able to list projects in other domains, highlighted in the following patch: https://review.openstack.org/#/c/624218/2 This commit doesn't set this option and assumes the proper domain-scoping behavior being built into keystone natively. Change-Id: I12a57cc43de0b17eababa19b7b94de5277689f82 Related-Bug: 1750660 --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 7526d3bb4e..e3ad7fed6a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -276,8 +276,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT - # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation - iniset $TEMPEST_CONFIG identity admin_domain_scope True if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then iniset $TEMPEST_CONFIG auth admin_username $admin_username iniset $TEMPEST_CONFIG auth admin_password "$password" From 8c5486993dec738a3e83746af2d62bdba648c999 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 May 2018 16:12:17 +0530 Subject: [PATCH 1035/1936] Bump noVNC to 1.0.0 This introduces a breaking change in the URLs used to access the console [1]. This is updated in both the documentation and linked nova change. [1] https://github.com/novnc/noVNC/commit/83391ffc Change-Id: I14a0be0034f4a76ab37eb90325967500c3bf1ff9 Depends-On: I9a50a111ff4911f4364a1b24d646095c72af3d2c Related-bug: #1682020 --- doc/source/guides/multinode-lab.rst | 2 +- lib/nova | 11 ++++++++++- stackrc | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 7978cd86ea..5f5bb846eb 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -177,7 +177,7 @@ machines, create a ``local.conf`` with: GLANCE_HOSTPORT=$SERVICE_HOST:9292 ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client NOVA_VNC_ENABLED=True - NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN diff --git a/lib/nova b/lib/nova index 033ebf3697..c892c96fce 100644 --- a/lib/nova +++ b/lib/nova @@ -614,7 +614,16 @@ function configure_console_compute { # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then + # Use the old URL when installing novnc packages. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + elif vercmp ${NOVNC_BRANCH} "<" "1.0.0"; then + # Use the old URL when installing older novnc source. 
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + else + # Use the new URL when building >=v1.0.0 from source. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_lite.html"} + fi iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" diff --git a/stackrc b/stackrc index 53868f3d9d..2705c327fa 100644 --- a/stackrc +++ b/stackrc @@ -608,7 +608,7 @@ IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} From c759706686abb421879148410adced34e44333f2 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 28 Feb 2019 11:20:36 +0000 Subject: [PATCH 1036/1936] support python 3 on centos 7 when installing with python 3.6 on centos7 pip installs packages to /usr/local/bin as it does on new versions of fedora. this change updates the check to include centos Change-Id: I7d16194d6ba1391ca31251d5b50cbb8de033fc38 --- inc/python | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index ec7eb5b9bb..419d5c5701 100644 --- a/inc/python +++ b/inc/python @@ -49,7 +49,8 @@ function get_python_exec_prefix { fi $xtrace - if python3_enabled && [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then + if python3_enabled && [[ "$os_VENDOR" == "CentOS" ]] || \ + [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then # Default Python 3 install prefix changed to /usr/local in Fedora 27: # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe echo "/usr/local/bin" From 36773b262e79c1eb66432cc19862f2097d94ec65 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 28 Feb 2019 17:30:47 -0500 Subject: [PATCH 1037/1936] Document the wonders of scale testing nova with the fake driver Devstack has some little known variables for running nova with the fake compute driver and running several nova-compute services on a single host, which can be useful for testing move operations and scale testing of controller services like nova-api and nova-scheduler. This adds documentation about the fake virt driver and related variables and scaling considerations when using them. Change-Id: Ic89d463d0f3f180b323edd6e2c8ff0404638ef07 --- doc/source/guides/nova.rst | 66 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 0f105d7c58..65491d13e8 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -68,3 +68,69 @@ These config options are defined in `nova.conf.serial_console For more information on OpenStack configuration see the `OpenStack Compute Service Configuration Reference `_ + + +Fake virt driver +================ + +Nova has a `fake virt driver`_ which can be used for scale testing the control +plane services or testing "move" operations between fake compute nodes, for +example cold/live migration, evacuate and unshelve. + +The fake virt driver does not communicate with any hypervisor, it just reports +some fake resource inventory values and keeps track of the state of the +"guests" created, moved and deleted. 
It is not feature-complete with the +compute API but is good enough for most API testing, and is also used within +the nova functional tests themselves so is fairly robust. + +.. _fake virt driver: http://git.openstack.org/cgit/openstack/nova/tree/nova/virt/fake.py + +Configuration +------------- + +Set the following in your devstack ``local.conf``: + +.. code-block:: ini + + [[local|localrc]] + VIRT_DRIVER=fake + NUMBER_FAKE_NOVA_COMPUTE= + +The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake +``nova-compute`` services to run and defaults to 1. + +When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in +nova and neutron automatically. However, other services, like cinder, will +still enforce quota limits by default. + +Scaling +------- + +The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors +such as: + +* The size of the host (physical or virtualized) on which devstack is running. +* The number of API workers. By default, devstack will run ``max($nproc/2, 2)`` + workers per API service. If you are running several fake compute services on + a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``. + +In addition, while quota will be disabled in neutron, there is no fake ML2 +backend for neutron so creating fake VMs will still result in real ports being +created. To create servers without networking, you can specify ``--nic=none`` +when creating the server, for example: + +.. code-block:: shell + + $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ + --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + +.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is + required to use ``--nic=none``. + +To avoid overhead from other services which you may not need, disable them in +your ``local.conf``, for example: + +.. code-block:: ini + + disable_service horizon + disable_service tempest From 56946cfc5f88d8df3b4b9a1a2530ce2cc9a68cde Mon Sep 17 00:00:00 2001 From: Nate Johnston Date: Mon, 12 Nov 2018 11:17:07 -0500 Subject: [PATCH 1038/1936] Replace deprecated brctl with ip commands The bridge-utils package has been deprecated for some time now [1] and 'brctl' does not exist on some more recent distros like Fedora 28. Replace references to brctl with the proper ip commands. Calls to "brctl show" are not being replaced with calls to "bridge link" because the output format is very different and in testing some bridges were not listed. So the simpler method of consulting /sys/class/net is used. In worlddump.py we try running both because failures are handled gracefully by _dump_cmd(), as well as "ip link show type bridge" for additional info. 
[1] https://lwn.net/Articles/703776/ for example Change-Id: Ie4c8ad6ce4a09c38023c9e4ec7834c249403145f Partial-Bug: #1801919 --- doc/source/guides/multinode-lab.rst | 11 ----------- lib/neutron_plugins/linuxbridge_agent | 16 +++++++++------- tools/worlddump.py | 2 ++ 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index b4e2891c10..05dd36edc0 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -369,17 +369,6 @@ To pull glance, OpenStack Image service, from an experimental fork: Notes stuff you might need to know ================================== -Reset the Bridge ----------------- - -How to reset the bridge configuration: - -:: - - sudo brctl delif br100 eth0.926 - sudo ip link set dev br100 down - sudo brctl delbr br100 - Set MySQL Password ------------------ diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index f2302e37bf..fa3f86203d 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -8,21 +8,23 @@ _XTRACE_NEUTRON_LB=$(set +o | grep xtrace) set +o xtrace function neutron_lb_cleanup { - sudo ip link set $PUBLIC_BRIDGE down - sudo brctl delbr $PUBLIC_BRIDGE + sudo ip link delete $PUBLIC_BRIDGE + bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/` + if [[ -z "$bridge_list" ]]; then + return + fi if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do + for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do sudo ip link delete $port done elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do + for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do sudo ip link delete $port done fi - for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do - sudo ip link set $bridge down - sudo brctl delbr $bridge + for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do + sudo ip link delete $bridge done } diff --git a/tools/worlddump.py b/tools/worlddump.py index 750608210a..88af19d2e3 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -163,7 +163,9 @@ def _netns_list(): def network_dump(): _header("Network Dump") + _dump_cmd("bridge link") _dump_cmd("brctl show") + _dump_cmd("ip link show type bridge") ip_cmds = ["neigh", "addr", "link", "route"] for cmd in ip_cmds + ['netns']: _dump_cmd("ip %s" % cmd) From 5488336545ddc2dadadfae61a0367074cc16e5e2 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 2 Mar 2019 06:14:21 +0000 Subject: [PATCH 1039/1936] Updated from generate-devstack-plugins-list Change-Id: I3a9cbc9186b7555227f0ddef3da134e98b6cfc15 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9901c1ca39..ee278c7150 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -161,6 +161,7 @@ qinling `git://git.openstack.org/openstack/qinlin qinling-dashboard `git://git.openstack.org/openstack/qinling-dashboard `__ rally `git://git.openstack.org/openstack/rally `__ rally-openstack `git://git.openstack.org/openstack/rally-openstack `__ +rsd-virt-for-nova 
`git://git.openstack.org/openstack/rsd-virt-for-nova `__ sahara `git://git.openstack.org/openstack/sahara `__ sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ scalpels `git://git.openstack.org/openstack/scalpels `__ From 8b003e0ed2f41fb1ab0007c969bba33c5701f625 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 4 Mar 2019 16:50:42 +1100 Subject: [PATCH 1040/1936] Switch GIT_BASE to https:// Infra are looking at implementing gitea for serving git, but this does not have a git protocol handler ATM. Switch GIT_BASE, and some testing, to https:// to be in a better position to handle this. Change-Id: I97a7b0de7b1ec2dd15d15c58699a631b09273df1 Story: #2004627 Task: #29701 --- roles/write-devstack-local-conf/library/test.py | 14 +++++++------- stackrc | 3 +-- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 88d404b856..45fae3da28 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -40,9 +40,9 @@ def test_plugins(self): # We use ordereddict here to make sure the plugins are in the # *wrong* order for testing. plugins = OrderedDict([ - ('bar', 'git://git.openstack.org/openstack/bar-plugin'), - ('foo', 'git://git.openstack.org/openstack/foo-plugin'), - ('baz', 'git://git.openstack.org/openstack/baz-plugin'), + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ('baz', 'https://git.openstack.org/openstack/baz-plugin'), ]) p = dict(localrc=localrc, local_conf=local_conf, @@ -94,8 +94,8 @@ def test_plugin_deps(self): # We use ordereddict here to make sure the plugins are in the # *wrong* order for testing. plugins = OrderedDict([ - ('bar-plugin', 'git://git.openstack.org/openstack/bar-plugin'), - ('foo-plugin', 'git://git.openstack.org/openstack/foo-plugin'), + ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'), ]) p = dict(localrc=localrc, local_conf=local_conf, @@ -227,8 +227,8 @@ def test_plugin_circular_deps(self): # We use ordereddict here to make sure the plugins are in the # *wrong* order for testing. 
plugins = OrderedDict([ - ('bar', 'git://git.openstack.org/openstack/bar-plugin'), - ('foo', 'git://git.openstack.org/openstack/foo-plugin'), + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), ]) p = dict(localrc=localrc, local_conf=local_conf, diff --git a/stackrc b/stackrc index 53868f3d9d..652f2de0d5 100644 --- a/stackrc +++ b/stackrc @@ -237,8 +237,7 @@ WSGI_MODE=${WSGI_MODE:-"uwsgi"} # ------------ # Base GIT Repo URL -# Another option is https://git.openstack.org -GIT_BASE=${GIT_BASE:-git://git.openstack.org} +GIT_BASE=${GIT_BASE:-https://git.openstack.org} # The location of REQUIREMENTS once cloned REQUIREMENTS_DIR=$DEST/requirements From 6509fd334e0956dc44870d19a80381578e3e3574 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 4 Mar 2019 17:26:20 +1100 Subject: [PATCH 1041/1936] Remove git:// openstack references This updates various parts of documentation to use https, rather than git, which is not implemented by gitea Change-Id: I8d2a93128dcdaba0a00b43d18652781733f90cf0 Story: #2004627 Task: #29701 --- README.rst | 2 +- doc/source/guides/neutron.rst | 4 +- doc/source/plugin-registry.rst | 352 ++++++++++++------------ doc/source/plugins.rst | 4 +- tools/generate-devstack-plugins-list.sh | 4 +- 5 files changed, 183 insertions(+), 183 deletions(-) diff --git a/README.rst b/README.rst index 6885546c94..ad7ede45c5 100644 --- a/README.rst +++ b/README.rst @@ -38,7 +38,7 @@ You can also pick specific OpenStack project releases by setting the appropriate `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: - GLANCE_REPO=git://git.openstack.org/openstack/glance.git + GLANCE_REPO=https://git.openstack.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed Start A Dev Cloud diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 12c6d6902d..80b2f85285 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -567,7 +567,7 @@ you do not require them. Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap Q_USE_PROVIDER_NETWORKING=True - enable_plugin neutron git://git.openstack.org/openstack/neutron + enable_plugin neutron https://git.openstack.org/openstack/neutron ## MacVTap agent options Q_AGENT=macvtap @@ -622,7 +622,7 @@ For the MacVTap compute node, use this local.conf: # Services that a compute node runs disable_all_services - enable_plugin neutron git://git.openstack.org/openstack/neutron + enable_plugin neutron https://git.openstack.org/openstack/neutron ENABLED_SERVICES+=n-cpu,q-agt ## MacVTap agent options diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ee278c7150..9338d18819 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,182 +24,182 @@ official OpenStack projects. 
====================================== === Plugin Name URL ====================================== === -almanach `git://git.openstack.org/openstack/almanach `__ -aodh `git://git.openstack.org/openstack/aodh `__ -apmec `git://git.openstack.org/openstack/apmec `__ -barbican `git://git.openstack.org/openstack/barbican `__ -bilean `git://git.openstack.org/openstack/bilean `__ -blazar `git://git.openstack.org/openstack/blazar `__ -broadview-collector `git://git.openstack.org/openstack/broadview-collector `__ -castellan-ui `git://git.openstack.org/openstack/castellan-ui `__ -ceilometer `git://git.openstack.org/openstack/ceilometer `__ -ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm `__ -cloudkitty `git://git.openstack.org/openstack/cloudkitty `__ -collectd-openstack-plugins `git://git.openstack.org/openstack/collectd-openstack-plugins `__ -congress `git://git.openstack.org/openstack/congress `__ -cyborg `git://git.openstack.org/openstack/cyborg `__ -designate `git://git.openstack.org/openstack/designate `__ -devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ -devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 `__ -devstack-plugin-bdd `git://git.openstack.org/openstack/devstack-plugin-bdd `__ -devstack-plugin-ceph `git://git.openstack.org/openstack/devstack-plugin-ceph `__ -devstack-plugin-container `git://git.openstack.org/openstack/devstack-plugin-container `__ -devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs `__ -devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs `__ -devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka `__ -devstack-plugin-libvirt-qemu `git://git.openstack.org/openstack/devstack-plugin-libvirt-qemu `__ -devstack-plugin-mariadb `git://git.openstack.org/openstack/devstack-plugin-mariadb `__ -devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs `__ -devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika `__ -devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog `__ -devstack-plugin-vmax `git://git.openstack.org/openstack/devstack-plugin-vmax `__ -devstack-plugin-zmq `git://git.openstack.org/openstack/devstack-plugin-zmq `__ -dragonflow `git://git.openstack.org/openstack/dragonflow `__ -drbd-devstack `git://git.openstack.org/openstack/drbd-devstack `__ -ec2-api `git://git.openstack.org/openstack/ec2-api `__ -freezer `git://git.openstack.org/openstack/freezer `__ -freezer-api `git://git.openstack.org/openstack/freezer-api `__ -freezer-tempest-plugin `git://git.openstack.org/openstack/freezer-tempest-plugin `__ -freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__ -gce-api `git://git.openstack.org/openstack/gce-api `__ -glare `git://git.openstack.org/openstack/glare `__ -group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ -gyan `git://git.openstack.org/openstack/gyan `__ -heat `git://git.openstack.org/openstack/heat `__ -heat-dashboard `git://git.openstack.org/openstack/heat-dashboard `__ -horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox `__ -ironic `git://git.openstack.org/openstack/ironic `__ -ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ -ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ -ironic-ui `git://git.openstack.org/openstack/ironic-ui `__ -karbor 
`git://git.openstack.org/openstack/karbor `__ -karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard `__ -keystone `git://git.openstack.org/openstack/keystone `__ -kingbird `git://git.openstack.org/openstack/kingbird `__ -kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes `__ -kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork `__ -kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin `__ -magnum `git://git.openstack.org/openstack/magnum `__ -magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ -manila `git://git.openstack.org/openstack/manila `__ -manila-ui `git://git.openstack.org/openstack/manila-ui `__ -masakari `git://git.openstack.org/openstack/masakari `__ -meteos `git://git.openstack.org/openstack/meteos `__ -meteos-ui `git://git.openstack.org/openstack/meteos-ui `__ -mistral `git://git.openstack.org/openstack/mistral `__ -mixmatch `git://git.openstack.org/openstack/mixmatch `__ -mogan `git://git.openstack.org/openstack/mogan `__ -mogan-ui `git://git.openstack.org/openstack/mogan-ui `__ -monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ -monasca-api `git://git.openstack.org/openstack/monasca-api `__ -monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ -monasca-events-api `git://git.openstack.org/openstack/monasca-events-api `__ -monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ -monasca-tempest-plugin `git://git.openstack.org/openstack/monasca-tempest-plugin `__ -monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ -murano `git://git.openstack.org/openstack/murano `__ -networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ -networking-ansible `git://git.openstack.org/openstack/networking-ansible `__ -networking-arista `git://git.openstack.org/openstack/networking-arista `__ -networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe `__ -networking-baremetal `git://git.openstack.org/openstack/networking-baremetal `__ -networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn `__ -networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ -networking-calico `git://git.openstack.org/openstack/networking-calico `__ -networking-cisco `git://git.openstack.org/openstack/networking-cisco `__ -networking-cumulus `git://git.openstack.org/openstack/networking-cumulus `__ -networking-dpm `git://git.openstack.org/openstack/networking-dpm `__ -networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ -networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ -networking-hpe `git://git.openstack.org/openstack/networking-hpe `__ -networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ -networking-hyperv `git://git.openstack.org/openstack/networking-hyperv `__ -networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ -networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ -networking-lagopus `git://git.openstack.org/openstack/networking-lagopus `__ -networking-midonet `git://git.openstack.org/openstack/networking-midonet `__ -networking-mlnx `git://git.openstack.org/openstack/networking-mlnx `__ -networking-nec `git://git.openstack.org/openstack/networking-nec `__ -networking-odl `git://git.openstack.org/openstack/networking-odl `__ -networking-onos `git://git.openstack.org/openstack/networking-onos `__ -networking-opencontrail 
`git://git.openstack.org/openstack/networking-opencontrail `__ -networking-ovn `git://git.openstack.org/openstack/networking-ovn `__ -networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk `__ -networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid `__ -networking-powervm `git://git.openstack.org/openstack/networking-powervm `__ -networking-sfc `git://git.openstack.org/openstack/networking-sfc `__ -networking-spp `git://git.openstack.org/openstack/networking-spp `__ -networking-vpp `git://git.openstack.org/openstack/networking-vpp `__ -networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ -neutron `git://git.openstack.org/openstack/neutron `__ -neutron-classifier `git://git.openstack.org/openstack/neutron-classifier `__ -neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ -neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas `__ -neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard `__ -neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ -neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ -neutron-tempest-plugin `git://git.openstack.org/openstack/neutron-tempest-plugin `__ -neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ -neutron-vpnaas-dashboard `git://git.openstack.org/openstack/neutron-vpnaas-dashboard `__ -nova-dpm `git://git.openstack.org/openstack/nova-dpm `__ -nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ -nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy `__ -nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ -oaktree `git://git.openstack.org/openstack/oaktree `__ -octavia `git://git.openstack.org/openstack/octavia `__ -octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard `__ -omni `git://git.openstack.org/openstack/omni `__ -openstacksdk `git://git.openstack.org/openstack/openstacksdk `__ -os-faults `git://git.openstack.org/openstack/os-faults `__ -os-xenapi `git://git.openstack.org/openstack/os-xenapi `__ -osprofiler `git://git.openstack.org/openstack/osprofiler `__ -oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin `__ -panko `git://git.openstack.org/openstack/panko `__ -patrole `git://git.openstack.org/openstack/patrole `__ -picasso `git://git.openstack.org/openstack/picasso `__ -qinling `git://git.openstack.org/openstack/qinling `__ -qinling-dashboard `git://git.openstack.org/openstack/qinling-dashboard `__ -rally `git://git.openstack.org/openstack/rally `__ -rally-openstack `git://git.openstack.org/openstack/rally-openstack `__ -rsd-virt-for-nova `git://git.openstack.org/openstack/rsd-virt-for-nova `__ -sahara `git://git.openstack.org/openstack/sahara `__ -sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ -scalpels `git://git.openstack.org/openstack/scalpels `__ -searchlight `git://git.openstack.org/openstack/searchlight `__ -searchlight-ui `git://git.openstack.org/openstack/searchlight-ui `__ -senlin `git://git.openstack.org/openstack/senlin `__ -slogging `git://git.openstack.org/openstack/slogging `__ -solum `git://git.openstack.org/openstack/solum `__ -stackube `git://git.openstack.org/openstack/stackube `__ -storlets `git://git.openstack.org/openstack/storlets `__ -stx-config `git://git.openstack.org/openstack/stx-config `__ -stx-fault `git://git.openstack.org/openstack/stx-fault `__ -stx-integ `git://git.openstack.org/openstack/stx-integ `__ -stx-metal 
`git://git.openstack.org/openstack/stx-metal `__ -stx-nfv `git://git.openstack.org/openstack/stx-nfv `__ -stx-update `git://git.openstack.org/openstack/stx-update `__ -tacker `git://git.openstack.org/openstack/tacker `__ -tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ -tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard `__ -tatu `git://git.openstack.org/openstack/tatu `__ -telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin `__ -tobiko `git://git.openstack.org/openstack/tobiko `__ -tricircle `git://git.openstack.org/openstack/tricircle `__ -trio2o `git://git.openstack.org/openstack/trio2o `__ -trove `git://git.openstack.org/openstack/trove `__ -trove-dashboard `git://git.openstack.org/openstack/trove-dashboard `__ -valet `git://git.openstack.org/openstack/valet `__ -vitrage `git://git.openstack.org/openstack/vitrage `__ -vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard `__ -vitrage-tempest-plugin `git://git.openstack.org/openstack/vitrage-tempest-plugin `__ -vmware-nsx `git://git.openstack.org/openstack/vmware-nsx `__ -vmware-vspc `git://git.openstack.org/openstack/vmware-vspc `__ -watcher `git://git.openstack.org/openstack/watcher `__ -watcher-dashboard `git://git.openstack.org/openstack/watcher-dashboard `__ -zaqar `git://git.openstack.org/openstack/zaqar `__ -zaqar-ui `git://git.openstack.org/openstack/zaqar-ui `__ -zun `git://git.openstack.org/openstack/zun `__ -zun-ui `git://git.openstack.org/openstack/zun-ui `__ +almanach `https://git.openstack.org/openstack/almanach `__ +aodh `https://git.openstack.org/openstack/aodh `__ +apmec `https://git.openstack.org/openstack/apmec `__ +barbican `https://git.openstack.org/openstack/barbican `__ +bilean `https://git.openstack.org/openstack/bilean `__ +blazar `https://git.openstack.org/openstack/blazar `__ +broadview-collector `https://git.openstack.org/openstack/broadview-collector `__ +castellan-ui `https://git.openstack.org/openstack/castellan-ui `__ +ceilometer `https://git.openstack.org/openstack/ceilometer `__ +ceilometer-powervm `https://git.openstack.org/openstack/ceilometer-powervm `__ +cloudkitty `https://git.openstack.org/openstack/cloudkitty `__ +collectd-openstack-plugins `https://git.openstack.org/openstack/collectd-openstack-plugins `__ +congress `https://git.openstack.org/openstack/congress `__ +cyborg `https://git.openstack.org/openstack/cyborg `__ +designate `https://git.openstack.org/openstack/designate `__ +devstack-plugin-additional-pkg-repos `https://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ +devstack-plugin-amqp1 `https://git.openstack.org/openstack/devstack-plugin-amqp1 `__ +devstack-plugin-bdd `https://git.openstack.org/openstack/devstack-plugin-bdd `__ +devstack-plugin-ceph `https://git.openstack.org/openstack/devstack-plugin-ceph `__ +devstack-plugin-container `https://git.openstack.org/openstack/devstack-plugin-container `__ +devstack-plugin-glusterfs `https://git.openstack.org/openstack/devstack-plugin-glusterfs `__ +devstack-plugin-hdfs `https://git.openstack.org/openstack/devstack-plugin-hdfs `__ +devstack-plugin-kafka `https://git.openstack.org/openstack/devstack-plugin-kafka `__ +devstack-plugin-libvirt-qemu `https://git.openstack.org/openstack/devstack-plugin-libvirt-qemu `__ +devstack-plugin-mariadb `https://git.openstack.org/openstack/devstack-plugin-mariadb `__ +devstack-plugin-nfs `https://git.openstack.org/openstack/devstack-plugin-nfs `__ +devstack-plugin-pika 
`https://git.openstack.org/openstack/devstack-plugin-pika `__ +devstack-plugin-sheepdog `https://git.openstack.org/openstack/devstack-plugin-sheepdog `__ +devstack-plugin-vmax `https://git.openstack.org/openstack/devstack-plugin-vmax `__ +devstack-plugin-zmq `https://git.openstack.org/openstack/devstack-plugin-zmq `__ +dragonflow `https://git.openstack.org/openstack/dragonflow `__ +drbd-devstack `https://git.openstack.org/openstack/drbd-devstack `__ +ec2-api `https://git.openstack.org/openstack/ec2-api `__ +freezer `https://git.openstack.org/openstack/freezer `__ +freezer-api `https://git.openstack.org/openstack/freezer-api `__ +freezer-tempest-plugin `https://git.openstack.org/openstack/freezer-tempest-plugin `__ +freezer-web-ui `https://git.openstack.org/openstack/freezer-web-ui `__ +gce-api `https://git.openstack.org/openstack/gce-api `__ +glare `https://git.openstack.org/openstack/glare `__ +group-based-policy `https://git.openstack.org/openstack/group-based-policy `__ +gyan `https://git.openstack.org/openstack/gyan `__ +heat `https://git.openstack.org/openstack/heat `__ +heat-dashboard `https://git.openstack.org/openstack/heat-dashboard `__ +horizon-mellanox `https://git.openstack.org/openstack/horizon-mellanox `__ +ironic `https://git.openstack.org/openstack/ironic `__ +ironic-inspector `https://git.openstack.org/openstack/ironic-inspector `__ +ironic-staging-drivers `https://git.openstack.org/openstack/ironic-staging-drivers `__ +ironic-ui `https://git.openstack.org/openstack/ironic-ui `__ +karbor `https://git.openstack.org/openstack/karbor `__ +karbor-dashboard `https://git.openstack.org/openstack/karbor-dashboard `__ +keystone `https://git.openstack.org/openstack/keystone `__ +kingbird `https://git.openstack.org/openstack/kingbird `__ +kuryr-kubernetes `https://git.openstack.org/openstack/kuryr-kubernetes `__ +kuryr-libnetwork `https://git.openstack.org/openstack/kuryr-libnetwork `__ +kuryr-tempest-plugin `https://git.openstack.org/openstack/kuryr-tempest-plugin `__ +magnum `https://git.openstack.org/openstack/magnum `__ +magnum-ui `https://git.openstack.org/openstack/magnum-ui `__ +manila `https://git.openstack.org/openstack/manila `__ +manila-ui `https://git.openstack.org/openstack/manila-ui `__ +masakari `https://git.openstack.org/openstack/masakari `__ +meteos `https://git.openstack.org/openstack/meteos `__ +meteos-ui `https://git.openstack.org/openstack/meteos-ui `__ +mistral `https://git.openstack.org/openstack/mistral `__ +mixmatch `https://git.openstack.org/openstack/mixmatch `__ +mogan `https://git.openstack.org/openstack/mogan `__ +mogan-ui `https://git.openstack.org/openstack/mogan-ui `__ +monasca-analytics `https://git.openstack.org/openstack/monasca-analytics `__ +monasca-api `https://git.openstack.org/openstack/monasca-api `__ +monasca-ceilometer `https://git.openstack.org/openstack/monasca-ceilometer `__ +monasca-events-api `https://git.openstack.org/openstack/monasca-events-api `__ +monasca-log-api `https://git.openstack.org/openstack/monasca-log-api `__ +monasca-tempest-plugin `https://git.openstack.org/openstack/monasca-tempest-plugin `__ +monasca-transform `https://git.openstack.org/openstack/monasca-transform `__ +murano `https://git.openstack.org/openstack/murano `__ +networking-6wind `https://git.openstack.org/openstack/networking-6wind `__ +networking-ansible `https://git.openstack.org/openstack/networking-ansible `__ +networking-arista `https://git.openstack.org/openstack/networking-arista `__ +networking-bagpipe 
`https://git.openstack.org/openstack/networking-bagpipe `__ +networking-baremetal `https://git.openstack.org/openstack/networking-baremetal `__ +networking-bgpvpn `https://git.openstack.org/openstack/networking-bgpvpn `__ +networking-brocade `https://git.openstack.org/openstack/networking-brocade `__ +networking-calico `https://git.openstack.org/openstack/networking-calico `__ +networking-cisco `https://git.openstack.org/openstack/networking-cisco `__ +networking-cumulus `https://git.openstack.org/openstack/networking-cumulus `__ +networking-dpm `https://git.openstack.org/openstack/networking-dpm `__ +networking-fortinet `https://git.openstack.org/openstack/networking-fortinet `__ +networking-generic-switch `https://git.openstack.org/openstack/networking-generic-switch `__ +networking-hpe `https://git.openstack.org/openstack/networking-hpe `__ +networking-huawei `https://git.openstack.org/openstack/networking-huawei `__ +networking-hyperv `https://git.openstack.org/openstack/networking-hyperv `__ +networking-infoblox `https://git.openstack.org/openstack/networking-infoblox `__ +networking-l2gw `https://git.openstack.org/openstack/networking-l2gw `__ +networking-lagopus `https://git.openstack.org/openstack/networking-lagopus `__ +networking-midonet `https://git.openstack.org/openstack/networking-midonet `__ +networking-mlnx `https://git.openstack.org/openstack/networking-mlnx `__ +networking-nec `https://git.openstack.org/openstack/networking-nec `__ +networking-odl `https://git.openstack.org/openstack/networking-odl `__ +networking-onos `https://git.openstack.org/openstack/networking-onos `__ +networking-opencontrail `https://git.openstack.org/openstack/networking-opencontrail `__ +networking-ovn `https://git.openstack.org/openstack/networking-ovn `__ +networking-ovs-dpdk `https://git.openstack.org/openstack/networking-ovs-dpdk `__ +networking-plumgrid `https://git.openstack.org/openstack/networking-plumgrid `__ +networking-powervm `https://git.openstack.org/openstack/networking-powervm `__ +networking-sfc `https://git.openstack.org/openstack/networking-sfc `__ +networking-spp `https://git.openstack.org/openstack/networking-spp `__ +networking-vpp `https://git.openstack.org/openstack/networking-vpp `__ +networking-vsphere `https://git.openstack.org/openstack/networking-vsphere `__ +neutron `https://git.openstack.org/openstack/neutron `__ +neutron-classifier `https://git.openstack.org/openstack/neutron-classifier `__ +neutron-dynamic-routing `https://git.openstack.org/openstack/neutron-dynamic-routing `__ +neutron-fwaas `https://git.openstack.org/openstack/neutron-fwaas `__ +neutron-fwaas-dashboard `https://git.openstack.org/openstack/neutron-fwaas-dashboard `__ +neutron-lbaas `https://git.openstack.org/openstack/neutron-lbaas `__ +neutron-lbaas-dashboard `https://git.openstack.org/openstack/neutron-lbaas-dashboard `__ +neutron-tempest-plugin `https://git.openstack.org/openstack/neutron-tempest-plugin `__ +neutron-vpnaas `https://git.openstack.org/openstack/neutron-vpnaas `__ +neutron-vpnaas-dashboard `https://git.openstack.org/openstack/neutron-vpnaas-dashboard `__ +nova-dpm `https://git.openstack.org/openstack/nova-dpm `__ +nova-lxd `https://git.openstack.org/openstack/nova-lxd `__ +nova-mksproxy `https://git.openstack.org/openstack/nova-mksproxy `__ +nova-powervm `https://git.openstack.org/openstack/nova-powervm `__ +oaktree `https://git.openstack.org/openstack/oaktree `__ +octavia `https://git.openstack.org/openstack/octavia `__ +octavia-dashboard 
`https://git.openstack.org/openstack/octavia-dashboard `__ +omni `https://git.openstack.org/openstack/omni `__ +openstacksdk `https://git.openstack.org/openstack/openstacksdk `__ +os-faults `https://git.openstack.org/openstack/os-faults `__ +os-xenapi `https://git.openstack.org/openstack/os-xenapi `__ +osprofiler `https://git.openstack.org/openstack/osprofiler `__ +oswin-tempest-plugin `https://git.openstack.org/openstack/oswin-tempest-plugin `__ +panko `https://git.openstack.org/openstack/panko `__ +patrole `https://git.openstack.org/openstack/patrole `__ +picasso `https://git.openstack.org/openstack/picasso `__ +qinling `https://git.openstack.org/openstack/qinling `__ +qinling-dashboard `https://git.openstack.org/openstack/qinling-dashboard `__ +rally `https://git.openstack.org/openstack/rally `__ +rally-openstack `https://git.openstack.org/openstack/rally-openstack `__ +rsd-virt-for-nova `https://git.openstack.org/openstack/rsd-virt-for-nova `__ +sahara `https://git.openstack.org/openstack/sahara `__ +sahara-dashboard `https://git.openstack.org/openstack/sahara-dashboard `__ +scalpels `https://git.openstack.org/openstack/scalpels `__ +searchlight `https://git.openstack.org/openstack/searchlight `__ +searchlight-ui `https://git.openstack.org/openstack/searchlight-ui `__ +senlin `https://git.openstack.org/openstack/senlin `__ +slogging `https://git.openstack.org/openstack/slogging `__ +solum `https://git.openstack.org/openstack/solum `__ +stackube `https://git.openstack.org/openstack/stackube `__ +storlets `https://git.openstack.org/openstack/storlets `__ +stx-config `https://git.openstack.org/openstack/stx-config `__ +stx-fault `https://git.openstack.org/openstack/stx-fault `__ +stx-integ `https://git.openstack.org/openstack/stx-integ `__ +stx-metal `https://git.openstack.org/openstack/stx-metal `__ +stx-nfv `https://git.openstack.org/openstack/stx-nfv `__ +stx-update `https://git.openstack.org/openstack/stx-update `__ +tacker `https://git.openstack.org/openstack/tacker `__ +tap-as-a-service `https://git.openstack.org/openstack/tap-as-a-service `__ +tap-as-a-service-dashboard `https://git.openstack.org/openstack/tap-as-a-service-dashboard `__ +tatu `https://git.openstack.org/openstack/tatu `__ +telemetry-tempest-plugin `https://git.openstack.org/openstack/telemetry-tempest-plugin `__ +tobiko `https://git.openstack.org/openstack/tobiko `__ +tricircle `https://git.openstack.org/openstack/tricircle `__ +trio2o `https://git.openstack.org/openstack/trio2o `__ +trove `https://git.openstack.org/openstack/trove `__ +trove-dashboard `https://git.openstack.org/openstack/trove-dashboard `__ +valet `https://git.openstack.org/openstack/valet `__ +vitrage `https://git.openstack.org/openstack/vitrage `__ +vitrage-dashboard `https://git.openstack.org/openstack/vitrage-dashboard `__ +vitrage-tempest-plugin `https://git.openstack.org/openstack/vitrage-tempest-plugin `__ +vmware-nsx `https://git.openstack.org/openstack/vmware-nsx `__ +vmware-vspc `https://git.openstack.org/openstack/vmware-vspc `__ +watcher `https://git.openstack.org/openstack/watcher `__ +watcher-dashboard `https://git.openstack.org/openstack/watcher-dashboard `__ +zaqar `https://git.openstack.org/openstack/zaqar `__ +zaqar-ui `https://git.openstack.org/openstack/zaqar-ui `__ +zun `https://git.openstack.org/openstack/zun `__ +zun-ui `https://git.openstack.org/openstack/zun-ui `__ ====================================== === diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index c69472955c..b1f2397454 100644 --- 
a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -99,7 +99,7 @@ They are added in the following format:: An example would be as follows:: - enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api + enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api plugin.sh contract ================== @@ -277,7 +277,7 @@ be needed in your ``jenkins/jobs/.yaml`` definition in # note the actual url here is somewhat irrelevant because it # caches in nodepool, however make it a valid url for # documentation purposes. - export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api" + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api" See Also ======== diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh index 95f13318b8..27c9c4118e 100755 --- a/tools/generate-devstack-plugins-list.sh +++ b/tools/generate-devstack-plugins-list.sh @@ -65,7 +65,7 @@ name_col_len=$(( name_col_len + 2 )) # ====================== === # Plugin Name URL # ====================== === -# foobar `git://... `__ +# foobar `https://... `__ # ... printf "\n\n" @@ -74,7 +74,7 @@ printf "%-${name_col_len}s %s\n" "Plugin Name" "URL" title_underline ${name_col_len} for plugin in ${sorted_plugins}; do - giturl="git://git.openstack.org/openstack/${plugin}" + giturl="https://git.openstack.org/openstack/${plugin}" gitlink="https://git.openstack.org/cgit/openstack/${plugin}" printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__" done From 363acd9d3d83af2ab088d84b069c1ffba2512e3b Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Mon, 4 Mar 2019 17:50:47 -0500 Subject: [PATCH 1042/1936] Cinder: install targetcli-fb for Bionic The "targetcli" package no longer exists in Bionic and has been superseded by "targetcli-fb". Change-Id: I99e4e8ad8fbb6e7c86571af8b0c222dafacf6447 --- lib/cinder | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index d69b21efd0..48f3e45c55 100644 --- a/lib/cinder +++ b/lib/cinder @@ -434,7 +434,11 @@ function install_cinder { if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then install_package tgt elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then - install_package targetcli + if [[ ${DISTRO} == "bionic" ]]; then + install_package targetcli-fb + else + install_package targetcli + fi fi } From 4eb455aa287155646f78a4621365b37f9df30793 Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Fri, 22 Feb 2019 20:11:35 +0100 Subject: [PATCH 1043/1936] Capture the content of 'audit.log' file On CentOS/ Fedora machines, this can be useful when QEMU silently fails to start up due to SELinux denials. For Debian-based machines, which use AppAromor, DevStack already captures the output of 'kern.log' (via `journalctl -t kernel` redirected into 'syslog.txt.gz'). 
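For readers of the captured artifact, a minimal sketch of how SELinux denials are usually dug out of it afterwards (plain grep/ausearch usage, not something this change adds; the audit tools may not be installed on every image):

    # show AVC denials (e.g. QEMU blocked by the svirt policy) in the captured file
    grep -i 'avc.*denied' audit.log
    # or, where the audit userspace tools are available:
    ausearch --input audit.log -m avc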
Change-Id: I231b22664f0944b905e00568759785615a1d47c3 Acked-by: Clark Bolyan Signed-off-by: Kashyap Chamarthy --- .zuul.yaml | 1 + roles/capture-system-logs/tasks/main.yaml | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index afe400e1a4..8ede2fce80 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -218,6 +218,7 @@ '{{ stage_dir }}/core': logs '{{ stage_dir }}/listen53.txt': logs '{{ stage_dir }}/deprecations.log': logs + '{{ stage_dir }}/audit.log': logs /var/log/ceph: logs /var/log/openvswitch: logs /var/log/glusterfs: logs diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index de4f8eda08..905806d529 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -19,6 +19,17 @@ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt fi + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU + # failed to start due to denials from SELinux — useful for CentOS + # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack + # already captures the contents of /var/log/kern.log (via + # `journalctl -t kernel` redirected into syslog.txt.gz), which + # contains AppArmor-related messages. + if [ -f /var/log/audit/audit.log ] ; then + sudo cp /var/log/audit/audit.log {{stage_dir }}/audit.log && + chmod +r {{ stage_dir }}/audit.log; + fi + # gzip and save any coredumps in /var/core if [ -d /var/core ]; then sudo gzip -r /var/core From 676957ffcff78e790134776f71035a3b14974896 Mon Sep 17 00:00:00 2001 From: Hongbin Lu Date: Sun, 10 Mar 2019 14:55:48 +0000 Subject: [PATCH 1044/1936] Update etcd version to 3.3.12 The default version is 3.2.17 which seems to be too old. Some external tools are not compatible with this old version. For example, kubeadm cannot support external etcd version that is older than 3.2.18. This commit update the etcd version to 3.3.12 wich is the current latest version. Change-Id: Icfabbe580bb83a3babb98cc9fdbfb8eb388dc108 --- stackrc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackrc b/stackrc index 53868f3d9d..d104beaa6c 100644 --- a/stackrc +++ b/stackrc @@ -737,10 +737,10 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.2.17} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"} +ETCD_VERSION=${ETCD_VERSION:-v3.3.12} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"} # etcd v3.2.x doesn't have anything for s390x ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture From 610927f4255f0ed4877a3e85d628202e4af2f7d4 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 26 Feb 2019 18:39:51 +0100 Subject: [PATCH 1045/1936] zuul job: write the enable_plugin lines last Plugins must be the last items in the local.conf file otherwise the configuration set in the rest of the file is not applied to them (for example a different value of DEST.) 
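A minimal local.conf sketch of the ordering this enforces (the DEST value is illustrative; the plugin line reuses the ec2-api example from the plugins documentation above): variables the plugin needs to see come first, enable_plugin lines come last.

    [[local|localrc]]
    # hypothetical override that the plugin's settings file must pick up
    DEST=/opt/stack/alt
    # enable_plugin lines go last, after the variables they depend on
    enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api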
Change-Id: Ia001badca179c3f3436d5ecd26b0755a3f3a3078 --- .../library/devstack_local_conf.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index b1ad2dd4b4..1366a2274e 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -214,11 +214,14 @@ def __init__(self, localrc, localconf, base_services, services, plugins, self.base_dir = base_dir self.projects = projects self.project = project - if plugins: - self.handle_plugins(plugins) if services or base_services: self.handle_services(base_services, services or {}) self.handle_localrc(localrc) + # Plugins must be the last items in localrc, otherwise + # the configuration lines which follows them in the file are + # not applied to the plugins (for example, the value of DEST.) + if plugins: + self.handle_plugins(plugins) if localconf: self.handle_localconf(localconf) From 70d043dd6039fb55aa9d40d593839037bb8c43cf Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 12 Mar 2019 22:25:44 +0100 Subject: [PATCH 1046/1936] zuul: new variable to easily populate TEMPEST_PLUGINS TEMPEST_PLUGINS contains the list of the tempest plugins installed alongside tempest by lib/tempest. If TEMPEST_PLUGINS is not explicitly set, the new tempest_plugins variable is used to fill it by combining its items with the base devstack path. Change-Id: I9f1fa2755e16871ff9d6ba33fdeaf3023eedf8d4 --- doc/source/zuul_ci_jobs_migration.rst | 3 +- roles/write-devstack-local-conf/README.rst | 9 ++ .../library/devstack_local_conf.py | 26 ++++- .../write-devstack-local-conf/library/test.py | 98 ++++++++++++------- .../write-devstack-local-conf/tasks/main.yaml | 3 +- 5 files changed, 96 insertions(+), 43 deletions(-) diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index c00f06e41a..633f951d3d 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -102,7 +102,6 @@ job.parent. tox_envlist: 'all' devstack_localrc: KURYR_K8S_API_PORT: 8080 - TEMPEST_PLUGINS: '/opt/stack/kuryr-tempest-plugin' devstack_services: kubernetes-api: true kubernetes-controller-manager: true @@ -114,6 +113,8 @@ job.parent. kuryr-kubernetes: https://git.openstack.org/openstack/kuryr devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas + tempest_plugins: + - kuryr-tempest-plugin (...) Job variables diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst index e9739cdea8..d0a51e77c2 100644 --- a/roles/write-devstack-local-conf/README.rst +++ b/roles/write-devstack-local-conf/README.rst @@ -88,3 +88,12 @@ Write the local.conf file for use by devstack If a plugin declares a dependency on another plugin (via ``plugin_requires`` in the plugin's settings file), this role will automatically emit ``enable_plugin`` lines in the correct order. + +.. zuul:rolevar:: tempest_plugins + :type: list + + A list of tempest plugins which are installed alongside tempest. + + The list of values will be combined with the base devstack directory + and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable + already exists, its value is *not* changed. 
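To make the effect concrete (values taken from the unit test added below: base_dir of './test' and two plugins), the role ends up emitting a single localrc line of the form:

    TEMPEST_PLUGINS="./test/heat-tempest-plugin ./test/sahara-tests"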
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 1366a2274e..8df8dea6dd 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -207,13 +207,15 @@ def getPlugins(self): class LocalConf(object): def __init__(self, localrc, localconf, base_services, services, plugins, - base_dir, projects, project): + base_dir, projects, project, tempest_plugins): self.localrc = [] + self.warnings = [] self.meta_sections = {} self.plugin_deps = {} self.base_dir = base_dir self.projects = projects self.project = project + self.tempest_plugins = tempest_plugins if services or base_services: self.handle_services(base_services, services or {}) self.handle_localrc(localrc) @@ -246,12 +248,15 @@ def handle_services(self, base_services, services): def handle_localrc(self, localrc): lfg = False + tp = False if localrc: vg = VarGraph(localrc) for k, v in vg.getVars(): self.localrc.append('{}={}'.format(k, v)) if k == 'LIBS_FROM_GIT': lfg = True + elif k == 'TEMPEST_PLUGINS': + tp = True if not lfg and (self.projects or self.project): required_projects = [] @@ -266,6 +271,19 @@ def handle_localrc(self, localrc): self.localrc.append('LIBS_FROM_GIT={}'.format( ','.join(required_projects))) + if self.tempest_plugins: + if not tp: + tp_dirs = [] + for tempest_plugin in self.tempest_plugins: + tp_dirs.append(os.path.join(self.base_dir, tempest_plugin)) + self.localrc.append('TEMPEST_PLUGINS="{}"'.format( + ' '.join(tp_dirs))) + else: + self.warnings.append('TEMPEST_PLUGINS already defined ({}),' + 'requested value {} ignored'.format( + tp, self.tempest_plugins)) + + def handle_localconf(self, localconf): for phase, phase_data in localconf.items(): for fn, fn_data in phase_data.items(): @@ -300,6 +318,7 @@ def main(): path=dict(type='str'), projects=dict(type='dict'), project=dict(type='dict'), + tempest_plugins=dict(type='list'), ) ) @@ -311,10 +330,11 @@ def main(): p.get('plugins'), p.get('base_dir'), p.get('projects'), - p.get('project')) + p.get('project'), + p.get('tempest_plugins')) lc.write(p['path']) - module.exit_json() + module.exit_json(warnings=lc.warnings) try: diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 88d404b856..377c6930b8 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -23,6 +23,20 @@ from collections import OrderedDict class TestDevstackLocalConf(unittest.TestCase): + + @staticmethod + def _init_localconf(p): + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + return lc + def setUp(self): self.tmpdir = tempfile.mkdtemp() @@ -51,14 +65,7 @@ def test_plugins(self): plugins=plugins, base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf')) - lc = LocalConf(p.get('localrc'), - p.get('local_conf'), - p.get('base_services'), - p.get('services'), - p.get('plugins'), - p.get('base_dir'), - p.get('projects'), - p.get('project')) + lc = self._init_localconf(p) lc.write(p['path']) plugins = [] @@ -104,14 +111,7 @@ def test_plugin_deps(self): plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) - lc = LocalConf(p.get('localrc'), - p.get('local_conf'), - p.get('base_services'), - 
p.get('services'), - p.get('plugins'), - p.get('base_dir'), - p.get('projects'), - p.get('project')) + lc = self._init_localconf(p) lc.write(p['path']) plugins = [] @@ -145,14 +145,7 @@ def test_libs_from_git(self): path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects, project=project) - lc = LocalConf(p.get('localrc'), - p.get('local_conf'), - p.get('base_services'), - p.get('services'), - p.get('plugins'), - p.get('base_dir'), - p.get('projects'), - p.get('project')) + lc = self._init_localconf(p) lc.write(p['path']) lfg = None @@ -184,14 +177,7 @@ def test_overridelibs_from_git(self): base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects) - lc = LocalConf(p.get('localrc'), - p.get('local_conf'), - p.get('base_services'), - p.get('services'), - p.get('plugins'), - p.get('base_dir'), - p.get('projects'), - p.get('project')) + lc = self._init_localconf(p) lc.write(p['path']) lfg = None @@ -238,14 +224,50 @@ def test_plugin_circular_deps(self): base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) with self.assertRaises(Exception): - lc = LocalConf(p.get('localrc'), - p.get('local_conf'), - p.get('base_services'), - p.get('services'), - p.get('plugins'), - p.get('base_dir')) + lc = self._init_localconf(p) lc.write(p['path']) + def _find_tempest_plugins_value(self, file_path): + tp = None + with open(file_path) as f: + for line in f: + if line.startswith('TEMPEST_PLUGINS'): + found = line.strip().split('=')[1] + self.assertIsNone(tp, + "TEMPEST_PLUGIN ({}) found again ({})".format( + tp, found)) + tp = found + return tp + + def test_tempest_plugins(self): + "Test that TEMPEST_PLUGINS is correctly populated." + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp) + self.assertEqual(len(lc.warnings), 0) + + def test_tempest_plugins_not_overridden(self): + """Test that the existing value of TEMPEST_PLUGINS is not overridden + by the user-provided value, but a warning is emitted.""" + localrc = {'TEMPEST_PLUGINS': 'someplugin'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('someplugin', tp) + self.assertEqual(len(lc.warnings), 1) + if __name__ == '__main__': unittest.main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml index 9a6b083a2f..bfd086034b 100644 --- a/roles/write-devstack-local-conf/tasks/main.yaml +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -10,4 +10,5 @@ local_conf: "{{ devstack_local_conf|default(omit) }}" base_dir: "{{ devstack_base_dir|default(omit) }}" projects: "{{ zuul.projects }}" - project: "{{ zuul.project }}" \ No newline at end of file + project: "{{ zuul.project }}" + tempest_plugins: "{{ tempest_plugins|default(omit) }}" From deadc7c439f1126ba986b9c4f3c4d0b6d7774016 Mon Sep 17 00:00:00 2001 From: whoami-rajat Date: Thu, 14 Mar 2019 11:01:18 +0530 Subject: [PATCH 1047/1936] Fix : sort variables fetched from env list In bionic, when exporting env variables, the env list displays variables in 
an unsorted format. While fetching, we get 'https_proxy' before 'http_proxy', which fails the comparison against our expected values[1]. This patch sorts the variables fetched from env. [1] http://logs.openstack.org/30/643130/3/check/devstack-unit-tests/effbf7a/job-output.txt.gz#_2019-03-13_23_12_35_465026 Change-Id: Ie504eabf0d3fec1b97bc711e2702c06bcf75d158 --- tests/test_functions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_functions.sh b/tests/test_functions.sh index adf20cdb80..08143d2a68 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -272,7 +272,7 @@ function test_export_proxy_variables { export_proxy_variables expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy") - results=$(env | egrep '(http(s)?|no)_proxy=') + results=$(env | egrep '(http(s)?|no)_proxy=' | sort) if [[ $expected = $results ]]; then passed "OK: Proxy variables are exported when proxy variables are set" else From f28c75f87abdda00dcfc2f673816728676313c95 Mon Sep 17 00:00:00 2001 From: whoami-rajat Date: Wed, 13 Mar 2019 23:41:05 +0530 Subject: [PATCH 1048/1936] Cinder: create target directory for targetcli-fb package While installing 'targetcli-fb' on bionic, a dependent package 'python-rtslib-fb' requires (but doesn't create) /etc/target and fails[1] when this directory is not found. This patch creates the required directory. [1] http://logs.openstack.org/26/641926/1/check/cinder-tempest-dsvm-lvm-lio-barbican/8f95df1/logs/devstacklog.txt.gz#_2019-03-13_03_36_44_623 Related-bug: #1819819 Change-Id: I7efefead873037da4aaacbdc6284458bdaad0f6b --- lib/cinder | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/cinder b/lib/cinder index 48f3e45c55..047b25b3c5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -435,6 +435,9 @@ function install_cinder { install_package tgt elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then if [[ ${DISTRO} == "bionic" ]]; then + # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 + sudo mkdir -p /etc/target + install_package targetcli-fb else install_package targetcli From bcd8a50cc688ee39b4b998efe959e35c4c5b1408 Mon Sep 17 00:00:00 2001 From: Vlad Gridin Date: Thu, 14 Mar 2019 14:40:55 +0100 Subject: [PATCH 1049/1936] Fix installing tempest plugins When running stack.sh locally on stable branches with tempest enabled and TEMPEST_PLUGINS set, devstack will try to fetch the master branch of requirements, which fails if that branch is not tracked. Change-Id: Ia1ae6869a8fede2af5cd7c875e0946b6a75eb518 Closes-Bug: #1820051 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 1e344c8567..0e8bfe7404 100644 --- a/lib/tempest +++ b/lib/tempest @@ -674,7 +674,7 @@ function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements.
- (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt tox -evenv-tempest -- pip install -c u-c-m.txt $TEMPEST_PLUGINS echo "Checking installed Tempest plugins:" tox -evenv-tempest -- tempest list-plugins From ddb6179b0479ea9478cf2a146fe9b0d7592acaec Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 19 Mar 2019 15:04:12 -0400 Subject: [PATCH 1050/1936] Ease python 3 classifier check in check_python3_support_for_package_local This makes the grep match in check_python3_support_for_package_local the same as check_python3_support_for_package_remote. Change I0349de2026c49279ba7f262d5e86d37018d66326 in grenade started setting the PYTHON3_VERSION variable, and then we recently started using bionic nodes everywhere which means we're running python 3.6. The etcd3gw package has a python 3 and 3.5 classifier, but not 3.6: https://pypi.org/project/etcd3gw/ The pip_install function code that is dealing with installing py3 packages is hitting a problem installing etcd3gw if the package is local because of the more restrictive grep in the check_python3_support_for_package_local function, and since PYTHON3_VERSION=3.6 now, we don't install from py3 and install etcd3gw on python 2.7 which makes services like cinder-volume and cinder-backup, which use etcd3gw, fail when they are running under python 3 (they get module import errors). This simply removes the $ restriction on the grep. Looking at the change that added those local/remote functions: I243ea4b76f0d5ef57a03b5b0798a05468ee6de9b There is no explanation for the difference, it just said: Also, since not many packages are classified correctly, fallback to looking for just "Programming Language :: Python :: 3" and log a message for the package to highlight the problem. So that's what this change does. Note that alternatives would be: 1. Update the etcd3gw package to add the 3.6 classifier and do a release (this should probably happen anyway). 2. Add etcd3gw to ENABLED_PYTHON3_PACKAGES but that would be a short-term hack workaround. 
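For anyone reproducing the check by hand, this is roughly what the relaxed grep looks at for a local checkout (the path is illustrative; the command mirrors check_python3_support_for_package_local above):

    # list the python 3 classifiers a local package checkout advertises
    cd /opt/stack/etcd3gw && python setup.py --classifiers | grep 'Programming Language :: Python :: 3'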
Change-Id: Icd3768870ba0f1659bb2e6f002043d975047b73e Closes-Bug: #1820892 --- inc/python | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 419d5c5701..0926330fdf 100644 --- a/inc/python +++ b/inc/python @@ -101,7 +101,7 @@ function check_python3_support_for_package_local { cd $name set +e classifier=$(python setup.py --classifiers \ - | grep 'Programming Language :: Python :: 3$') + | grep 'Programming Language :: Python :: 3') set -e echo $classifier } @@ -297,6 +297,8 @@ function pip_install { echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) + else + echo "WARNING: Did not find python 3 classifier for local package $package_dir" fi fi else @@ -307,6 +309,8 @@ function pip_install { echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) + else + echo "WARNING: Did not find python 3 classifier for remote package $package_dir" fi fi fi From 245445b1bd35e580259f4decd9b0b047fe728794 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 21 Mar 2019 11:40:17 -0400 Subject: [PATCH 1051/1936] Use L3RouterPlugin alias The 'router' alias for the L3RouterPlugin has been in setup.cfg for a while, let's use it. Change-Id: Ifa196bd33959f1425df9a34cdab4acf4c3b8071b --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index c5a4c02cc5..127d46bcd7 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -44,7 +44,7 @@ fi # L3 Plugin to load for ML2 # For some flat network environment, they not want to extend L3 plugin. # Make sure it is able to set empty to ML2_L3_PLUGIN. -ML2_L3_PLUGIN=${ML2_L3_PLUGIN-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router} function populate_ml2_config { CONF=$1 From a30dd1cc96a75d565ae924e4698c4208150e5564 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Thu, 14 Mar 2019 13:19:36 +0200 Subject: [PATCH 1052/1936] Fixed support for Python 2 on Fedora 27 Change I7d16194d6ba1391ca31251d5b50cbb8de033fc38 added wrong behaviour on Fedora > 26 and CentOS 7: when python3 is disabled, pip should install packages in /usr/bin. Closes-Bug: #1820070 Change-Id: I3a8efbc8eb6e311db9c7347577c5d2047ba523a9 --- inc/python | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/inc/python b/inc/python index 419d5c5701..ba2048dd84 100644 --- a/inc/python +++ b/inc/python @@ -49,16 +49,9 @@ function get_python_exec_prefix { fi $xtrace - if python3_enabled && [[ "$os_VENDOR" == "CentOS" ]] || \ - [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then - # Default Python 3 install prefix changed to /usr/local in Fedora 27: - # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe - echo "/usr/local/bin" - elif is_fedora || is_suse; then - echo "/usr/bin" - else - echo "/usr/local/bin" - fi + local PYTHON_PATH=/usr/local/bin + ( is_fedora && !
python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin + echo $PYTHON_PATH } # Wrapper for ``pip install`` that only installs versions of libraries From e2853bf2d0a2e63d53d0f2d0cb21fd406f6289b0 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 13 Mar 2019 13:16:51 +0000 Subject: [PATCH 1053/1936] Set ownership of /etc/pki/ files for TLS OpenSSL 1.0.2 generates key files with default permissions: 644 and the files are copied to the /etc/pki/* directories with sudo. When the default CI node Ubuntu version was changed from Xenial => Bionic we changed from OpenSSL 1.0.2 => 1.1.0. And OpenSSL 1.1.0 generates key files with default permissions: 600. When we copy the key file to /etc/pki/* using sudo, it becomes owned by root and then the console-related users are unable to read it. This sets the ownership of the /etc/pki/ files to the user:group intended to read them. Closes-Bug: #1819794 Change-Id: I437a46c875cf633272e8cad0811e5557f2ac3641 --- lib/nova | 16 ++++++++++++++++ lib/nova_plugins/functions-libvirt | 8 +++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 033ebf3697..137a249c65 100644 --- a/lib/nova +++ b/lib/nova @@ -665,6 +665,22 @@ function configure_console_proxies { sudo mkdir -p /etc/pki/nova-novnc deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default, and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/nova-novnc to + # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read + # the key file. + sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc + # This is needed to enable TLS in the proxy itself, example log: + # WebSocket server settings: + # - Listen on 0.0.0.0:6080 + # - Flash security policy server + # - Web server (no directory listings). Web root: /usr/share/novnc + # - SSL/TLS support + # - proxying from 0.0.0.0:6080 to None:None + iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem" fi fi diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index fcb4777997..463986944f 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -155,9 +155,15 @@ EOF echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF sudo mkdir -p /etc/pki/libvirt-vnc - sudo chown libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/libvirt-vnc to + # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key + # file. + sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc fi fi From 315bc13595bf1ebae205f462fc6078f9fc3c840b Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Tue, 26 Mar 2019 18:47:06 +0000 Subject: [PATCH 1054/1936] Update DEVSTACK_SERIES to train stable/stein branch has been created now and current master is for train. 
Change-Id: I119f0388891db44c9753e49cedfeb32c74f2a40d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index c6304bba94..2924d3956c 100644 --- a/stackrc +++ b/stackrc @@ -258,7 +258,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="stein" +DEVSTACK_SERIES="train" ############## # From e769348882ea3e6b4465e3a6af72029633b2485e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 11 Feb 2019 12:26:03 +1100 Subject: [PATCH 1055/1936] Quote devstack_localrc arguments If you have devstack_localrc: ARGUMENT: "argument with spaces" The quotes get lost during YAML processing and the resulting file has ARGUMENT=argument with spaces which is a shell error. Quote all arguments to avoid this sort of thing. Change-Id: Ia63a53d745dfea7262bcdb5d46425f431c3ccfe5 --- .../write-devstack-local-conf/library/devstack_local_conf.py | 2 +- roles/write-devstack-local-conf/library/test.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 8df8dea6dd..3a8cd588b9 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -252,7 +252,7 @@ def handle_localrc(self, localrc): if localrc: vg = VarGraph(localrc) for k, v in vg.getVars(): - self.localrc.append('{}={}'.format(k, v)) + self.localrc.append('{}="{}"'.format(k, v)) if k == 'LIBS_FROM_GIT': lfg = True elif k == 'TEMPEST_PLUGINS': diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 2900949105..22bf2da55d 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -185,7 +185,7 @@ def test_overridelibs_from_git(self): for line in f: if line.startswith('LIBS_FROM_GIT'): lfg = line.strip().split('=')[1] - self.assertEqual('oslo.db', lfg) + self.assertEqual('"oslo.db"', lfg) def test_plugin_circular_deps(self): "Test that plugins with circular dependencies fail" @@ -265,7 +265,7 @@ def test_tempest_plugins_not_overridden(self): lc.write(p['path']) tp = self._find_tempest_plugins_value(p['path']) - self.assertEqual('someplugin', tp) + self.assertEqual('"someplugin"', tp) self.assertEqual(len(lc.warnings), 1) From 59ce1d902e2137bb7346a0d1f223e0ce1cb83216 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 11 Feb 2019 12:27:10 +1100 Subject: [PATCH 1056/1936] Export all journal logs Currently we only export the devstack@ services, and then separately export the kernel & sudo logs to syslog.txt. This leaves a lot of logs potentially behind in the journal for various daemons. Just export the whole lot. Using this output is currently very opaque and makes use of systemd export tools that are very un-discoverable. Add a README that will appear alongside the journal explaining how to actually use it. This is a template as it would be nice to put into things like the list of services that are in the journal, or maybe other magic. Also make sure we export the logs since the start timestamp; currently during a full run we drop the initial logs. 
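For reference, consuming the exported journal later looks roughly like this (the exact commands are spelled out in the README template this change adds below):

    # rebuild a native journal from the exported artifact, then query it
    /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
    journalctl --file ./output.journal -u 'devstack@*'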
Change-Id: Id2626f9113d82c6d524039acda8a8ec74afb2081 --- roles/export-devstack-journal/README.rst | 14 +++--- roles/export-devstack-journal/tasks/main.yaml | 45 +++++++++++++------ .../templates/devstack.journal.README.txt.j2 | 33 ++++++++++++++ 3 files changed, 73 insertions(+), 19 deletions(-) create mode 100644 roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst index a34e0706a9..9e3c919627 100644 --- a/roles/export-devstack-journal/README.rst +++ b/roles/export-devstack-journal/README.rst @@ -1,11 +1,15 @@ Export journal files from devstack services -Export the systemd journal for every devstack service in native -journal format as well as text. Also, export a syslog-style file with -kernal and sudo messages. +This performs a number of logging collection services -Writes the output to the ``logs/`` subdirectory of -``stage_dir``. +* Export the systemd journal in native format +* For every devstack service, export logs to text in a file named + ``screen-*`` to maintain legacy compatability when devstack services + used to run in a screen session and were logged separately. +* Export a syslog-style file with kernel and sudo messages for legacy + compatability. + +Writes the output to the ``logs/`` subdirectory of ``stage_dir``. **Role Variables** diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index 6e760c1f6f..cbec4447b8 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -6,32 +6,49 @@ state: directory owner: "{{ ansible_user }}" -# TODO: convert this to ansible -- name: Export journal files +- name: Export legacy stack screen log files become: true shell: cmd: | u="" name="" - for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do + for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz done - # Export the journal in export format to make it downloadable - # for later searching. It can then be rewritten to a journal native - # format locally using systemd-journal-remote. This makes a class of - # debugging much easier. We don't do the native conversion here as - # some distros do not package that tooling. - journalctl -u 'devstack@*' -o export | \ - xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz - - # The journal contains everything running under systemd, we'll - # build an old school version of the syslog with just the - # kernel and sudo messages. +- name: Export legacy syslog.txt + become: true + shell: + # The journal contains everything running under systemd, we'll + # build an old school version of the syslog with just the + # kernel and sudo messages. + cmd: | journalctl \ -t kernel \ -t sudo \ --no-pager \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ | gzip - > {{ stage_dir }}/logs/syslog.txt.gz + +# TODO: convert this to ansible +# - make a list of the above units +# - iterate the list here +- name: Export journal + become: true + shell: + # Export the journal in export format to make it downloadable + # for later searching. It can then be rewritten to a journal native + # format locally using systemd-journal-remote. This makes a class of + # debugging much easier. 
We don't do the native conversion here as + some distros do not package that tooling. + cmd: | + journalctl -o export \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz + +- name: Save journal README + become: true + template: + src: devstack.journal.README.txt.j2 + dest: '{{ stage_dir }}/logs/devstack.journal.README.txt' diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 new file mode 100644 index 0000000000..598eb7f3db --- /dev/null +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -0,0 +1,33 @@ +Devstack systemd journal +======================== + +The devstack.journal file is a copy of the systemd journal during the +devstack run. + +To use it, you will need to convert it so journalctl can read it +locally. After downloading the file: + + $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal + +Note this binary is not in the regular path. On Debian/Ubuntu +platforms, you will need to have the "systemd-journal-remote" package +installed. + +It should result in something like: + + Finishing after writing entries + +You can then use journalctl to examine this file. For example, to see +all devstack services try: + + $ journalctl --file ./output.journal -u 'devstack@*' + +To see just the cinder API server logs, restrict the match with + + $ journalctl --file ./output.journal -u 'devstack@c-api' + +There may be many types of logs available in the journal, a command like + + $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u + +can help you find interesting things to filter on. \ No newline at end of file From 2bbc9bbbb8616711a0a52540f9a35d4394f0d0ad Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 11 Feb 2019 12:25:38 +1100 Subject: [PATCH 1057/1936] Add service to tcpdump during run This adds a service that runs a background tcpdump during the run. This can be useful to capture various network traffic for post analysis. There didn't seem to quite be an appropriate place to document it, so a new debugging file is started, with some terse explanation of our various system-wide debugging services.
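Outside of Zuul, a minimal local.conf sketch for turning the new service on (the filter reuses the iSCSI example from the docs below; TCPDUMP_ARGS has no default and must be set or the service refuses to start):

    [[local|localrc]]
    enable_service tcpdump
    # capture iSCSI traffic; any pcap filter expression works here
    TCPDUMP_ARGS="-i any tcp port 3260"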
These will be logged to the journal and also as a CSV +file. + +memory_tracker +-------------- + +The ``memory_tracker`` service periodically monitors RAM usage and +provides consumption output when available memory is seen to be +falling (i.e. processes are consuming memory). It also provides +output showing locked (unswappable) memory. + +tcpdump +------- + +Enable the ``tcpdump`` service to run a background tcpdump. You must +set the ``TCPDUMP_ARGS`` variable to something suitable (there is no +default). For example, to trace iSCSI communication during a job in +the OpenStack gate and copy the result into the log output, you might +use: + +.. code-block:: yaml + + job: + name: devstack-job + parent: devstack + vars: + devstack_services: + tcpdump: true + devstack_localrc: + TCPDUMP_ARGS: "-i any tcp port 3260" + zuul_copy_output: + '{{ devstack_log_dir }}/tcpdump.pcap': logs + + + diff --git a/lib/tcpdump b/lib/tcpdump new file mode 100644 index 0000000000..16e8269d02 --- /dev/null +++ b/lib/tcpdump @@ -0,0 +1,43 @@ +#!/bin/bash +# +# lib/tcpdump +# Functions to start and stop a tcpdump + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_tcpdump +# - stop_tcpdump + +# Save trace setting +_XTRACE_TCPDUMP=$(set +o | grep xtrace) +set +o xtrace + +TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap} + +# e.g. for iscsi +# "-i any tcp port 3260" +TCPDUMP_ARGS=${TCPDUMP_ARGS:-""} + +# start_tcpdump() - Start running processes +function start_tcpdump { + # Run a tcpdump with given arguments and save the packet capture + if is_service_enabled tcpdump; then + if [[ -z "${TCPDUMP_ARGS}" ]]; then + die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set" + fi + touch ${TCPDUMP_OUTPUT} + run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root + fi +} + +# stop_tcpdump() stop tcpdump process +function stop_tcpdump { + stop_process tcpdump +} + +# Restore xtrace +$_XTRACE_TCPDUMP diff --git a/stack.sh b/stack.sh index 022d5b9438..dfc9d24f51 100755 --- a/stack.sh +++ b/stack.sh @@ -614,6 +614,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 # Extras Source @@ -1053,6 +1054,12 @@ fi # A better kind of sysstat, with the top process per time slice start_dstat +# Run a background tcpdump for debugging +# Note: must set TCPDUMP_ARGS with the enabled service +if is_service_enabled tcpdump; then + start_tcpdump +fi + # Etcd # ----- From 8213d7c03990c8134cbe12bb6c7dac32bb7a0c50 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 11 Feb 2019 12:28:15 +1100 Subject: [PATCH 1058/1936] Add ISCSI_DEBUG option This option adds a systemd override to start the iscsi daemon with debugging enabled. Change-Id: Ie27991776aa07a695026036e47513221220332a0 --- lib/nova | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 7c6f8cd465..dee798cd5e 100644 --- a/lib/nova +++ b/lib/nova @@ -183,6 +183,10 @@ TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} # and Glance. 
NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) +# Enable debugging levels for iscsid service (goes from 0-8) +ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) +ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} + # Functions # --------- @@ -327,8 +331,22 @@ function configure_nova { sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi + + if [[ ${ISCSID_DEBUG} == "True" ]]; then + # Install an override that starts iscsid with debugging + # enabled. + cat > /tmp/iscsid.override < Date: Mon, 11 Feb 2019 13:35:43 +1100 Subject: [PATCH 1059/1936] Handle pcp-dstat transition dstat is Python 2, never going to be updated and effectively abandonded. The replacement is pcp-dstat [1] which is mostly compatible, with a few differences. As distro start transitioning (Fedora has), just drop the unsupported args for now. [1] https://pcp.io/man/man1/pcp-dstat.1.html Change-Id: Ibec8a37cb18a14656d97e2096c66bc8b21406068 --- tools/dstat.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/dstat.sh b/tools/dstat.sh index 01c6d9b7e9..e6cbb0f21c 100755 --- a/tools/dstat.sh +++ b/tools/dstat.sh @@ -12,8 +12,17 @@ # Retrieve log directory as argument from calling script. LOGDIR=$1 +DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem" +if dstat --version | grep -q 'pcp-dstat' ; then + # dstat is unmaintained, and moving to a plugin of performance + # co-pilot. Fedora 29 for example has rolled this out. It's + # mostly compatible, except for a few options which are not + # implemented (yet?) + DSTAT_TOP_OPTS="" +fi + # Command line arguments for primary DStat process. -DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap --tcp" +DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp" # Command-line arguments for secondary background DStat process. DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log" From 7224a6b54d4114d1b82a63e702586951860bab95 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 1 Apr 2019 11:16:53 +0000 Subject: [PATCH 1060/1936] Update docs index page - Switch from proposing Ubuntu 16.04 to 18.04 as the most tested platform. - Make it clearer that creating an additional "stack" user is optional when running on a cloud image, as this step often leads to errors for new users. - Fix some minor nits along the way. Change-Id: I39aef1a230b668b932b1681fcd0deeb423b411f1 --- doc/source/index.rst | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 9186f6dba7..1ea1c5ddae 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,22 +38,23 @@ Quick Start Install Linux ------------- -Start with a clean and minimal install of a Linux system. Devstack +Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the latest/current Fedora version, CentOS/RHEL 7, as well as Debian and OpenSUSE. -If you do not have a preference, Ubuntu 16.04 is the most tested, and -will probably go the smoothest. +If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the +most tested, and will probably go the smoothest. -Add Stack User --------------- +Add Stack User (optional) +------------------------- -Devstack should be run as a non-root user with sudo enabled +DevStack should be run as a non-root user with sudo enabled (standard logins to cloud images such as "ubuntu" or "cloud-user" are usually fine). 
-You can quickly create a separate `stack` user to run DevStack with +If you are not using a cloud image, you can create a separate `stack` user +to run DevStack with .. code-block:: console @@ -76,12 +77,12 @@ Download DevStack $ cd devstack The ``devstack`` repo contains a script that installs OpenStack and -templates for configuration files +templates for configuration files. Create a local.conf ------------------- -Create a ``local.conf`` file with 4 passwords preset at the root of the +Create a ``local.conf`` file with four passwords preset at the root of the devstack git repo. .. code-block:: ini From 7f0b4f3001575d1419c5020e007ce2c841c88f2f Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 1 Apr 2019 11:43:28 +0000 Subject: [PATCH 1061/1936] Fix double quoting issue when writing localconf When [0] introduced quoting all arguments, it broke existing consumers that already quote their value themselves. Fix this by avoiding to add additional quotes to the value when it already starts with a double quote. [0] https://review.openstack.org/636078 Change-Id: I92146e04731efc6dcc632ae6c3a7c374e783cdba Closes-Bug: 1822453 --- .../library/devstack_local_conf.py | 6 +++++- .../write-devstack-local-conf/library/test.py | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py index 3a8cd588b9..2f97d0e355 100644 --- a/roles/write-devstack-local-conf/library/devstack_local_conf.py +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -252,7 +252,11 @@ def handle_localrc(self, localrc): if localrc: vg = VarGraph(localrc) for k, v in vg.getVars(): - self.localrc.append('{}="{}"'.format(k, v)) + # Avoid double quoting + if len(v) and v[0]=='"': + self.localrc.append('{}={}'.format(k, v)) + else: + self.localrc.append('{}="{}"'.format(k, v)) if k == 'LIBS_FROM_GIT': lfg = True elif k == 'TEMPEST_PLUGINS': diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py index 22bf2da55d..7c526b34c8 100644 --- a/roles/write-devstack-local-conf/library/test.py +++ b/roles/write-devstack-local-conf/library/test.py @@ -187,6 +187,24 @@ def test_overridelibs_from_git(self): lfg = line.strip().split('=')[1] self.assertEqual('"oslo.db"', lfg) + def test_avoid_double_quote(self): + "Test that there a no duplicated quotes" + localrc = {'TESTVAR': '"quoted value"'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects={}) + lc = self._init_localconf(p) + lc.write(p['path']) + + testvar = None + with open(p['path']) as f: + for line in f: + if line.startswith('TESTVAR'): + testvar = line.strip().split('=')[1] + self.assertEqual('"quoted value"', testvar) + def test_plugin_circular_deps(self): "Test that plugins with circular dependencies fail" os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) From e03bcb2c8b8f1ee1cbef579454a30776e43175b3 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 1 Apr 2019 12:19:45 -0400 Subject: [PATCH 1062/1936] Remove crusty old python 3 package version logic If we are running with python3, just assume that any package that is not blacklisted is available for py3 and just attempt to install it and let pip sort it out whether it gets installed from a local or remote package. 
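As a hedged illustration (not part of the diff below), the only remaining
opt-out for a package is the existing DISABLED_PYTHON3_PACKAGES variable,
which a user could set in local.conf, for example:

    # hypothetical example value: anything listed here is still installed
    # with the python2 pip; every other package goes straight to the
    # python3 pip, and pip decides whether the install comes from a local
    # checkout or from PyPI
    DISABLED_PYTHON3_PACKAGES="swift"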
Change-Id: Ic05d183e489320f6dfc721575d47e7e4d661f87c Closes-Bug: #1820892 --- inc/python | 70 ++++-------------------------------------------------- 1 file changed, 5 insertions(+), 65 deletions(-) diff --git a/inc/python b/inc/python index e2a042e452..19e1228d23 100644 --- a/inc/python +++ b/inc/python @@ -81,34 +81,6 @@ function pip_install_gr_extras { pip_install $clean_name[$extras] } -# Determine the python versions supported by a package -function get_python_versions_for_package { - local name=$1 - cd $name && python setup.py --classifiers \ - | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' ' -} - -# Check for python3 classifier in local directory -function check_python3_support_for_package_local { - local name=$1 - cd $name - set +e - classifier=$(python setup.py --classifiers \ - | grep 'Programming Language :: Python :: 3') - set -e - echo $classifier -} - -# Check for python3 classifier on pypi -function check_python3_support_for_package_remote { - local name=$1 - set +e - classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \ - | grep '"Programming Language :: Python :: 3"') - set -e - echo $classifier -} - # python3_enabled_for() assumes the service(s) specified as arguments are # enabled for python 3 unless explicitly disabled. See python3_disabled_for(). # @@ -259,52 +231,20 @@ function pip_install { cmd_pip=$(get_pip_command $PYTHON2_VERSION) local sudo_pip="sudo -H" if python3_enabled; then - # Look at the package classifiers to find the python - # versions supported, and if we find the version of - # python3 we've been told to use, use that instead of the - # default pip - local python_versions - # Special case some services that have experimental # support for python3 in progress, but don't claim support # in their classifier echo "Check python version for : $package_dir" if python3_disabled_for ${package_dir##*/}; then echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" - elif python3_enabled_for ${package_dir##*/}; then + else + # For everything that is not explicitly blacklisted with + # DISABLED_PYTHON3_PACKAGES, assume it supports python3 + # and we will let pip sort out the install, regardless of + # the package being local or remote. echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" cmd_pip=$(get_pip_command $PYTHON3_VERSION) - elif [[ -d "$package_dir" ]]; then - python_versions=$(get_python_versions_for_package $package_dir) - if [[ $python_versions =~ $PYTHON3_VERSION ]]; then - echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on classifiers" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - else - # The package may not have yet advertised python3.5 - # support so check for just python3 classifier and log - # a warning. - python3_classifier=$(check_python3_support_for_package_local $package_dir) - if [[ ! -z "$python3_classifier" ]]; then - echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - else - echo "WARNING: Did not find python 3 classifier for local package $package_dir" - fi - fi - else - # Check pypi as we don't have the package on disk - package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*') - python3_classifier=$(check_python3_support_for_package_remote $package) - if [[ ! 
-z "$python3_classifier" ]]; then - echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - else - echo "WARNING: Did not find python 3 classifier for remote package $package_dir" - fi fi fi fi From 3ffc00940fdfddbef2c3bd4bc2298104ab606504 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 2 Apr 2019 12:28:31 +0200 Subject: [PATCH 1063/1936] sync-devstack-data: new argument devstack_data_base_dir When the role is used by grenade, the data directory is shared among different devstack executions, and the base directory is different, for example: /opt/stack/data vs /opt/stack/{old,new}. The new devstack_data_base_dir parameter allows user to specify a base directory for the data/ directory which is unrelated to the devstack directory. The default value is devstack_base_dir, so the default behavior is unchanged. Change-Id: Ie69b7b51947cbf1a8b31d2701783de2fb56a2d33 --- roles/sync-devstack-data/README.rst | 7 +++++++ roles/sync-devstack-data/defaults/main.yaml | 1 + roles/sync-devstack-data/tasks/main.yaml | 12 ++++++------ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst index 500e8cccc4..388625c893 100644 --- a/roles/sync-devstack-data/README.rst +++ b/roles/sync-devstack-data/README.rst @@ -10,3 +10,10 @@ subnodes. :default: /opt/stack The devstack base directory. + +.. zuul:rolevar:: devstack_data_base_dir + :default: {{ devstack_base_dir }} + + The devstack base directory for data/. + Useful for example when multiple executions of devstack (i.e. grenade) + share the same data directory. diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml index fea05c8146..6b5017b811 100644 --- a/roles/sync-devstack-data/defaults/main.yaml +++ b/roles/sync-devstack-data/defaults/main.yaml @@ -1 +1,2 @@ devstack_base_dir: /opt/stack +devstack_data_base_dir: "{{ devstack_base_dir }}" diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml index 46000159d4..e62be87ccd 100644 --- a/roles/sync-devstack-data/tasks/main.yaml +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -1,7 +1,7 @@ - name: Ensure the data folder exists become: true file: - path: "{{ devstack_base_dir }}/data" + path: "{{ devstack_data_base_dir }}/data" state: directory owner: stack group: stack @@ -11,7 +11,7 @@ - name: Ensure the CA folder exists become: true file: - path: "{{ devstack_base_dir }}/data/CA" + path: "{{ devstack_data_base_dir }}/data/CA" state: directory owner: stack group: stack @@ -25,8 +25,8 @@ dest: "{{ zuul.executor.work_root }}/{{ item | basename }}" mode: pull with_items: - - "{{ devstack_base_dir }}/data/ca-bundle.pem" - - "{{ devstack_base_dir }}/data/CA" + - "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + - "{{ devstack_data_base_dir }}/data/CA" when: inventory_hostname == 'controller' - name: Push the CA certificate @@ -34,7 +34,7 @@ become_user: stack synchronize: src: "{{ zuul.executor.work_root }}/ca-bundle.pem" - dest: "{{ devstack_base_dir }}/data/ca-bundle.pem" + dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem" mode: push when: 'inventory_hostname in groups["subnode"]|default([])' @@ -43,6 +43,6 @@ become_user: stack synchronize: src: "{{ zuul.executor.work_root }}/CA/" - dest: "{{ devstack_base_dir }}/data/" + dest: "{{ devstack_data_base_dir }}/data/" mode: push when: 
'inventory_hostname in groups["subnode"]|default([])' From d5a68a6b7039a66455d7e320379a6b0b3879891c Mon Sep 17 00:00:00 2001 From: melanie witt Date: Tue, 2 Apr 2019 22:52:23 +0000 Subject: [PATCH 1064/1936] Configure console proxy ports in nova_cellN.conf We're able to run multiple cells in devstack by setting the variable NOVA_NUM_CELLS in the devstack local.conf. Since we run console proxies per cell, we will start two console proxies if NOVA_NUM_CELLS=2. However, we've not been configuring the console proxy ports in the nova_cellN.conf files, so an attempt to start more than one will result in a port conflict and failure to start the subsequent console proxy services with error: ERROR nova error: [Errno 98] Address already in use This adds configuration of the console proxy ports based on an offset while looping across NOVA_NUM_CELLS. The base port values are taken from the config option defaults in the nova code: nova/conf/vnc.py, nova/conf/spice.py, and nova/conf/serial_console.py. Closes-Bug: #1822873 Change-Id: I8934d0b9392f2976347391c8a650ad260f337762 --- lib/nova | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index dee798cd5e..1b5132b018 100644 --- a/lib/nova +++ b/lib/nova @@ -607,8 +607,10 @@ function create_nova_conf { else for i in $(seq 1 $NOVA_NUM_CELLS); do local conf + local offset conf=$(conductor_conf $i) - configure_console_proxies $conf + offset=$((i - 1)) + configure_console_proxies $conf $offset done fi } @@ -678,10 +680,17 @@ function configure_console_compute { function configure_console_proxies { # Use the provided config file path or default to $NOVA_CONF. local conf=${1:-$NOVA_CONF} + local offset=${2:-0} + # Stagger the offset based on the total number of possible console proxies + # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # all are enabled. + offset=$((offset * 4)) if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc novncproxy_port $((6080 + offset)) iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc xvpvncproxy_port $((6081 + offset)) if is_nova_console_proxy_compute_tls_enabled ; then iniset $conf vnc auth_schemes "vencrypt" @@ -713,10 +722,12 @@ function configure_console_proxies { if is_service_enabled n-spice; then iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf spice html5proxy_port $((6082 + offset)) fi if is_service_enabled n-sproxy; then iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf serial_console serialproxy_port $((6083 + offset)) fi } From 5fcb4447cf601905a795064e67cfb479c0af67a1 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 14 Feb 2019 12:56:48 +0000 Subject: [PATCH 1065/1936] Make most platform jobs run with python3 python2.7 will be EOL soon, let's test on python3 instead. Exclude CentOS 7, as there is no python3 easily available. 
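As a rough, hedged sketch (not part of this change), moving a platform job
to the tempest-full-py3 parent amounts to what a user would do locally by
enabling devstack's existing python3 toggle:

    # local.conf snippet; USE_PYTHON3 is the pre-existing devstack switch
    USE_PYTHON3=True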
Change-Id: I24d8812c0c37b6d376fd5ae38067513bb62a2804 --- .zuul.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index afe400e1a4..17723e7dd0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -496,21 +496,21 @@ - job: name: devstack-platform-opensuse-150 - parent: tempest-full + parent: tempest-full-py3 description: openSUSE 15.0 platform test nodeset: devstack-single-node-opensuse-150 voting: false - job: name: devstack-platform-fedora-latest - parent: tempest-full + parent: tempest-full-py3 description: Fedora latest platform test nodeset: devstack-single-node-fedora-latest voting: false - job: name: devstack-platform-xenial - parent: tempest-full + parent: tempest-full-py3 description: Ubuntu Xenial platform test nodeset: openstack-single-node-xenial voting: false From cc072fd32f72d4f05257d0b0c8c7c41fa1178f52 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 31 May 2017 20:27:59 -0700 Subject: [PATCH 1066/1936] Run devstack CA and cert setup early Previously apache was configured and restarted before we configured the CA and certs. In most cases this is fine because those specific vhosts didn't use tls. However, if you had previously run devstack and had leftover vhosts and an unconfigured CA or certs devstack would fail. This is a small corner case, but its simple to address by moving CA and cert setup up in stack.sh to before we do anything related to web servers. Change-Id: I31dbaf9471088b9faff26c7b790da6f6feebb2d5 --- stack.sh | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index dfc9d24f51..0d7e623c78 100755 --- a/stack.sh +++ b/stack.sh @@ -833,6 +833,18 @@ if is_service_enabled etcd3; then install_etcd3 fi +# Setup TLS certs +# --------------- + +# Do this early, before any webservers are set up to ensure +# we don't run into problems with missing certs when apache +# is restarted. +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert +fi + # Check Out and Install Source # ---------------------------- @@ -857,13 +869,6 @@ if is_service_enabled neutron nova horizon; then install_neutronclient fi -# Setup TLS certs -if is_service_enabled tls-proxy; then - configure_CA - init_CA - init_cert -fi - # Install middleware install_keystonemiddleware From 1348ac990df40c504056a235156979629b844e8a Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 4 Apr 2019 22:30:24 +0900 Subject: [PATCH 1067/1936] stack.sh: Clear OpenStack related envvars stack.sh usually fails when it is started in a shell session where OpenStack related environment variables OS_* are set. Most common failure scenarios are failures in keystone operations. This commits clears OpenStack related environment variables at the begining of stack.sh. Change-Id: I3a924a0586dc9bb28f3bf3e151e100c24015efe5 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index dfc9d24f51..396d742c72 100755 --- a/stack.sh +++ b/stack.sh @@ -60,6 +60,9 @@ unset LANGUAGE LC_ALL=en_US.utf8 export LC_ALL +# Clear all OpenStack related envvars +unset `env | grep -E '^OS_' | cut -d = -f 1` + # Make sure umask is sane umask 022 From 56af9229a04b29c1ac70099e8361cec423096e99 Mon Sep 17 00:00:00 2001 From: Trinh Nguyen Date: Fri, 14 Dec 2018 17:27:46 +0900 Subject: [PATCH 1068/1936] Remove pkg/elasticsearch.sh The pkg/elasticsearch.sh is only used by Panko but Panko has moved the script to its own repository [1]. 
[1] https://review.openstack.org/#/c/643262/ Change-Id: I0ce40f4299246b68624abd2072c2abce06f1e70b Signed-off-by: Trinh Nguyen --- pkg/elasticsearch.sh | 148 ------------------------------------------- 1 file changed, 148 deletions(-) delete mode 100755 pkg/elasticsearch.sh diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh deleted file mode 100755 index bd4415315f..0000000000 --- a/pkg/elasticsearch.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -xe - -# basic reference point for things like filecache -# -# TODO(sdague): once we have a few of these I imagine the download -# step can probably be factored out to something nicer -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -FILES=$TOP_DIR/files -source $TOP_DIR/stackrc - -# Package source and version, all pkg files are expected to have -# something like this, as well as a way to override them. -ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5} -ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch} - -# Elastic search actual implementation -function wget_elasticsearch { - local file=${1} - - if [ ! -f ${FILES}/${file} ]; then - wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file} - fi - - if [ ! -f ${FILES}/${file}.sha1.txt ]; then - wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt - fi - - pushd ${FILES}; sha1sum ${file} > ${file}.sha1.gen; popd - - if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then - echo "Invalid elasticsearch download. Could not install." - return 1 - fi - return 0 -} - -function download_elasticsearch { - if is_ubuntu; then - wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb - elif is_fedora || is_suse; then - wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm - fi -} - -function configure_elasticsearch { - # currently a no op - : -} - -function _check_elasticsearch_ready { - # poll elasticsearch to see if it's started - if ! wait_for_service 120 http://localhost:9200; then - die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch" - fi -} - -function start_elasticsearch { - if is_ubuntu; then - sudo /etc/init.d/elasticsearch start - _check_elasticsearch_ready - elif is_fedora; then - sudo /bin/systemctl start elasticsearch.service - _check_elasticsearch_ready - elif is_suse; then - sudo /usr/bin/systemctl start elasticsearch.service - _check_elasticsearch_ready - else - echo "Unsupported architecture...can not start elasticsearch." - fi -} - -function stop_elasticsearch { - if is_ubuntu; then - sudo /etc/init.d/elasticsearch stop - elif is_fedora; then - sudo /bin/systemctl stop elasticsearch.service - elif is_suse ; then - sudo /usr/bin/systemctl stop elasticsearch.service - else - echo "Unsupported architecture...can not stop elasticsearch." - fi -} - -function install_elasticsearch { - pip_install_gr elasticsearch - if is_package_installed elasticsearch; then - echo "Note: elasticsearch was already installed." 
- return - fi - if is_ubuntu; then - is_package_installed default-jre-headless || install_package default-jre-headless - - sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb - sudo update-rc.d elasticsearch defaults 95 10 - elif is_fedora; then - is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless - yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm - sudo /bin/systemctl daemon-reload - sudo /bin/systemctl enable elasticsearch.service - elif is_suse; then - is_package_installed java-1_8_0-openjdk-headless || install_package java-1_8_0-openjdk-headless - zypper_install --no-gpg-checks ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm - sudo /usr/bin/systemctl daemon-reload - sudo /usr/bin/systemctl enable elasticsearch.service - else - echo "Unsupported install of elasticsearch on this architecture." - fi -} - -function uninstall_elasticsearch { - if is_package_installed elasticsearch; then - if is_ubuntu; then - sudo apt-get purge elasticsearch - elif is_fedora; then - sudo yum remove elasticsearch - elif is_suse; then - sudo zypper rm elasticsearch - else - echo "Unsupported install of elasticsearch on this architecture." - fi - fi -} - -# The PHASE dispatcher. All pkg files are expected to basically cargo -# cult the case statement. -PHASE=$1 -echo "Phase is $PHASE" - -case $PHASE in - download) - download_elasticsearch - ;; - install) - install_elasticsearch - ;; - configure) - configure_elasticsearch - ;; - start) - start_elasticsearch - ;; - stop) - stop_elasticsearch - ;; - uninstall) - uninstall_elasticsearch - ;; -esac From 70f5d91a616931790eea7e19028e1d097645ab3b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 12 Apr 2019 06:14:57 +0000 Subject: [PATCH 1069/1936] Updated from generate-devstack-plugins-list Change-Id: Id100f77027445edf849cc295ecebb31c79601b40 --- doc/source/plugin-registry.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9338d18819..f3aa1bffcc 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -34,6 +34,7 @@ broadview-collector `https://git.openstack.org/openstack/broa castellan-ui `https://git.openstack.org/openstack/castellan-ui `__ ceilometer `https://git.openstack.org/openstack/ceilometer `__ ceilometer-powervm `https://git.openstack.org/openstack/ceilometer-powervm `__ +cinderlib `https://git.openstack.org/openstack/cinderlib `__ cloudkitty `https://git.openstack.org/openstack/cloudkitty `__ collectd-openstack-plugins `https://git.openstack.org/openstack/collectd-openstack-plugins `__ congress `https://git.openstack.org/openstack/congress `__ @@ -82,6 +83,7 @@ kuryr-tempest-plugin `https://git.openstack.org/openstack/kury magnum `https://git.openstack.org/openstack/magnum `__ magnum-ui `https://git.openstack.org/openstack/magnum-ui `__ manila `https://git.openstack.org/openstack/manila `__ +manila-tempest-plugin `https://git.openstack.org/openstack/manila-tempest-plugin `__ manila-ui `https://git.openstack.org/openstack/manila-ui `__ masakari `https://git.openstack.org/openstack/masakari `__ meteos `https://git.openstack.org/openstack/meteos `__ @@ -174,6 +176,7 @@ stackube `https://git.openstack.org/openstack/stac storlets `https://git.openstack.org/openstack/storlets `__ stx-config `https://git.openstack.org/openstack/stx-config `__ stx-fault `https://git.openstack.org/openstack/stx-fault `__ +stx-ha `https://git.openstack.org/openstack/stx-ha `__ 
stx-integ `https://git.openstack.org/openstack/stx-integ `__ stx-metal `https://git.openstack.org/openstack/stx-metal `__ stx-nfv `https://git.openstack.org/openstack/stx-nfv `__ From 28a62242843ff45315f4918062ed654a3f3e3b31 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 6 Dec 2017 09:21:43 -0600 Subject: [PATCH 1070/1936] Add openstacksdk functional job to devstack pipelines openstacksdk gates on the new-style devstack functional base jobs. It serves as a good test case to make sure the functional base jobs don't break. Change-Id: I817639ed30cda8ea51d156872a14bbcf10a4e63d --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 9aafcdbb84..7c2a9d5073 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -621,6 +621,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - openstacksdk-functional-devstack: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ gate: jobs: - devstack @@ -641,6 +645,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - openstacksdk-functional-devstack: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. From 7fbc70e2cb17b89259efb70ddcc8bb98ddd844e5 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 15 Apr 2019 06:24:44 +0000 Subject: [PATCH 1071/1936] Updated from generate-devstack-plugins-list Change-Id: I2798fe053669be212c47ee9c579ca4f6262af5a1 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f3aa1bffcc..93c16f454c 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -123,6 +123,7 @@ networking-midonet `https://git.openstack.org/openstack/netw networking-mlnx `https://git.openstack.org/openstack/networking-mlnx `__ networking-nec `https://git.openstack.org/openstack/networking-nec `__ networking-odl `https://git.openstack.org/openstack/networking-odl `__ +networking-omnipath `https://git.openstack.org/openstack/networking-omnipath `__ networking-onos `https://git.openstack.org/openstack/networking-onos `__ networking-opencontrail `https://git.openstack.org/openstack/networking-opencontrail `__ networking-ovn `https://git.openstack.org/openstack/networking-ovn `__ From 10f44098825d1d13452117dc5ee31e3e895b495f Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Thu, 28 Feb 2019 23:44:14 +0100 Subject: [PATCH 1072/1936] Add hack to fix ply installation on opensuse The python3-ply package is indirectly a dependency of dhcp-client, which is not exactly an optional package. Pip >=10 refuses to install ply from global-requirements with this distro package is installed, so our only option is to remove it manually. Change-Id: I377fdd4a581eb4b6275584d92cafc0b783fe3b84 --- tools/fixup_stuff.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 1ff7bfa516..748223902c 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -228,6 +228,14 @@ function fixup_suse { sudo systemctl disable apparmor sudo /usr/sbin/aa-teardown fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. 
So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info } # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has From a9a51ca6382eb01c99fce51084f64a69f4c7d58a Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Mon, 15 Apr 2019 23:54:31 +0200 Subject: [PATCH 1073/1936] Fix is_neutron_legacy_enabled function This function will now first filter out all "neutron-" strings from DISABLED_SERVICES list before looking for "neutron" string in it. Change-Id: I5cab6a3be553713e1257599fb72042c6001f2672 Close-Bug: #1824884 --- lib/neutron | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 1066d8e342..947c491ec1 100644 --- a/lib/neutron +++ b/lib/neutron @@ -117,7 +117,9 @@ function is_neutron_enabled { # Test if any Neutron services are enabled # is_neutron_enabled function is_neutron_legacy_enabled { - [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 + # first we need to remove all "neutron-" from DISABLED_SERVICES list + disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g') + [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 return 1 } From a13474fd7892312ef42a211c344d8c785f9a5f93 Mon Sep 17 00:00:00 2001 From: Erik Olof Gunnar Andersson Date: Thu, 18 Apr 2019 12:18:23 -0700 Subject: [PATCH 1074/1936] Add region_name to ironic compute configuration We should always pass on a region when talking to ironic. This will also help detect and test issues specific to regions. Change-Id: Iaab3c1bcedc5aaa2106c0758cbb43bade3de2cf5 --- lib/nova_plugins/hypervisor-ironic | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 49110a8643..1279256055 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -50,6 +50,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic project_name demo + iniset $NOVA_CONF ironic region_name $REGION_NAME iniset $NOVA_CONF ironic api_max_retries 300 iniset $NOVA_CONF ironic api_retry_interval 5 From 666f5491b67e32b5f0e0f7a9eb5dc6ad72a1fd72 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:43:10 +0000 Subject: [PATCH 1075/1936] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. 
--- .gitreview | 4 ++-- .zuul.yaml | 22 +++++++++---------- .../tasks/main.yaml | 6 ++--- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.gitreview b/.gitreview index 570d31a987..e1bf63ba7a 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 -project=openstack-dev/devstack.git +project=openstack/devstack.git diff --git a/.zuul.yaml b/.zuul.yaml index c20f55cb4a..8c0ce2f2ba 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -198,10 +198,10 @@ job.group-vars.peers, which is what is used by multi node jobs for subnode nodes (everything but the controller). required-projects: - - git.openstack.org/openstack-dev/devstack + - opendev.org/openstack/devstack roles: - - zuul: git.openstack.org/openstack-infra/devstack-gate - - zuul: git.openstack.org/openstack-infra/openstack-zuul-jobs + - zuul: opendev.org/openstack/devstack-gate + - zuul: opendev.org/openstack/openstack-zuul-jobs vars: devstack_localrc: DATABASE_PASSWORD: secretdatabase @@ -308,7 +308,7 @@ less than the normal minimum set of required-projects. nodeset: openstack-single-node-bionic required-projects: - - git.openstack.org/openstack/requirements + - opendev.org/openstack/requirements vars: devstack_localrc: # Multinode specific settings @@ -366,13 +366,13 @@ The run playbook consists of a single role, so it can be easily rewritten and extended. required-projects: - - git.openstack.org/openstack/cinder - - git.openstack.org/openstack/glance - - git.openstack.org/openstack/keystone - - git.openstack.org/openstack/neutron - - git.openstack.org/openstack/nova - - git.openstack.org/openstack/placement - - git.openstack.org/openstack/swift + - opendev.org/openstack/cinder + - opendev.org/openstack/glance + - opendev.org/openstack/keystone + - opendev.org/openstack/neutron + - opendev.org/openstack/nova + - opendev.org/openstack/placement + - opendev.org/openstack/swift timeout: 7200 vars: devstack_localrc: diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index dfa934f68b..f6d52ab3ad 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -1,9 +1,9 @@ - name: Find all OpenStack source repos used by this job find: paths: - - src/git.openstack.org/openstack - - src/git.openstack.org/openstack-dev - - src/git.openstack.org/openstack-infra + - src/opendev.org/openstack + - src/opendev.org/openstack-dev + - src/opendev.org/openstack-infra file_type: directory register: found_repos From e7f3d91a7ddb0a1d01a4394f42feb51ffc3b9b43 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 20 Apr 2019 09:11:58 -0500 Subject: [PATCH 1076/1936] Update repo namespace search list With the new namespaces we have to look around a bit more to find repos top copy into the DevStack working directory. 
Add: * starlingx/ * x/ * zuul/ Depends-On: https://review.opendev.org/653988 Change-Id: I8a55522a5fee46f415f0c0ce580ded3476133460 Signed-off-by: Dean Troyer --- roles/setup-devstack-source-dirs/tasks/main.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index f6d52ab3ad..1e5ce660f3 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -4,6 +4,9 @@ - src/opendev.org/openstack - src/opendev.org/openstack-dev - src/opendev.org/openstack-infra + - src/opendev.org/starlingx + - src/opendev.org/x + - src/opendev.org/zuul file_type: directory register: found_repos From dc9ba8b8aee4022ef1598cd0d8cfd7dfe34a4b6b Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 23 Apr 2019 13:02:00 +0000 Subject: [PATCH 1077/1936] Add opendev to repo search list glean is in opendev and is used by nodepool devstack jobs. Change-Id: I38dfd7cc531b20b26862193be14f4ebb53352efa --- roles/setup-devstack-source-dirs/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index 1e5ce660f3..160757ede9 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -1,6 +1,7 @@ - name: Find all OpenStack source repos used by this job find: paths: + - src/opendev.org/opendev - src/opendev.org/openstack - src/opendev.org/openstack-dev - src/opendev.org/openstack-infra From 4705861dd01fdb37bde2a9432571501599d20766 Mon Sep 17 00:00:00 2001 From: Artom Lifshitz Date: Wed, 23 May 2018 10:08:56 -0400 Subject: [PATCH 1078/1936] Explicitly set scheduler_available_filters Tempest's scheduler_available_filters has a special 'all' value that is understood to mean 'all filters are enabled' by various tempest tests. However, what it really means is 'the default nova filters are enabled.' In an effort to help clean that up, this patch explicitly sets scheduler_available_filters to nova's $FILTERS. Because $FILTERS is now used in both lib/nova and lib/tempest, it is renamed $NOVA_FILTERS. Change-Id: I6ffc1e9989cd61d666f9c1db9c94fbabd7151918 Related-bug: 1628443 --- lib/nova | 6 +++--- lib/tempest | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index 5e157c5f11..0003179a4d 100644 --- a/lib/nova +++ b/lib/nova @@ -103,9 +103,9 @@ FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # should work in most cases. SCHEDULER=${SCHEDULER:-filter_scheduler} -# The following FILTERS contains SameHostFilter and DifferentHostFilter with +# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. 
-FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf @@ -423,7 +423,7 @@ function create_nova_conf { iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" iniset $NOVA_CONF scheduler driver "$SCHEDULER" - iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS" + iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" if [[ $SCHEDULER == "filter_scheduler" ]]; then iniset $NOVA_CONF scheduler workers "$API_WORKERS" fi diff --git a/lib/tempest b/lib/tempest index 60f571ceb3..b8b313eb30 100644 --- a/lib/tempest +++ b/lib/tempest @@ -372,6 +372,11 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + + if [[ -n "$NOVA_FILTERS" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} + fi + if is_service_enabled n-cell; then # Cells doesn't support shelving/unshelving iniset $TEMPEST_CONFIG compute-feature-enabled shelve False From 62e27d3b1c81cd8a45ee6a8e77dce9f68fc9ea1a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 23 Apr 2019 13:44:37 -0400 Subject: [PATCH 1079/1936] Remove n-obj from zuul and docs n-obj hasn't been around for many years and devstack doesn't use it anymore anyway so this just cleans up some vestigial use of the old service. Change-Id: I04b2d2dc2b4e49fab90f5ef94f4e087e969aa24b --- .zuul.yaml | 1 - doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8c0ce2f2ba..60e3a14f14 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -412,7 +412,6 @@ n-cond: true n-cpu: true n-novnc: true - n-obj: true n-sch: true placement-api: true # Neutron services diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index b1d88cb35c..a27a4d20b4 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -59,7 +59,7 @@ Edit your ``/opt/stack/devstack/local.conf`` to look like # Horizon - enable for the OpenStack web GUI # ENABLED_SERVICES+=,horizon # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy + ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy ENABLED_SERVICES+=,placement-api,placement-client # Glance ENABLED_SERVICES+=,g-api,g-reg From 87daf8abe64f9b51fb840a455088b18fd0a791ff Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Mon, 15 Apr 2019 12:00:07 -0400 Subject: [PATCH 1080/1936] End support for changing cinder periodic_interval Support for changing the cinder periodic_interval config option was added way back in havana as a workaround for bug #1180976 by change I20e52e66fcc94b224476cdd14c88bd6981b4e617. 
As the fix for that bug does not require modifying this config value, and such modification may have unintentional adverse effects, end the support. Change-Id: I1ef1fe564123216b19582262726cdb1078b7650e Partial-bug: #1824837 --- .zuul.yaml | 1 - lib/cinder | 9 --------- 2 files changed, 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8c0ce2f2ba..785cf910d6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -380,7 +380,6 @@ SWIFT_REPLICAS: 1 SWIFT_START_ALL_SERVICES: false SWIFT_HASH: 1234123412341234 - CINDER_PERIODIC_INTERVAL: 10 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true VNCSERVER_LISTEN: 0.0.0.0 diff --git a/lib/cinder b/lib/cinder index 047b25b3c5..ed8349aad6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,14 +88,6 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Cinder reports allocations back to the scheduler on periodic intervals -# it turns out we can get an "out of space" issue when we run tests too -# quickly just because cinder didn't realize we'd freed up resources. -# Make this configurable so that devstack-gate/tempest can set it to -# less than the 60 second default -# https://bugs.launchpad.net/cinder/+bug/1180976 -CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} - # Centos7 and OpenSUSE switched to using LIO and that's all that's supported, # although the tgt bits are in EPEL and OpenSUSE we don't want that for CI if is_fedora || is_suse; then @@ -237,7 +229,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager From 5fe60c1a342f78482042f34f9a6d1a6739cbb1b7 Mon Sep 17 00:00:00 2001 From: ZhongShengping Date: Tue, 30 Apr 2019 10:12:51 +0800 Subject: [PATCH 1081/1936] Option "lock_path" from group "DEFAULT" is deprecated Option "lock_path" from group "DEFAULT" is deprecated. Use option "lock_path" from group "oslo_concurrency". 
Change-Id: I7c7501a4a351155eeba77bb7cd43c8d6f5ea73bc --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 65487cb444..d630c9a3b0 100644 --- a/lib/glance +++ b/lib/glance @@ -130,7 +130,7 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR + iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 From 9e3b3bf5244e0b62cd4ab9914c622e81ba08f4c8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 4 Sep 2018 16:51:45 -0400 Subject: [PATCH 1082/1936] Add nova-status upgrade check call post-deploy Once nova is setup and n-cpu on the host is reporting a service record and discovered (the host is mapping in the API DB), we should run the nova-status upgrade check to verify the deployment. Change-Id: I9683bf94233ebacb3057ce159cb3dc53aa55a2f4 Related-Bug: #1790721 --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index 4f6e5b6da1..b06f7bdcc1 100755 --- a/stack.sh +++ b/stack.sh @@ -1442,6 +1442,12 @@ if is_service_enabled n-api; then # environment is up. echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment." fi + # Run the nova-status upgrade check command which can also be used + # to verify the base install. Note that this is good enough in a + # single node deployment, but in a multi-node setup it won't verify + # any subnodes - that would have to be driven from whatever tooling + # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job. + $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check fi # Run local script From 070e4ee65e73584bae5fdb23135abb972baf8ae9 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 13 Sep 2018 03:08:19 +0900 Subject: [PATCH 1083/1936] Deprecate swift3, use s3api in Swift repo swift3 is no longer actively maintained in the upstream. That has been moved to Swift repository as s3api so we should use s3api middleware instead. As well as swift3, s3token is also maintained in Swift upstream. Change-Id: I4582d81da066ab53e6f11ad1df7af91425f2b0ca --- doc/source/configuration.rst | 4 ++-- lib/nova | 2 +- lib/swift | 32 +++++++++----------------------- stack.sh | 6 ++---- stackrc | 4 ---- 5 files changed, 14 insertions(+), 34 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 022e6ba529..9ca8441263 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -627,8 +627,8 @@ used when adding nodes to the Swift rings. Swift S3 ++++++++ -If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will -install the swift3 middleware emulation. Swift will be configured to +If you are enabling ``s3api`` in ``ENABLED_SERVICES`` DevStack will +install the s3api middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the ``nova-objectstore``. 
diff --git a/lib/nova b/lib/nova index 1fb50dfd76..4653d693eb 100644 --- a/lib/nova +++ b/lib/nova @@ -403,7 +403,7 @@ function create_nova_accounts { fi # S3 - if is_service_enabled swift3; then + if is_service_enabled s3api; then get_or_create_service "s3" "s3" "S3" get_or_create_endpoint \ "s3" \ diff --git a/lib/swift b/lib/swift index e2ee0cb470..d9a7878652 100644 --- a/lib/swift +++ b/lib/swift @@ -49,7 +49,6 @@ fi SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} -SWIFT3_DIR=$DEST/swift3 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} @@ -68,8 +67,8 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # Default is ``/etc/swift``. SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} -if is_service_enabled s-proxy && is_service_enabled swift3; then - # If we are using ``swift3``, we can default the S3 port to swift instead +if is_service_enabled s-proxy && is_service_enabled s3api; then + # If we are using ``s3api``, we can default the S3 port to swift instead # of nova-objectstore S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT} fi @@ -423,16 +422,19 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} # By default Swift will be installed with Keystone and tempauth middleware - # and add the swift3 middleware if its configured for it. The token for + # and add the s3api middleware if its configured for it. The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the # token for keystoneauth would have the standard reseller_prefix `AUTH_` - if is_service_enabled swift3;then - swift_pipeline+=" swift3 s3token " + if is_service_enabled s3api;then + swift_pipeline+=" s3api" fi - if is_service_enabled keystone; then + if is_service_enabled s3api;then + swift_pipeline+=" s3token" + fi swift_pipeline+=" authtoken keystoneauth" fi + swift_pipeline+=" tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} @@ -467,22 +469,6 @@ function configure_swift { # Allow both reseller prefixes to be used with domain_remap iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH" - if is_service_enabled swift3; then - cat <>${SWIFT_CONFIG_PROXY_SERVER} -[filter:s3token] -paste.filter_factory = keystonemiddleware.s3_token:filter_factory -auth_uri = ${KEYSTONE_AUTH_URI} -cafile = ${SSL_BUNDLE_FILE} -admin_user = swift -admin_tenant_name = ${SERVICE_PROJECT_NAME} -admin_password = ${SERVICE_PASSWORD} - -[filter:swift3] -use = egg:swift3#swift3 -location = ${REGION_NAME} -EOF - fi - cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE} diff --git a/stack.sh b/stack.sh index 4f6e5b6da1..cebd959ebc 100755 --- a/stack.sh +++ b/stack.sh @@ -889,12 +889,10 @@ if is_service_enabled swift; then stack_install_service swift configure_swift - # swift3 middleware to provide S3 emulation to Swift - if is_service_enabled swift3; then + # s3api middleware to provide S3 emulation to Swift + if is_service_enabled s3api; then # Replace the nova-objectstore port by the swift port S3_SERVICE_PORT=8080 - git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH - 
setup_develop $SWIFT3_DIR fi fi diff --git a/stackrc b/stackrc index 2924d3956c..2291e3c3d8 100644 --- a/stackrc +++ b/stackrc @@ -520,10 +520,6 @@ GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH} GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH} -# s3 support for swift -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git} -SWIFT3_BRANCH=${SWIFT3_BRANCH:-$TARGET_BRANCH} - # ceilometer middleware GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} From 8b8158ed8f2f448a214ce3b1978b9cb5b039f6ed Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 5 Apr 2019 11:30:14 +0000 Subject: [PATCH 1084/1936] "raw_input()" deprecated in Python3 Built-in function "raw_input()" is deprecated in favor of "input()" [1]. [1] https://www.python.org/dev/peps/pep-3111/ Change-Id: I31c4c59373a2ad04987da3daffb3eed50916a6db Closes-Bug: #1823321 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 8c6fa01c45..95b138c6bf 100644 --- a/lib/tempest +++ b/lib/tempest @@ -107,7 +107,7 @@ function remove_disabled_extensions { function image_size_in_gib { local size size=$(openstack image show $1 -c size -f value) - echo $size | python -c "import math; print int(math.ceil(float(int(raw_input()) / 1024.0 ** 3)))" + echo $size | python -c "import math; import six; print(int(math.ceil(float(int(six.moves.input()) / 1024.0 ** 3))))" } # configure_tempest() - Set config files, create data dirs, etc From 9bf7e2654124205cd71343c2af175d730c634ab7 Mon Sep 17 00:00:00 2001 From: Swaminathan Vasudevan Date: Thu, 2 May 2019 13:45:46 -0700 Subject: [PATCH 1085/1936] DVR-Enable ARP Responder when DVR and L2pop is enabled This patch enables ARP Responder with DVR routers along with l2pop in devstack. Related-Bug: #1774459 Change-Id: I82f628c32f6e38c2419b6ffe90d9f9adf96777b1 --- lib/neutron | 1 + lib/neutron_plugins/ml2 | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/neutron b/lib/neutron index 1066d8e342..17ffce7cb5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -237,6 +237,7 @@ function configure_neutron_new { if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True + iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True fi fi diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index c5a4c02cc5..a7466833e1 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -147,6 +147,7 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True fi } From 4db9d567d19759ee090e506adf7ad77d8e64e452 Mon Sep 17 00:00:00 2001 From: Tom Barron Date: Wed, 9 Jan 2019 08:43:52 -0500 Subject: [PATCH 1086/1936] Safety check for python version in get_pip_command We know empirically that some legacy gate jobs pass and appear to be running with python3 but actually pip was invoked with PYTHON3_VERSION unset so that they are actually ran with python2 packages. 
As a followup to this discussion [1], add a safety check in the get_pip_command function to ensure that a python version has been set when it is invoked. [1] https://review.openstack.org/#/c/622415/4/inc/python@283 Change-Id: I3a08406fb7d68282c6b98abb33a625821510046a --- inc/python | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/inc/python b/inc/python index 19e1228d23..0e575ae9e9 100644 --- a/inc/python +++ b/inc/python @@ -29,6 +29,10 @@ declare -A -g PROJECT_VENV # get_pip_command function get_pip_command { local version="$1" + if [ -z "$version" ]; then + die $LINENO "pip python version is not set." + fi + # NOTE(dhellmann): I don't know if we actually get a pip3.4-python # under any circumstances. which pip${version} || which pip${version}-python From f745a0a7aa8cc86a02cd43012b307bb65df5b1aa Mon Sep 17 00:00:00 2001 From: Lucas Xu Date: Thu, 30 May 2019 20:49:29 +0000 Subject: [PATCH 1087/1936] Fix Typo on Devstack-with-lbaas-v2.rst Change "creeate" to "create" in thie CLI Change-Id: Ic9997ad2b852ae25b28c5ad7481fad188b632a50 --- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index a27a4d20b4..db138ae2c5 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -91,7 +91,7 @@ Create two nova instances that we can use as test http servers: #create nova instances on private network openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - openstack server creeate --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 openstack server list # should show the nova instances just created #add secgroup rules to allow ssh etc.. From 4b8cba77fe3444c925b5e4fe39743d54b0243eef Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 21 May 2019 14:17:11 +0100 Subject: [PATCH 1088/1936] Remove n-cells, n-net and n-cauth Remove nova cells v1 support, which also allows/necessitates removing support for nova networks (which was only supported with cells v1) and nova-consoleauth (which was required by cells v1 but is unnecessary otherwise). The Depends-On isn't really necessary, but it's here to make sure this doesn't merge until we _really_ have killed cells v1. I honestly expected this patch would be bigger. 
Change-Id: I90316208d1af42c1659d3bee386f95e38aaf2c56 Signed-off-by: Stephen Finucane Depends-On: Ib0e0b708c46e4330e51f8f8fdfbb02d45aaf0f44 --- .zuul.yaml | 10 +- HACKING.rst | 4 +- doc/source/configuration.rst | 14 --- doc/source/guides/multinode-lab.rst | 6 - doc/source/guides/single-machine.rst | 9 +- files/rpms/nova | 2 +- functions | 7 +- functions-common | 5 - lib/neutron-legacy | 2 - lib/nova | 168 ++------------------------ lib/nova_plugins/hypervisor-xenserver | 3 - lib/tempest | 23 +--- stack.sh | 29 +---- stackrc | 3 +- 14 files changed, 23 insertions(+), 262 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 60e3a14f14..9996f5aba7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -408,7 +408,6 @@ # Nova services n-api: true n-api-meta: true - n-cauth: true n-cond: true n-cpu: true n-novnc: true @@ -689,11 +688,8 @@ # being experimental any more, so we can keep this list somewhat # pruned. # - # * nova-cells-v1: maintained by nova for cells v1 (nova-cells service); - # it's in experimental here (and in nova) for testing cells v1 - # changes to devstack w/o gating on it for all devstack changes. # * nova-next: maintained by nova for unreleased/undefaulted - # things like cellsv2 and placement-api + # things # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test # when neutron-api is served by uwsgi, it's in exprimental for testing. # the next cycle we can remove this job if things turn out to be @@ -706,10 +702,6 @@ experimental: jobs: - - nova-cells-v1: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - nova-next - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi diff --git a/HACKING.rst b/HACKING.rst index 3853eed9a1..968306a2c4 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -145,8 +145,8 @@ follows: * Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR`` * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. - ``VOLUME_BACKING_FILE_SIZE`` (nova-compute, nova-volumes and cinder) or - ``PUBLIC_NETWORK_NAME`` (nova-network and neutron) + ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or + ``PUBLIC_NETWORK_NAME`` (only neutron but formerly nova-network too) * Variables that can not be cleanly declared in a project file due to dependency ordering, i.e. the order of sourcing the project files can not be changed for other reasons but the earlier file needs to dereference a diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 9ca8441263..5e8004dc44 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -663,20 +663,6 @@ Xenserver If you would like to use Xenserver as the hypervisor, please refer to the instructions in ``./tools/xen/README.md``. -Cells -~~~~~ - -`Cells `__ is -an alternative scaling option. To setup a cells environment add the -following to your ``localrc`` section: - -:: - - enable_service n-cell - -Be aware that there are some features currently missing in cells, one -notable one being security groups. 
- Cinder ~~~~~~ diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c3574ac593..3c4acc8c7c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -120,11 +120,8 @@ cluster controller's DevStack in ``local.conf``: [[local|localrc]] HOST_IP=192.168.42.11 - FLAT_INTERFACE=eth0 FIXED_RANGE=10.4.128.0/20 - FIXED_NETWORK_SIZE=4096 FLOATING_RANGE=192.168.42.128/25 - MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack DATABASE_PASSWORD=supersecret @@ -160,11 +157,8 @@ machines, create a ``local.conf`` with: [[local|localrc]] HOST_IP=192.168.42.12 # change this per compute node - FLAT_INTERFACE=eth0 FIXED_RANGE=10.4.128.0/20 - FIXED_NETWORK_SIZE=4096 FLOATING_RANGE=192.168.42.128/25 - MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack DATABASE_PASSWORD=supersecret diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 168172c630..cfbd6b1884 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -87,11 +87,8 @@ do the following: - Set ``FLOATING_RANGE`` to a range not used on the local network, i.e. 192.168.1.224/27. This configures IP addresses ending in 225-254 to be used as floating IPs. -- Set ``FIXED_RANGE`` and ``FIXED_NETWORK_SIZE`` to configure the - internal address space used by the instances. -- Set ``FLAT_INTERFACE`` to the Ethernet interface that connects the - host to your local network. This is the interface that should be - configured with the static IP address mentioned above. +- Set ``FIXED_RANGE`` to configure the internal address space used by the + instances. - Set the administrative password. This password is used for the **admin** and **demo** accounts set up as OpenStack users. - Set the MySQL administrative password. The default here is a random @@ -108,8 +105,6 @@ do the following: [[local|localrc]] FLOATING_RANGE=192.168.1.224/27 FIXED_RANGE=10.11.12.0/24 - FIXED_NETWORK_SIZE=256 - FLAT_INTERFACE=eth0 ADMIN_PASSWORD=supersecret DATABASE_PASSWORD=iheartdatabases RABBIT_PASSWORD=flopsymopsy diff --git a/files/rpms/nova b/files/rpms/nova index 8d73644025..f69fc373d7 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,6 +1,6 @@ conntrack-tools curl -dnsmasq # for nova-network +dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release ebtables gawk diff --git a/functions b/functions index 187ad2311d..93035673b5 100644 --- a/functions +++ b/functions @@ -469,7 +469,7 @@ EOF # ping check -# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK`` +# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``PRIVATE_NETWORK`` # ping_check [boot-timeout] [from_net] [expected] function ping_check { local ip=$1 @@ -483,12 +483,9 @@ function ping_check { # if we don't specify a from_net we're expecting things to work # fine from our local box. if [[ -n "$from_net" ]]; then + # TODO(stephenfin): Is there any way neutron could be disabled now? if is_service_enabled neutron; then ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net" - elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then - # there is no way to address the multihost / private case, bail here for compatibility. - # TODO: remove this cruft and redo code to handle this at the caller level. 
- return fi fi diff --git a/functions-common b/functions-common index bace9e00e6..922ff6fa5f 100644 --- a/functions-common +++ b/functions-common @@ -1929,10 +1929,6 @@ function enable_service { # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. # -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] function is_service_enabled { @@ -1955,7 +1951,6 @@ function is_service_enabled { # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && enabled=0 [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0 [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0 [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0 diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 82571151e4..87edc5ab81 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -50,8 +50,6 @@ # See "Neutron Network Configuration" below for additional variables # that must be set in localrc for connectivity across hosts with # Neutron. -# -# With Neutron networking the NETWORK_MANAGER variable is ignored. # Settings # -------- diff --git a/lib/nova b/lib/nova index 6ce1dcc4cc..2efe7cb9b4 100644 --- a/lib/nova +++ b/lib/nova @@ -50,11 +50,9 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf -NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf -NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi @@ -111,7 +109,6 @@ QEMU_CONF=/etc/libvirt/qemu.conf # Set default defaults here as some hypervisor drivers override these PUBLIC_INTERFACE_DEFAULT=br100 -FLAT_NETWORK_BRIDGE_DEFAULT=br100 # Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that # the default isn't completely crazy. This will match ``eth*``, ``em*``, or # the new ``p*`` interfaces, then basically picks the first @@ -137,44 +134,6 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER fi - -# Nova Network Configuration -# -------------------------- - -NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} - -VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} - -# If you are using the FlatDHCP network mode on multiple hosts, set the -# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already -# have an IP or you risk breaking things. -# -# **DHCP Warning**: If your flat interface device uses DHCP, there will be a -# hiccup while the network is moved from the flat interface to the flat network -# bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the VM launch will probably -# fail. 
-# -# If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set ``FLAT_INTERFACE=`` -# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. -FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} - -# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This -# allows network operations and routing for a VM to occur on the server that is -# running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=$(trueorfalse False MULTI_HOST) - -# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, -# where there are at least two nova-computes. -NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) - -# Test floating pool and range are used for testing. They are defined -# here until the admin APIs can replace nova-manage -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} - # Other Nova configurations # ---------------------------- @@ -183,6 +142,10 @@ TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} # and Glance. NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) +# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, +# where there are at least two nova-computes. +NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) + # Enable debugging levels for iscsid service (goes from 0-8) ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} @@ -198,13 +161,6 @@ function is_nova_enabled { return 1 } -# Test if any Nova Cell services are enabled -# is_nova_enabled -function is_n-cell_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0 - return 1 -} - # is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy # service has TLS enabled function is_nova_console_proxy_compute_tls_enabled { @@ -501,10 +457,6 @@ function create_nova_conf { if [ -n "$NOVA_INSTANCES_PATH" ]; then iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" fi - if [ "$MULTI_HOST" != "False" ]; then - iniset $NOVA_CONF DEFAULT multi_host "True" - iniset $NOVA_CONF DEFAULT send_arp_for_ha "True" - fi if [ "$SYSLOG" != "False" ]; then iniset $NOVA_CONF DEFAULT use_syslog "True" fi @@ -551,21 +503,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - # Setup logging for nova-dhcpbridge command line - sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf" - - if is_service_enabled n-net; then - local service="n-dhcp" - local logfile="${service}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - if [[ -n ${LOGDIR} ]]; then - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" - iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile" - fi - - iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf" - fi - if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then init_nova_service_user_conf fi @@ -748,42 +685,6 @@ function conductor_conf { echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" } -function init_nova_cells { - if is_service_enabled n-cell; then - cp $NOVA_CONF $NOVA_CELLS_CONF - iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB` - rpc_backend_add_vhost child_cell - iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell - iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile 
$NOVA_CELLS_CONF - iniset $NOVA_CELLS_CONF cells enable True - iniset $NOVA_CELLS_CONF cells cell_type compute - iniset $NOVA_CELLS_CONF cells name child - - iniset $NOVA_CONF cells enable True - iniset $NOVA_CONF cells cell_type api - iniset $NOVA_CONF cells name region - - if is_service_enabled n-api-meta; then - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata - fi - - # Cells v1 conductor should be the nova-cells.conf - NOVA_COND_CONF=$NOVA_CELLS_CONF - - time_start "dbsync" - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync - time_stop "dbsync" - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 - $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 - - # Creates the single cells v2 cell for the child cell (v1) nova db. - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \ - --transport-url $(get_transport_url child_cell) --name 'cell1' - fi -} - # create_nova_cache_dir() - Part of the init_nova() process function create_nova_cache_dir { # Create cache dir @@ -791,18 +692,6 @@ function create_nova_cache_dir { rm -f $NOVA_AUTH_CACHE_DIR/* } -function create_nova_conf_nova_network { - local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} - iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" - iniset $NOVA_CONF DEFAULT public_interface "$public_interface" - iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" - iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" - fi - iniset $NOVA_CONF DEFAULT use_neutron False -} - # create_nova_keys_dir() - Part of the init_nova() process function create_nova_keys_dir { # Create keys dir @@ -834,10 +723,6 @@ function init_nova { # Migrate nova and nova_cell0 databases. $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - if is_service_enabled n-cell; then - recreate_database $NOVA_CELLS_DB - fi - # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations @@ -942,15 +827,6 @@ function start_nova_api { export PATH=$old_path } -# Detect and setup conditions under which singleconductor setup is -# needed. Notably cellsv1. 
-function _set_singleconductor { - # NOTE(danms): Don't setup conductor fleet for cellsv1 - if is_service_enabled n-cell; then - CELLSV2_SETUP="singleconductor" - fi -} - # start_nova_compute() - Start the compute process function start_nova_compute { @@ -958,11 +834,7 @@ function start_nova_compute { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF - else - local compute_cell_conf=$NOVA_CONF - fi + local compute_cell_conf=$NOVA_CONF cp $compute_cell_conf $NOVA_CPU_CONF @@ -1025,22 +897,7 @@ function start_nova_rest { export PATH=$NOVA_BIN_DIR:$PATH local api_cell_conf=$NOVA_CONF - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF - else - local compute_cell_conf=$NOVA_CONF - fi - - # ``run_process`` checks ``is_service_enabled``, it is not needed here - run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" - run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" - - if is_service_enabled n-net; then - if ! running_in_container; then - enable_kernel_bridge_firewall - fi - fi - run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" + local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then @@ -1049,9 +906,6 @@ function start_nova_rest { run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" fi - # nova-consoleauth always runs globally - run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" - export PATH=$old_path } @@ -1129,11 +983,7 @@ function is_nova_ready { # happen between here and the script ending. However, in multinode # tests this can very often not be the case. So ensure that the # compute is up before we move on. - if is_service_enabled n-cell; then - # cells v1 can't complete the check below because it munges - # hostnames with cell information (grumble grumble). - return - fi + # TODO(sdague): honestly, this probably should be a plug point for # an external system. 
if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then @@ -1145,8 +995,6 @@ function is_nova_ready { } function start_nova { - # this catches the cells v1 case early - _set_singleconductor start_nova_rest start_nova_console_proxies start_nova_conductor @@ -1174,7 +1022,7 @@ function stop_nova_compute { function stop_nova_rest { # Kill the non-compute nova processes - for serv in n-api n-api-meta n-net n-sch n-cauth n-cell n-cell; do + for serv in n-api n-api-meta n-sch; do stop_process $serv done } diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index 6f79e4ff7c..2fdbde1df8 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -24,9 +24,6 @@ set +o xtrace # Defaults # -------- -# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args -FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} diff --git a/lib/tempest b/lib/tempest index 95b138c6bf..9f1b677eba 100644 --- a/lib/tempest +++ b/lib/tempest @@ -342,7 +342,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method - if ! is_service_enabled n-cell && ! is_service_enabled neutron; then + if ! is_service_enabled neutron; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi @@ -391,24 +391,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} fi - if is_service_enabled n-cell; then - # Cells doesn't support shelving/unshelving - iniset $TEMPEST_CONFIG compute-feature-enabled shelve False - # Cells doesn't support hot-plugging virtual interfaces. - iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False - # Cells v1 doesn't support the rescue/unrescue tests in Tempest - iniset $TEMPEST_CONFIG compute-feature-enabled rescue False - - if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - # Cells supports resize but does not currently work with devstack - # because of the custom flavors created for Tempest runs which are - # not in the cells database. - # TODO(mriedem): work on adding a nova-manage command to sync - # flavors into the cells database. - iniset $TEMPEST_CONFIG compute-feature-enabled resize False - fi - fi - if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi @@ -554,8 +536,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled shelve False iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False iniset $TEMPEST_CONFIG compute-feature-enabled suspend False - elif ! 
is_service_enabled n-cell; then - # cells v1 does not support swapping volumes + else iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi fi diff --git a/stack.sh b/stack.sh index fa5b43ce38..7230c1f5cf 100755 --- a/stack.sh +++ b/stack.sh @@ -1167,10 +1167,11 @@ if is_service_enabled neutron; then fi fi + # Nova # ---- -if is_service_enabled n-net q-dhcp; then +if is_service_enabled q-dhcp; then # Delete traces of nova networks from prior runs # Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) @@ -1182,12 +1183,6 @@ if is_service_enabled n-net q-dhcp; then clean_iptables - if is_service_enabled n-net; then - rm -rf ${NOVA_STATE_PATH}/networks - sudo mkdir -p ${NOVA_STATE_PATH}/networks - safe_chown -R ${STACK_USER} ${NOVA_STATE_PATH}/networks - fi - # Force IP forwarding on, just in case sudo sysctl -w net.ipv4.ip_forward=1 fi @@ -1226,13 +1221,11 @@ if is_service_enabled nova; then init_nova # Additional Nova configuration that is dependent on other services + # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If + # not, remove the if here if is_service_enabled neutron; then configure_neutron_nova - elif is_service_enabled n-net; then - create_nova_conf_nova_network fi - - init_nova_cells fi @@ -1314,20 +1307,6 @@ elif is_service_enabled q-svc; then echo_summary "Starting Neutron" configure_neutron_after_post_config start_neutron_service_and_check -elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then - NM_CONF=${NOVA_CONF} - if is_service_enabled n-cell; then - NM_CONF=${NOVA_CELLS_CONF} - fi - - # Create a small network - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS - - # Create some floating ips - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME - - # Create a second pool - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi # Start placement before any of the service that are likely to want diff --git a/stackrc b/stackrc index 2291e3c3d8..3432fb6228 100644 --- a/stackrc +++ b/stackrc @@ -65,7 +65,7 @@ if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth,n-api-meta + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta # Placement service needed for Nova ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova @@ -846,7 +846,6 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22} FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP_IFACE=${HOST_IP_IFACE:-} HOST_IP=${HOST_IP:-} HOST_IPV6=${HOST_IPV6:-} From 283e86fbb59aa11afb21b916d3a106e442baee0e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 22 May 2019 10:38:28 +0100 Subject: [PATCH 1089/1936] nova: Set '[neutron] default_floating_pool' instead In change I2ce8ff3d7c33a402b8af50182ec01f512859c388, we duplicated the 'default_floating_pool' option, found in the '[DEFAULT]' group, to the '[neutron]' group. 
This allowed us to continue with our deprecation plans for the former option, which should be retired along with nova-network. Update the nova lib module so it'll set the new option, which we can safely assume to be the correct one now that we've removed support for cells v1 and nova-network. Change-Id: If9a02b640e6c2e1300c7b11b7552ba13c1496d79 Signed-off-by: Stephen Finucane --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 2efe7cb9b4..eebf170509 100644 --- a/lib/nova +++ b/lib/nova @@ -388,7 +388,7 @@ function create_nova_conf { if [[ $SCHEDULER == "filter_scheduler" ]]; then iniset $NOVA_CONF scheduler workers "$API_WORKERS" fi - iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" + iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" iniset $NOVA_CONF DEFAULT use_ipv6 "True" From a23e4153ef6711ebddc05290bb19c4c2d4fc2c18 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 22 May 2019 10:46:02 +0100 Subject: [PATCH 1090/1936] nova: Stop setting '[DEFAULT] use_ipv6' Change I188fc2cd1b26fe7a71804f7e7d66b111d6f15e30 in nova stopped us respecting this when generating the network templates injected into instances on boot. With the removal of nova-network, there is no longer any other reason to set this. Change-Id: I925b7c6c23133cd5a835960f4507c979f615d78e Signed-off-by: Stephen Finucane --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index eebf170509..69364909ba 100644 --- a/lib/nova +++ b/lib/nova @@ -391,7 +391,6 @@ function create_nova_conf { iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" - iniset $NOVA_CONF DEFAULT use_ipv6 "True" else iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" fi From 1b15176b0598f00c442c3b40985df30204dc6963 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 31 May 2019 16:23:57 +0200 Subject: [PATCH 1091/1936] Use neutron-legacy on subnodes in devstack zuul job There are still some issues with lib/neutron, so neutron-legacy is used on the controller node in multinode jobs and in single node jobs. But in "group-vars" in the devstack job it was configured to use lib/neutron, which can cause some problems in multinode jobs. So let's switch to neutron-legacy on the subnodes as well until lib/neutron is ready to use everywhere. Change-Id: I0d7f9f2baaee2836a719f199939156bd4f53f778 --- .zuul.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 60e3a14f14..34ee8cda91 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -460,7 +460,10 @@ n-cpu: true placement-client: true # Neutron services - neutron-agent: true + # We need to keep using the neutron-legacy based services for + # now until all issues with the new lib/neutron code are solved + q-agt: true + # neutron-agent: true # Cinder services c-bak: true c-vol: true From 16395949385356c4ab3c82227cd6c6a92496d356 Mon Sep 17 00:00:00 2001 From: shenjiatong Date: Mon, 3 Jun 2019 10:52:01 +0800 Subject: [PATCH 1092/1936] remove duplicate entries under title guides in table of contents Right now there are duplicate titles under Contents --> Guides, as shown at https://docs.openstack.org/devstack/latest/. Although they forward to different pages, it is still a little confusing for users.
This change hides the toctree in guides.rst; users can click the links on the page to jump to the detail pages and read more. Change-Id: I2f3abeca7f56a3aedeabb48630ed2c61635b93cd --- doc/source/guides.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/guides.rst b/doc/source/guides.rst index 82e0dd6ac6..ca134c4520 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -10,6 +10,7 @@ Walk through various setups used by stackers .. toctree:: :glob: + :hidden: :maxdepth: 1 guides/single-vm From 5e2d0e0bb5beffc23087383e7923dabaa2004a98 Mon Sep 17 00:00:00 2001 From: Alex Monk Date: Tue, 4 Jun 2019 01:21:44 +0100 Subject: [PATCH 1093/1936] Permit use of sudo-ldap instead of sudo package If the sudo-ldap package is providing the sudo command instead of the plain sudo package, accept that instead of breaking the system and requiring direct root login intervention to fix things. Change-Id: I45d7e4617bd59e72b4f0bf2e91750a6830e2a010 --- stack.sh | 2 +- tools/create-stack-user.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index fa5b43ce38..9b3097ba23 100755 --- a/stack.sh +++ b/stack.sh @@ -247,7 +247,7 @@ disable_negated_services # -------------- # We're not as **root** so make sure ``sudo`` is available -is_package_installed sudo || install_package sudo +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index c0b7ac70aa..919cacb036 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -32,7 +32,7 @@ GetDistro source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` -is_package_installed sudo || install_package sudo +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo [[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." From fc207050988e236c32b0736f44c6bf2883cea95f Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 5 Jun 2019 08:24:45 +0000 Subject: [PATCH 1094/1936] Add setting of placement microversion on tempest conf Tempest now supports a placement microversion setting so that tests can call APIs with a specific placement microversion. This commit adds the setting of the placement API microversion to the Tempest conf. Change-Id: Ie04aa993ec7a1495740d9267b076a40f4291e25e --- lib/tempest | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lib/tempest b/lib/tempest index 9f1b677eba..6afed0eaf1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -513,6 +513,24 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi + # Placement Features + # Set the microversion range for placement. + # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. + # NOTE- To avoid microversion tests failure on stable branch, we need to change "tempest_placement_max_microversion" + # for stable branch on each release which should be changed from "latest" to max supported version of that release.
+ local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None} + local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"} + if [ "$tempest_placement_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement min_microversion + else + iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion + fi + if [ "$tempest_placement_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement max_microversion + else + iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion + fi + # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From 58abccb89f46576b573733fd424056b2f5a0203d Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 24 May 2019 15:09:01 -0400 Subject: [PATCH 1095/1936] Add nova-multi-cell job to experimental queue Nova has had a multi-cell job (nova-multi-cell) since the Train release but is currently non-voting in the check queue for nova changes. This change adds the job to the experimental queue for devstack changes so we can test changes to devstack and make sure they work for the multi-cell job. Change-Id: Icf31baf6fd4313aec5ecfb9e8f9cbcef1ff7f61d --- .zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 3c516dd350..2dd0b9f4ef 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -701,9 +701,12 @@ # stable engouh with uwsgi. # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. # Next cycle we can remove this if everything run out stable enough. + # * nova-multi-cell: maintained by nova and currently non-voting in the + # check queue for nova changes but relies on devstack configuration experimental: jobs: + - nova-multi-cell - nova-next - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi From 6e5b1384665c0c039222ff6cbea5a120e60e89d1 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Wed, 9 Jan 2019 17:00:45 -0800 Subject: [PATCH 1096/1936] Enable vlan networking for newer neutron plugin Ironic's CI makes extensive use of VLAN based networking and the newer neutron plugin hardcodes the tenant networking type to vxlan which is naturally problematic. It also lacks the ability to set the necessary constraints for vlan networking which are added for vxlan networking. This patch enables the type of tenant networking to be defined as vlan, and enables for a physical network vlan range mapping setting to be configured which is required for a vlan to be allocated upon network creation. Change-Id: I55874c1ce82898e9dfb81505d8f3b14abde33579 --- lib/neutron | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 947c491ec1..763a906e52 100644 --- a/lib/neutron +++ b/lib/neutron @@ -100,6 +100,17 @@ NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FIL PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# Network type - default vxlan, however enables vlan based jobs to override +# using the legacy environment variable as well as a new variable in greater +# alignment with the naming scheme of this plugin. +NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan} + +NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}} + +# Physical network for VLAN network usage. 
+NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} + + # Additional neutron api config files declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS @@ -203,9 +214,8 @@ function configure_neutron_new { configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova - # Configure VXLAN - # TODO(sc68cal) not hardcode? - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan + # Configure tenant network type + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE local mech_drivers="openvswitch" if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then @@ -217,6 +227,9 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public + if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then + iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} + fi if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then neutron_ml2_extension_driver_add port_security fi From b57757ae14a54e78ebd533198564d24af77c51da Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 3 Jun 2019 16:08:09 -0400 Subject: [PATCH 1097/1936] Add NOVA_NOTIFICATION_FORMAT variable Nova change https://review.opendev.org/603079/ changed the default configuration to send only unversioned notfications rather than both versioned and unversioned notifications. This could break unsuspecting downstream projects (like Watcher) whose CI jobs are not explicitly configuring nova for the types of notifications they need but are just relying on getting both per the previous default of the config option. This adds a variable which defaults to "unversioned" to match the nova default but allows downstream CI jobs to easily configure another value. Needed by https://review.opendev.org/663332/ Change-Id: Ied9d50b07c368d5c2be658c744f340a8d1ee41e0 --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 6ce1dcc4cc..7d66b68cfb 100644 --- a/lib/nova +++ b/lib/nova @@ -187,6 +187,10 @@ NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} +# Format for notifications. Nova defaults to "unversioned" since Train. +# Other options include "versioned" and "both". +NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} + # Functions # --------- @@ -535,6 +539,7 @@ function create_nova_conf { # enable notifications, but it will allow them to function when enabled. iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2" iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url) + iniset $NOVA_CONF notifications notification_format "$NOVA_NOTIFICATION_FORMAT" iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" From 5b8656e748dca1c822556b27f51d67ab238e0721 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Tue, 18 Jun 2019 23:38:28 +0000 Subject: [PATCH 1098/1936] Remove RetryFilter from config Since Ic0a03e89903bf925638fa26cca3dac7db710dca3 RetryFilter has been deprecated. So we should not enable the RetryFilter on our tests. 
Change-Id: I48c2c4d0714f582af8948dc88b48df1c2c62fcd2 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8220e0f10b..0a091e3e60 100644 --- a/lib/nova +++ b/lib/nova @@ -103,7 +103,7 @@ SCHEDULER=${SCHEDULER:-filter_scheduler} # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. -NOVA_FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf From f7302e1af10938a0ffc259ab9bfd3919693fe36b Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 19 Jun 2019 11:49:40 -0500 Subject: [PATCH 1099/1936] Fix configuration doc block formatting Many of the code blocks in the configuration documentation had extra leading spaces. This resulted in the blocks being both code block formatted as well as blockquoted in the output. This patch removes leading spaces and some minor cleanup to get the formatted output correct. Change-Id: Ic4dfb49c547d51e16b673bc88d7b2b1a907e3258 Signed-off-by: Sean McGinnis --- doc/source/configuration.rst | 146 +++++++++++++++-------------------- 1 file changed, 63 insertions(+), 83 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 5e8004dc44..098e994ccd 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -224,25 +224,22 @@ check out. These may be overridden in ``local.conf`` to pull source from a different repo for testing, such as a Gerrit branch proposal. ``GIT_BASE`` points to the primary repository server. - :: +:: - NOVA_REPO=$GIT_BASE/openstack/nova.git - NOVA_BRANCH=master + NOVA_REPO=$GIT_BASE/openstack/nova.git + NOVA_BRANCH=master To pull a branch directly from Gerrit, get the repo and branch from -the Gerrit review page: - - :: - - git fetch https://review.openstack.org/p/openstack/nova refs/changes/50/5050/1 && git checkout FETCH_HEAD +the Gerrit review page:: - The repo is the stanza following ``fetch`` and the branch is the - stanza following that: + git fetch https://review.openstack.org/p/openstack/nova \ + refs/changes/50/5050/1 && git checkout FETCH_HEAD - :: +The repo is the stanza following ``fetch`` and the branch is the +stanza following that:: - NOVA_REPO=https://review.openstack.org/p/openstack/nova - NOVA_BRANCH=refs/changes/50/5050/1 + NOVA_REPO=https://review.openstack.org/p/openstack/nova + NOVA_BRANCH=refs/changes/50/5050/1 Installation Directory @@ -255,9 +252,9 @@ By setting it early in the ``localrc`` section you can reference it in later variables. It can be useful to set it even though it is not changed from the default value. - :: +:: - DEST=/opt/stack + DEST=/opt/stack Logging ------- @@ -271,21 +268,21 @@ runs. It can be sent to a file in addition to the console by setting timestamp will be appended to the given filename for each run of ``stack.sh``. - :: +:: - LOGFILE=$DEST/logs/stack.sh.log + LOGFILE=$DEST/logs/stack.sh.log Old log files are cleaned automatically if ``LOGDAYS`` is set to the number of days of old log files to keep. - :: +:: - LOGDAYS=1 + LOGDAYS=1 Some coloring is used during the DevStack runs to make it easier to see what is going on. 
This can be disabled with:: - LOG_COLOR=False + LOG_COLOR=False When using the logfile, by default logs are sent to the console and the file. You can set ``VERBOSE`` to ``false`` if you only wish the @@ -317,12 +314,12 @@ Example Logging Configuration For example, non-interactive installs probably wish to save output to a file, keep service logs and disable color in the stored files. - :: +:: - [[local|localrc]] - DEST=/opt/stack/ - LOGFILE=$LOGDIR/stack.sh.log - LOG_COLOR=False + [[local|localrc]] + DEST=/opt/stack/ + LOGFILE=$LOGDIR/stack.sh.log + LOG_COLOR=False Database Backend ---------------- @@ -330,12 +327,10 @@ Database Backend Multiple database backends are available. The available databases are defined in the lib/databases directory. ``mysql`` is the default database, choose a different one by putting the -following in the ``localrc`` section: - - :: +following in the ``localrc`` section:: - disable_service mysql - enable_service postgresql + disable_service mysql + enable_service postgresql ``mysql`` is the default database. @@ -347,11 +342,9 @@ backends may be available via external plugins. Enabling or disabling RabbitMQ is handled via the usual service functions and ``ENABLED_SERVICES``. -Example disabling RabbitMQ in ``local.conf``: +Example disabling RabbitMQ in ``local.conf``:: -:: - - disable_service rabbit + disable_service rabbit Apache Frontend @@ -370,34 +363,23 @@ override toggle available that can be set in your ``local.conf``. Keystone is run under Apache with ``mod_wsgi`` by default. -Example (Keystone) - -:: +Example (Keystone):: KEYSTONE_USE_MOD_WSGI="True" -Example (Nova): - -:: +Example (Nova):: NOVA_USE_MOD_WSGI="True" -Example (Swift): - -:: +Example (Swift):: SWIFT_USE_MOD_WSGI="True" -Example (Heat): - -:: +Example (Heat):: HEAT_USE_MOD_WSGI="True" - -Example (Cinder): - -:: +Example (Cinder):: CINDER_USE_MOD_WSGI="True" @@ -413,9 +395,9 @@ system you can have devstack install it from upstream, or from local git trees by specifying it in ``LIBS_FROM_GIT``. Multiple libraries can be specified as a comma separated list. - :: +:: - LIBS_FROM_GIT=python-keystoneclient,oslo.config + LIBS_FROM_GIT=python-keystoneclient,oslo.config Setting the variable to ``ALL`` will activate the download for all libraries. @@ -431,9 +413,9 @@ Each entry in the ``PROJECT_VENV`` array contains the directory name of a venv to be used for the project. The array index is the project name. Multiple projects can use the same venv if desired. - :: +:: - PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv ``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional packages to be installed into each venv. Often projects will not have @@ -442,9 +424,9 @@ are 'optional' requirements, i.e. only needed for certain configurations. By default, the enabled databases will have their Python bindings added when they are enabled. - :: +:: - ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" + ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" Use python3 ------------ @@ -453,9 +435,9 @@ By default ``stack.sh`` uses python2 (the exact version set by the ``PYTHON2_VERSION``). This can be overriden so devstack will run python3 (the exact version set by ``PYTHON3_VERSION``). - :: +:: - USE_PYTHON3=True + USE_PYTHON3=True A clean install every time -------------------------- @@ -465,9 +447,9 @@ exist in ``$DEST``. ``stack.sh`` will freshen each repo on each run if ``RECLONE`` is set to ``yes``. 
This avoids having to manually remove repos in order to get the current branch from ``$GIT_BASE``. - :: +:: - RECLONE=yes + RECLONE=yes Upgrade packages installed by pip --------------------------------- @@ -478,9 +460,9 @@ requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing required Python packages will be upgraded to the most recent version that matches requirements. - :: +:: - PIP_UPGRADE=True + PIP_UPGRADE=True Guest Images ------------ @@ -494,11 +476,11 @@ their testing-requirements in ``stack.sh``. Setting these default images; in that case, you will want to populate ``IMAGE_URLS`` with sufficient images to satisfy testing-requirements. - :: +:: - DOWNLOAD_DEFAULT_IMAGES=False - IMAGE_URLS="http://foo.bar.com/image.qcow," - IMAGE_URLS+="http://foo.bar.com/image2.qcow" + DOWNLOAD_DEFAULT_IMAGES=False + IMAGE_URLS="http://foo.bar.com/image.qcow," + IMAGE_URLS+="http://foo.bar.com/image2.qcow" Instance Type @@ -517,9 +499,9 @@ KVM on Power with QEMU 2.4 requires 512 MB to load the firmware - running instances on ppc64/ppc64le can choose one of the default created flavors as follows: - :: +:: - DEFAULT_INSTANCE_TYPE=m1.tiny + DEFAULT_INSTANCE_TYPE=m1.tiny IP Version @@ -530,19 +512,19 @@ IPv4, IPv6, or dual-stack self-service project data-network by with either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` respectively. - :: +:: - IP_VERSION=4+6 + IP_VERSION=4+6 The following optional variables can be used to alter the default IPv6 behavior: - :: +:: - IPV6_RA_MODE=slaac - IPV6_ADDRESS_MODE=slaac - IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56 - IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1 + IPV6_RA_MODE=slaac + IPV6_ADDRESS_MODE=slaac + IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56 + IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1 *Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be configured with any valid IPv6 prefix. The default values make @@ -565,11 +547,9 @@ address. The default value for this setting is ``4``. Dual-mode support, for example ``4+6`` is not currently supported. ``HOST_IPV6`` can -optionally be used to alter the default IPv6 address - - :: +optionally be used to alter the default IPv6 address:: - HOST_IPV6=${some_local_ipv6_address} + HOST_IPV6=${some_local_ipv6_address} Multi-node setup ~~~~~~~~~~~~~~~~ @@ -671,11 +651,11 @@ set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set with ``VOLUME_BACKING_FILE_SIZE``. - :: +:: - VOLUME_GROUP_NAME="stack-volumes" - VOLUME_NAME_PREFIX="volume-" - VOLUME_BACKING_FILE_SIZE=24G + VOLUME_GROUP_NAME="stack-volumes" + VOLUME_NAME_PREFIX="volume-" + VOLUME_BACKING_FILE_SIZE=24G Keystone From d5634c4723df4f6b597578a8588ad3730e4b5bbc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 4 Jun 2019 17:30:13 +1000 Subject: [PATCH 1100/1936] Fix plugin doc generation for opendev transition Update the server to opendev and update paths for gitea, along with any other references. Switch to a blacklist where we just remove stackforge; this leaves all the new namespaces like x/ and starlingx/ being checked. Use a common session for checking for the plugin file which makes it a *lot* faster. 
Remove unsed "plugins" array variable Regenerate the file Change-Id: Ie3e615ba352a389da22e129c5c67cf6abd8cfdc8 --- doc/source/plugin-registry.rst | 369 ++++++++++++------------ tools/generate-devstack-plugins-list.py | 25 +- tools/generate-devstack-plugins-list.sh | 10 +- 3 files changed, 204 insertions(+), 200 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 93c16f454c..ea8c31807d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -21,189 +21,190 @@ available DevStack plugins. This includes, but is not limited to, official OpenStack projects. -====================================== === -Plugin Name URL -====================================== === -almanach `https://git.openstack.org/openstack/almanach `__ -aodh `https://git.openstack.org/openstack/aodh `__ -apmec `https://git.openstack.org/openstack/apmec `__ -barbican `https://git.openstack.org/openstack/barbican `__ -bilean `https://git.openstack.org/openstack/bilean `__ -blazar `https://git.openstack.org/openstack/blazar `__ -broadview-collector `https://git.openstack.org/openstack/broadview-collector `__ -castellan-ui `https://git.openstack.org/openstack/castellan-ui `__ -ceilometer `https://git.openstack.org/openstack/ceilometer `__ -ceilometer-powervm `https://git.openstack.org/openstack/ceilometer-powervm `__ -cinderlib `https://git.openstack.org/openstack/cinderlib `__ -cloudkitty `https://git.openstack.org/openstack/cloudkitty `__ -collectd-openstack-plugins `https://git.openstack.org/openstack/collectd-openstack-plugins `__ -congress `https://git.openstack.org/openstack/congress `__ -cyborg `https://git.openstack.org/openstack/cyborg `__ -designate `https://git.openstack.org/openstack/designate `__ -devstack-plugin-additional-pkg-repos `https://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ -devstack-plugin-amqp1 `https://git.openstack.org/openstack/devstack-plugin-amqp1 `__ -devstack-plugin-bdd `https://git.openstack.org/openstack/devstack-plugin-bdd `__ -devstack-plugin-ceph `https://git.openstack.org/openstack/devstack-plugin-ceph `__ -devstack-plugin-container `https://git.openstack.org/openstack/devstack-plugin-container `__ -devstack-plugin-glusterfs `https://git.openstack.org/openstack/devstack-plugin-glusterfs `__ -devstack-plugin-hdfs `https://git.openstack.org/openstack/devstack-plugin-hdfs `__ -devstack-plugin-kafka `https://git.openstack.org/openstack/devstack-plugin-kafka `__ -devstack-plugin-libvirt-qemu `https://git.openstack.org/openstack/devstack-plugin-libvirt-qemu `__ -devstack-plugin-mariadb `https://git.openstack.org/openstack/devstack-plugin-mariadb `__ -devstack-plugin-nfs `https://git.openstack.org/openstack/devstack-plugin-nfs `__ -devstack-plugin-pika `https://git.openstack.org/openstack/devstack-plugin-pika `__ -devstack-plugin-sheepdog `https://git.openstack.org/openstack/devstack-plugin-sheepdog `__ -devstack-plugin-vmax `https://git.openstack.org/openstack/devstack-plugin-vmax `__ -devstack-plugin-zmq `https://git.openstack.org/openstack/devstack-plugin-zmq `__ -dragonflow `https://git.openstack.org/openstack/dragonflow `__ -drbd-devstack `https://git.openstack.org/openstack/drbd-devstack `__ -ec2-api `https://git.openstack.org/openstack/ec2-api `__ -freezer `https://git.openstack.org/openstack/freezer `__ -freezer-api `https://git.openstack.org/openstack/freezer-api `__ -freezer-tempest-plugin `https://git.openstack.org/openstack/freezer-tempest-plugin `__ -freezer-web-ui 
`https://git.openstack.org/openstack/freezer-web-ui `__ -gce-api `https://git.openstack.org/openstack/gce-api `__ -glare `https://git.openstack.org/openstack/glare `__ -group-based-policy `https://git.openstack.org/openstack/group-based-policy `__ -gyan `https://git.openstack.org/openstack/gyan `__ -heat `https://git.openstack.org/openstack/heat `__ -heat-dashboard `https://git.openstack.org/openstack/heat-dashboard `__ -horizon-mellanox `https://git.openstack.org/openstack/horizon-mellanox `__ -ironic `https://git.openstack.org/openstack/ironic `__ -ironic-inspector `https://git.openstack.org/openstack/ironic-inspector `__ -ironic-staging-drivers `https://git.openstack.org/openstack/ironic-staging-drivers `__ -ironic-ui `https://git.openstack.org/openstack/ironic-ui `__ -karbor `https://git.openstack.org/openstack/karbor `__ -karbor-dashboard `https://git.openstack.org/openstack/karbor-dashboard `__ -keystone `https://git.openstack.org/openstack/keystone `__ -kingbird `https://git.openstack.org/openstack/kingbird `__ -kuryr-kubernetes `https://git.openstack.org/openstack/kuryr-kubernetes `__ -kuryr-libnetwork `https://git.openstack.org/openstack/kuryr-libnetwork `__ -kuryr-tempest-plugin `https://git.openstack.org/openstack/kuryr-tempest-plugin `__ -magnum `https://git.openstack.org/openstack/magnum `__ -magnum-ui `https://git.openstack.org/openstack/magnum-ui `__ -manila `https://git.openstack.org/openstack/manila `__ -manila-tempest-plugin `https://git.openstack.org/openstack/manila-tempest-plugin `__ -manila-ui `https://git.openstack.org/openstack/manila-ui `__ -masakari `https://git.openstack.org/openstack/masakari `__ -meteos `https://git.openstack.org/openstack/meteos `__ -meteos-ui `https://git.openstack.org/openstack/meteos-ui `__ -mistral `https://git.openstack.org/openstack/mistral `__ -mixmatch `https://git.openstack.org/openstack/mixmatch `__ -mogan `https://git.openstack.org/openstack/mogan `__ -mogan-ui `https://git.openstack.org/openstack/mogan-ui `__ -monasca-analytics `https://git.openstack.org/openstack/monasca-analytics `__ -monasca-api `https://git.openstack.org/openstack/monasca-api `__ -monasca-ceilometer `https://git.openstack.org/openstack/monasca-ceilometer `__ -monasca-events-api `https://git.openstack.org/openstack/monasca-events-api `__ -monasca-log-api `https://git.openstack.org/openstack/monasca-log-api `__ -monasca-tempest-plugin `https://git.openstack.org/openstack/monasca-tempest-plugin `__ -monasca-transform `https://git.openstack.org/openstack/monasca-transform `__ -murano `https://git.openstack.org/openstack/murano `__ -networking-6wind `https://git.openstack.org/openstack/networking-6wind `__ -networking-ansible `https://git.openstack.org/openstack/networking-ansible `__ -networking-arista `https://git.openstack.org/openstack/networking-arista `__ -networking-bagpipe `https://git.openstack.org/openstack/networking-bagpipe `__ -networking-baremetal `https://git.openstack.org/openstack/networking-baremetal `__ -networking-bgpvpn `https://git.openstack.org/openstack/networking-bgpvpn `__ -networking-brocade `https://git.openstack.org/openstack/networking-brocade `__ -networking-calico `https://git.openstack.org/openstack/networking-calico `__ -networking-cisco `https://git.openstack.org/openstack/networking-cisco `__ -networking-cumulus `https://git.openstack.org/openstack/networking-cumulus `__ -networking-dpm `https://git.openstack.org/openstack/networking-dpm `__ -networking-fortinet `https://git.openstack.org/openstack/networking-fortinet `__ 
-networking-generic-switch `https://git.openstack.org/openstack/networking-generic-switch `__ -networking-hpe `https://git.openstack.org/openstack/networking-hpe `__ -networking-huawei `https://git.openstack.org/openstack/networking-huawei `__ -networking-hyperv `https://git.openstack.org/openstack/networking-hyperv `__ -networking-infoblox `https://git.openstack.org/openstack/networking-infoblox `__ -networking-l2gw `https://git.openstack.org/openstack/networking-l2gw `__ -networking-lagopus `https://git.openstack.org/openstack/networking-lagopus `__ -networking-midonet `https://git.openstack.org/openstack/networking-midonet `__ -networking-mlnx `https://git.openstack.org/openstack/networking-mlnx `__ -networking-nec `https://git.openstack.org/openstack/networking-nec `__ -networking-odl `https://git.openstack.org/openstack/networking-odl `__ -networking-omnipath `https://git.openstack.org/openstack/networking-omnipath `__ -networking-onos `https://git.openstack.org/openstack/networking-onos `__ -networking-opencontrail `https://git.openstack.org/openstack/networking-opencontrail `__ -networking-ovn `https://git.openstack.org/openstack/networking-ovn `__ -networking-ovs-dpdk `https://git.openstack.org/openstack/networking-ovs-dpdk `__ -networking-plumgrid `https://git.openstack.org/openstack/networking-plumgrid `__ -networking-powervm `https://git.openstack.org/openstack/networking-powervm `__ -networking-sfc `https://git.openstack.org/openstack/networking-sfc `__ -networking-spp `https://git.openstack.org/openstack/networking-spp `__ -networking-vpp `https://git.openstack.org/openstack/networking-vpp `__ -networking-vsphere `https://git.openstack.org/openstack/networking-vsphere `__ -neutron `https://git.openstack.org/openstack/neutron `__ -neutron-classifier `https://git.openstack.org/openstack/neutron-classifier `__ -neutron-dynamic-routing `https://git.openstack.org/openstack/neutron-dynamic-routing `__ -neutron-fwaas `https://git.openstack.org/openstack/neutron-fwaas `__ -neutron-fwaas-dashboard `https://git.openstack.org/openstack/neutron-fwaas-dashboard `__ -neutron-lbaas `https://git.openstack.org/openstack/neutron-lbaas `__ -neutron-lbaas-dashboard `https://git.openstack.org/openstack/neutron-lbaas-dashboard `__ -neutron-tempest-plugin `https://git.openstack.org/openstack/neutron-tempest-plugin `__ -neutron-vpnaas `https://git.openstack.org/openstack/neutron-vpnaas `__ -neutron-vpnaas-dashboard `https://git.openstack.org/openstack/neutron-vpnaas-dashboard `__ -nova-dpm `https://git.openstack.org/openstack/nova-dpm `__ -nova-lxd `https://git.openstack.org/openstack/nova-lxd `__ -nova-mksproxy `https://git.openstack.org/openstack/nova-mksproxy `__ -nova-powervm `https://git.openstack.org/openstack/nova-powervm `__ -oaktree `https://git.openstack.org/openstack/oaktree `__ -octavia `https://git.openstack.org/openstack/octavia `__ -octavia-dashboard `https://git.openstack.org/openstack/octavia-dashboard `__ -omni `https://git.openstack.org/openstack/omni `__ -openstacksdk `https://git.openstack.org/openstack/openstacksdk `__ -os-faults `https://git.openstack.org/openstack/os-faults `__ -os-xenapi `https://git.openstack.org/openstack/os-xenapi `__ -osprofiler `https://git.openstack.org/openstack/osprofiler `__ -oswin-tempest-plugin `https://git.openstack.org/openstack/oswin-tempest-plugin `__ -panko `https://git.openstack.org/openstack/panko `__ -patrole `https://git.openstack.org/openstack/patrole `__ -picasso `https://git.openstack.org/openstack/picasso `__ -qinling 
`https://git.openstack.org/openstack/qinling `__ -qinling-dashboard `https://git.openstack.org/openstack/qinling-dashboard `__ -rally `https://git.openstack.org/openstack/rally `__ -rally-openstack `https://git.openstack.org/openstack/rally-openstack `__ -rsd-virt-for-nova `https://git.openstack.org/openstack/rsd-virt-for-nova `__ -sahara `https://git.openstack.org/openstack/sahara `__ -sahara-dashboard `https://git.openstack.org/openstack/sahara-dashboard `__ -scalpels `https://git.openstack.org/openstack/scalpels `__ -searchlight `https://git.openstack.org/openstack/searchlight `__ -searchlight-ui `https://git.openstack.org/openstack/searchlight-ui `__ -senlin `https://git.openstack.org/openstack/senlin `__ -slogging `https://git.openstack.org/openstack/slogging `__ -solum `https://git.openstack.org/openstack/solum `__ -stackube `https://git.openstack.org/openstack/stackube `__ -storlets `https://git.openstack.org/openstack/storlets `__ -stx-config `https://git.openstack.org/openstack/stx-config `__ -stx-fault `https://git.openstack.org/openstack/stx-fault `__ -stx-ha `https://git.openstack.org/openstack/stx-ha `__ -stx-integ `https://git.openstack.org/openstack/stx-integ `__ -stx-metal `https://git.openstack.org/openstack/stx-metal `__ -stx-nfv `https://git.openstack.org/openstack/stx-nfv `__ -stx-update `https://git.openstack.org/openstack/stx-update `__ -tacker `https://git.openstack.org/openstack/tacker `__ -tap-as-a-service `https://git.openstack.org/openstack/tap-as-a-service `__ -tap-as-a-service-dashboard `https://git.openstack.org/openstack/tap-as-a-service-dashboard `__ -tatu `https://git.openstack.org/openstack/tatu `__ -telemetry-tempest-plugin `https://git.openstack.org/openstack/telemetry-tempest-plugin `__ -tobiko `https://git.openstack.org/openstack/tobiko `__ -tricircle `https://git.openstack.org/openstack/tricircle `__ -trio2o `https://git.openstack.org/openstack/trio2o `__ -trove `https://git.openstack.org/openstack/trove `__ -trove-dashboard `https://git.openstack.org/openstack/trove-dashboard `__ -valet `https://git.openstack.org/openstack/valet `__ -vitrage `https://git.openstack.org/openstack/vitrage `__ -vitrage-dashboard `https://git.openstack.org/openstack/vitrage-dashboard `__ -vitrage-tempest-plugin `https://git.openstack.org/openstack/vitrage-tempest-plugin `__ -vmware-nsx `https://git.openstack.org/openstack/vmware-nsx `__ -vmware-vspc `https://git.openstack.org/openstack/vmware-vspc `__ -watcher `https://git.openstack.org/openstack/watcher `__ -watcher-dashboard `https://git.openstack.org/openstack/watcher-dashboard `__ -zaqar `https://git.openstack.org/openstack/zaqar `__ -zaqar-ui `https://git.openstack.org/openstack/zaqar-ui `__ -zun `https://git.openstack.org/openstack/zun `__ -zun-ui `https://git.openstack.org/openstack/zun-ui `__ -====================================== === +======================================== === +Plugin Name URL +======================================== === +openstack/aodh `https://opendev.org/openstack/aodh `__ +openstack/barbican `https://opendev.org/openstack/barbican `__ +openstack/blazar `https://opendev.org/openstack/blazar `__ +openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ +openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ +openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ +openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ +openstack/congress `https://opendev.org/openstack/congress `__ +openstack/cyborg 
`https://opendev.org/openstack/cyborg `__ +openstack/designate `https://opendev.org/openstack/designate `__ +openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ +openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ +openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ +openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ +openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ +openstack/dragonflow `https://opendev.org/openstack/dragonflow `__ +openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ +openstack/freezer `https://opendev.org/openstack/freezer `__ +openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ +openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ +openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/heat `https://opendev.org/openstack/heat `__ +openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ +openstack/ironic `https://opendev.org/openstack/ironic `__ +openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ +openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ +openstack/karbor `https://opendev.org/openstack/karbor `__ +openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard `__ +openstack/keystone `https://opendev.org/openstack/keystone `__ +openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ +openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ +openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin `__ +openstack/magnum `https://opendev.org/openstack/magnum `__ +openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ +openstack/manila `https://opendev.org/openstack/manila `__ +openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-tempest-plugin `__ +openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ +openstack/masakari `https://opendev.org/openstack/masakari `__ +openstack/mistral `https://opendev.org/openstack/mistral `__ +openstack/monasca-analytics `https://opendev.org/openstack/monasca-analytics `__ +openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ +openstack/monasca-ceilometer `https://opendev.org/openstack/monasca-ceilometer `__ +openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ +openstack/monasca-log-api `https://opendev.org/openstack/monasca-log-api `__ +openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ +openstack/monasca-transform `https://opendev.org/openstack/monasca-transform `__ +openstack/murano `https://opendev.org/openstack/murano `__ +openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ +openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ +openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ +openstack/networking-calico `https://opendev.org/openstack/networking-calico `__ +openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ +openstack/networking-l2gw 
`https://opendev.org/openstack/networking-l2gw `__ +openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ +openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ +openstack/networking-onos `https://opendev.org/openstack/networking-onos `__ +openstack/networking-ovn `https://opendev.org/openstack/networking-ovn `__ +openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ +openstack/neutron `https://opendev.org/openstack/neutron `__ +openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ +openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ +openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ +openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ +openstack/octavia `https://opendev.org/openstack/octavia `__ +openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ +openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ +openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ +openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ +openstack/panko `https://opendev.org/openstack/panko `__ +openstack/patrole `https://opendev.org/openstack/patrole `__ +openstack/qinling `https://opendev.org/openstack/qinling `__ +openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard `__ +openstack/rally `https://opendev.org/openstack/rally `__ +openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ +openstack/sahara `https://opendev.org/openstack/sahara `__ +openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ +openstack/searchlight `https://opendev.org/openstack/searchlight `__ +openstack/searchlight-ui `https://opendev.org/openstack/searchlight-ui `__ +openstack/senlin `https://opendev.org/openstack/senlin `__ +openstack/shade `https://opendev.org/openstack/shade `__ +openstack/solum `https://opendev.org/openstack/solum `__ +openstack/storlets `https://opendev.org/openstack/storlets `__ +openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ +openstack/tricircle `https://opendev.org/openstack/tricircle `__ +openstack/trove `https://opendev.org/openstack/trove `__ +openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/vitrage `https://opendev.org/openstack/vitrage `__ +openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ +openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ +openstack/watcher `https://opendev.org/openstack/watcher `__ +openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard `__ +openstack/zaqar `https://opendev.org/openstack/zaqar `__ +openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui `__ +openstack/zun `https://opendev.org/openstack/zun `__ +openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ +performa/os-faults 
`https://opendev.org/performa/os-faults `__ +starlingx/config `https://opendev.org/starlingx/config `__ +starlingx/fault `https://opendev.org/starlingx/fault `__ +starlingx/ha `https://opendev.org/starlingx/ha `__ +starlingx/integ `https://opendev.org/starlingx/integ `__ +starlingx/metal `https://opendev.org/starlingx/metal `__ +starlingx/nfv `https://opendev.org/starlingx/nfv `__ +starlingx/update `https://opendev.org/starlingx/update `__ +x/almanach `https://opendev.org/x/almanach `__ +x/apmec `https://opendev.org/x/apmec `__ +x/bilean `https://opendev.org/x/bilean `__ +x/broadview-collector `https://opendev.org/x/broadview-collector `__ +x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ +x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos `__ +x/devstack-plugin-bdd `https://opendev.org/x/devstack-plugin-bdd `__ +x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin-glusterfs `__ +x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ +x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ +x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-nfs `https://opendev.org/x/devstack-plugin-nfs `__ +x/devstack-plugin-sheepdog `https://opendev.org/x/devstack-plugin-sheepdog `__ +x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ +x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ +x/fenix `https://opendev.org/x/fenix `__ +x/gce-api `https://opendev.org/x/gce-api `__ +x/glare `https://opendev.org/x/glare `__ +x/group-based-policy `https://opendev.org/x/group-based-policy `__ +x/gyan `https://opendev.org/x/gyan `__ +x/horizon-mellanox `https://opendev.org/x/horizon-mellanox `__ +x/ironic-staging-drivers `https://opendev.org/x/ironic-staging-drivers `__ +x/kingbird `https://opendev.org/x/kingbird `__ +x/meteos `https://opendev.org/x/meteos `__ +x/meteos-ui `https://opendev.org/x/meteos-ui `__ +x/mixmatch `https://opendev.org/x/mixmatch `__ +x/mogan `https://opendev.org/x/mogan `__ +x/mogan-ui `https://opendev.org/x/mogan-ui `__ +x/networking-6wind `https://opendev.org/x/networking-6wind `__ +x/networking-ansible `https://opendev.org/x/networking-ansible `__ +x/networking-arista `https://opendev.org/x/networking-arista `__ +x/networking-brocade `https://opendev.org/x/networking-brocade `__ +x/networking-cisco `https://opendev.org/x/networking-cisco `__ +x/networking-cumulus `https://opendev.org/x/networking-cumulus `__ +x/networking-dpm `https://opendev.org/x/networking-dpm `__ +x/networking-fortinet `https://opendev.org/x/networking-fortinet `__ +x/networking-hpe `https://opendev.org/x/networking-hpe `__ +x/networking-huawei `https://opendev.org/x/networking-huawei `__ +x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ +x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ +x/networking-nec `https://opendev.org/x/networking-nec `__ +x/networking-omnipath `https://opendev.org/x/networking-omnipath `__ +x/networking-opencontrail `https://opendev.org/x/networking-opencontrail `__ +x/networking-ovs-dpdk `https://opendev.org/x/networking-ovs-dpdk `__ +x/networking-plumgrid `https://opendev.org/x/networking-plumgrid `__ +x/networking-spp `https://opendev.org/x/networking-spp `__ +x/networking-vpp `https://opendev.org/x/networking-vpp `__ +x/networking-vsphere `https://opendev.org/x/networking-vsphere `__ 
+x/neutron-classifier `https://opendev.org/x/neutron-classifier `__ +x/nova-dpm `https://opendev.org/x/nova-dpm `__ +x/nova-lxd `https://opendev.org/x/nova-lxd `__ +x/nova-mksproxy `https://opendev.org/x/nova-mksproxy `__ +x/oaktree `https://opendev.org/x/oaktree `__ +x/omni `https://opendev.org/x/omni `__ +x/os-xenapi `https://opendev.org/x/os-xenapi `__ +x/picasso `https://opendev.org/x/picasso `__ +x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nova `__ +x/scalpels `https://opendev.org/x/scalpels `__ +x/slogging `https://opendev.org/x/slogging `__ +x/stackube `https://opendev.org/x/stackube `__ +x/tap-as-a-service `https://opendev.org/x/tap-as-a-service `__ +x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ +x/tatu `https://opendev.org/x/tatu `__ +x/tobiko `https://opendev.org/x/tobiko `__ +x/trio2o `https://opendev.org/x/trio2o `__ +x/valet `https://opendev.org/x/valet `__ +x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ +x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +zuul/nodepool `https://opendev.org/zuul/nodepool `__ +======================================== === diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 56f12e7ab6..580560ccab 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -23,13 +23,14 @@ # working directory # * network access to https://git.openstack.org/cgit +import functools import logging import json import requests logging.basicConfig(level=logging.DEBUG) -url = 'https://review.openstack.org/projects/' +url = 'https://review.opendev.org/projects/' # This is what a project looks like ''' @@ -39,26 +40,30 @@ }, ''' -def is_in_openstack_namespace(proj): - # only interested in openstack namespace (e.g. not retired +def is_in_wanted_namespace(proj): + # only interested in openstack or x namespace (e.g. 
not retired # stackforge, etc) - return proj.startswith('openstack/') + if proj.startswith('stackforge/') or \ + proj.startswith('stackforge-attic/'): + return False + else: + return True # Check if this project has a plugin file -def has_devstack_plugin(proj): +def has_devstack_plugin(session, proj): # Don't link in the deb packaging repos if "openstack/deb-" in proj: return False - r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj) + r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj) return r.status_code == 200 logging.debug("Getting project list from %s" % url) r = requests.get(url) -projects = sorted(filter(is_in_openstack_namespace, json.loads(r.text[4:]))) +projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:]))) logging.debug("Found %d projects" % len(projects)) -found_plugins = filter(has_devstack_plugin, projects) +s = requests.Session() +found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) for project in found_plugins: - # strip of openstack/ - print(project[10:]) + print(project) diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh index 27c9c4118e..a3aa7ba63d 100755 --- a/tools/generate-devstack-plugins-list.sh +++ b/tools/generate-devstack-plugins-list.sh @@ -28,9 +28,9 @@ # * the environment variable git_dir pointing to the location # * of said git repositories # ) OR ( -# * network access to the review.openstack.org Gerrit API +# * network access to the review.opendev.org Gerrit API # working directory -# * network access to https://git.openstack.org/cgit +# * network access to https://opendev.org # )) # # If a file named data/devstack-plugins-registry.header or @@ -50,8 +50,6 @@ function title_underline { } ( -declare -A plugins - if [[ -r data/devstack-plugins-registry.header ]]; then cat data/devstack-plugins-registry.header fi @@ -74,8 +72,8 @@ printf "%-${name_col_len}s %s\n" "Plugin Name" "URL" title_underline ${name_col_len} for plugin in ${sorted_plugins}; do - giturl="https://git.openstack.org/openstack/${plugin}" - gitlink="https://git.openstack.org/cgit/openstack/${plugin}" + giturl="https://opendev.org/${plugin}" + gitlink="https://opendev.org/${plugin}" printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__" done From 371a25328525ac16677ad721f725e81628f9d941 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Tue, 7 May 2019 12:12:29 -0400 Subject: [PATCH 1101/1936] Change the GIT_BASE default to https://opendev.org Change-Id: Ifcfce490edb3d77e4e436e002d35bc909e1a057c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 3432fb6228..4fc31a3d39 100644 --- a/stackrc +++ b/stackrc @@ -237,7 +237,7 @@ WSGI_MODE=${WSGI_MODE:-"uwsgi"} # ------------ # Base GIT Repo URL -GIT_BASE=${GIT_BASE:-https://git.openstack.org} +GIT_BASE=${GIT_BASE:-https://opendev.org} # The location of REQUIREMENTS once cloned REQUIREMENTS_DIR=$DEST/requirements From 9b6d2f20b47523ddc51349943dd76bb76d1c58d8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 18 Jun 2019 10:43:16 -0400 Subject: [PATCH 1102/1936] Update (git|review).openstack.org links to opendev This updates links going to git.openstack.org and review.openstack.org to go to their respective opendev locations to avoid redirects. 
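For example, fetching an open change directly from Gerrit now goes through the new host; the repository and change ref below are only placeholders taken from the updated docs:

    git fetch https://review.opendev.org/openstack/nova \
        refs/changes/50/5050/1 && git checkout FETCH_HEAD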
Change-Id: I78e3bb5303718962f591117f9c0ee11f2314b128 Closes-Bug: #1833256 --- HACKING.rst | 4 ++-- README.rst | 2 +- doc/source/configuration.rst | 6 +++--- doc/source/faq.rst | 3 +-- doc/source/guides/devstack-with-lbaas-v2.rst | 6 +++--- doc/source/guides/lxc.rst | 2 +- doc/source/guides/multinode-lab.rst | 2 +- doc/source/guides/neutron.rst | 4 ++-- doc/source/guides/nova.rst | 2 +- doc/source/guides/single-machine.rst | 2 +- doc/source/guides/single-vm.rst | 2 +- doc/source/index.rst | 5 ++--- doc/source/plugins.rst | 15 +++++++++++---- doc/source/zuul_ci_jobs_migration.rst | 10 +++++----- inc/python | 2 +- tools/fixup_stuff.sh | 2 +- tools/generate-devstack-plugins-list.py | 4 ++-- tools/install_pip.sh | 2 +- 18 files changed, 40 insertions(+), 35 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 968306a2c4..f6951064ae 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -11,7 +11,7 @@ Shell script was chosen because it best illustrates the steps used to set up and interact with OpenStack components. DevStack's official repository is located on git.openstack.org at -https://git.openstack.org/openstack-dev/devstack. Besides the master branch that +https://opendev.org/openstack/devstack. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). @@ -26,7 +26,7 @@ __ lp_ .. _lp: https://launchpad.net/~devstack The `Gerrit review -queue `__ +queue `__ is used for all commits. The primary script in DevStack is ``stack.sh``, which performs the bulk of the diff --git a/README.rst b/README.rst index ad7ede45c5..f3a585a926 100644 --- a/README.rst +++ b/README.rst @@ -38,7 +38,7 @@ You can also pick specific OpenStack project releases by setting the appropriate `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: - GLANCE_REPO=https://git.openstack.org/openstack/glance.git + GLANCE_REPO=https://opendev.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed Start A Dev Cloud diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 098e994ccd..0105d5e636 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -205,7 +205,7 @@ Historical Notes Historically DevStack obtained all local configuration and customizations from a ``localrc`` file. In Oct 2013 the ``local.conf`` configuration method was introduced (in `review 46768 -`__) to simplify this +`__) to simplify this process. Configuration Notes @@ -232,13 +232,13 @@ proposal. ``GIT_BASE`` points to the primary repository server. To pull a branch directly from Gerrit, get the repo and branch from the Gerrit review page:: - git fetch https://review.openstack.org/p/openstack/nova \ + git fetch https://review.opendev.org/openstack/nova \ refs/changes/50/5050/1 && git checkout FETCH_HEAD The repo is the stanza following ``fetch`` and the branch is the stanza following that:: - NOVA_REPO=https://review.openstack.org/p/openstack/nova + NOVA_REPO=https://review.opendev.org/openstack/nova NOVA_BRANCH=refs/changes/50/5050/1 diff --git a/doc/source/faq.rst b/doc/source/faq.rst index efb315cbee..8214de0f6a 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -80,8 +80,7 @@ I'd like to help! ~~~~~~~~~~~~~~~~~ That isn't a question, but please do! The source for DevStack is at -`git.openstack.org -`__ and bug +`opendev.org `__ and bug reports go to `LaunchPad `__. 
Contributions follow the usual process as described in the `developer guide diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index db138ae2c5..07a9bb33cc 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -19,7 +19,7 @@ Install devstack :: - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack cd devstack/tools sudo ./create-stack-user.sh cd ../.. @@ -35,9 +35,9 @@ Edit your ``/opt/stack/devstack/local.conf`` to look like :: [[local|localrc]] - enable_plugin octavia https://git.openstack.org/openstack/octavia + enable_plugin octavia https://opendev.org/openstack/octavia # If you are enabling horizon, include the octavia dashboard - # enable_plugin octavia-dashboard https://git.openstack.org/openstack/octavia-dashboard.git + # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git # If you are enabling barbican for TLS offload in Octavia, include it here. # enable_plugin barbican https://github.com/openstack/barbican.git diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst index 9549ed2974..dcaa4166c4 100644 --- a/doc/source/guides/lxc.rst +++ b/doc/source/guides/lxc.rst @@ -105,7 +105,7 @@ The commands in this section should all be run inside your container. :: - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack #. Configure diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 3c4acc8c7c..15f02a0e5e 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -103,7 +103,7 @@ Grab the latest version of DevStack: :: - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack cd devstack Up to this point all of the steps apply to each node in the cluster. diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 80b2f85285..2c25a1c350 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -567,7 +567,7 @@ you do not require them. Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap Q_USE_PROVIDER_NETWORKING=True - enable_plugin neutron https://git.openstack.org/openstack/neutron + enable_plugin neutron https://opendev.org/openstack/neutron ## MacVTap agent options Q_AGENT=macvtap @@ -622,7 +622,7 @@ For the MacVTap compute node, use this local.conf: # Services that a compute node runs disable_all_services - enable_plugin neutron https://git.openstack.org/openstack/neutron + enable_plugin neutron https://opendev.org/openstack/neutron ENABLED_SERVICES+=n-cpu,q-agt ## MacVTap agent options diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 65491d13e8..2271e2321b 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -83,7 +83,7 @@ some fake resource inventory values and keeps track of the state of the compute API but is good enough for most API testing, and is also used within the nova functional tests themselves so is fairly robust. -.. _fake virt driver: http://git.openstack.org/cgit/openstack/nova/tree/nova/virt/fake.py +.. 
_fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py Configuration ------------- diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index cfbd6b1884..a0e97edb37 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -74,7 +74,7 @@ We'll grab the latest version of DevStack via https: .. code-block:: console $ sudo apt-get install git -y || sudo yum install -y git - $ git clone https://git.openstack.org/openstack-dev/devstack + $ git clone https://opendev.org/openstack/devstack $ cd devstack Run DevStack diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 45b8f2dd89..8ebf2a638d 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -60,7 +60,7 @@ passed as the user-data file when booting the VM. DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git sudo chown stack:stack /home/stack cd /home/stack - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack cd devstack echo '[[local|localrc]]' > local.conf echo ADMIN_PASSWORD=password >> local.conf diff --git a/doc/source/index.rst b/doc/source/index.rst index 1ea1c5ddae..8f958585ee 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -23,8 +23,7 @@ everything from git master. It is used interactively as a development environment and as the basis for much of the OpenStack project's functional testing. -The source is available at -``__. +The source is available at ``__. .. warning:: @@ -73,7 +72,7 @@ Download DevStack .. code-block:: console - $ git clone https://git.openstack.org/openstack-dev/devstack + $ git clone https://opendev.org/openstack/devstack $ cd devstack The ``devstack`` repo contains a script that installs OpenStack and diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index b1f2397454..09ba980548 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -99,7 +99,7 @@ They are added in the following format:: An example would be as follows:: - enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api + enable_plugin ec2-api https://opendev.org/openstack/ec2-api plugin.sh contract ================== @@ -264,10 +264,12 @@ integration of alternate RPC systems (e.g. zmq, qpid). In these cases the best practice is to build a dedicated ``openstack/devstack-plugin-FOO`` project. +Legacy project-config jobs +-------------------------- + To enable a plugin to be used in a gate job, the following lines will be needed in your ``jenkins/jobs/.yaml`` definition in -`project-config -`_:: +`project-config `_:: # Because we are testing a non standard project, add the # our project repository. This makes zuul do the right @@ -277,7 +279,12 @@ be needed in your ``jenkins/jobs/.yaml`` definition in # note the actual url here is somewhat irrelevant because it # caches in nodepool, however make it a valid url for # documentation purposes. - export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api" + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api" + +Zuul v3 jobs +------------ + +See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`. 
See Also ======== diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index 633f951d3d..dbb79893db 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -28,7 +28,7 @@ sahara Tempest plugin repo: .. code:: yaml - # In http://git.openstack.org/cgit/openstack/sahara-tests/tree/.zuul.yaml: + # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml: - job: name: sahara-tests-tempest description: | @@ -86,7 +86,7 @@ job.parent. .. code:: yaml - # https://git.openstack.org/cgit/openstack/kuryr-kubernetes/tree/.zuul.yaml: + # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml: - job: name: kuryr-kubernetes-tempest-base parent: devstack-tempest @@ -110,9 +110,9 @@ job.parent. kuryr-kubernetes: true (...) devstack_plugins: - kuryr-kubernetes: https://git.openstack.org/openstack/kuryr - devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container - neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas + kuryr-kubernetes: https://opendev.org/openstack/kuryr + devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container + neutron-lbaas: https://opendev.org/openstack/neutron-lbaas tempest_plugins: - kuryr-tempest-plugin (...) diff --git a/inc/python b/inc/python index 0e575ae9e9..ea8ff67e08 100644 --- a/inc/python +++ b/inc/python @@ -346,7 +346,7 @@ function lib_installed_from_git { # The best option seems to be to use "pip list" which will tell # you the path an editable install was installed from; for example # in response to something like - # pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate' + # pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate' # pip list --format columns shows # bashate 0.5.2.dev19 /tmp/env/src/bashate # Thus we check the third column to see if we're installed from diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 748223902c..1fa053f457 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -255,7 +255,7 @@ function fixup_suse { # looking for the mirror config script before doing this, and just # skip it if so. -# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \ +# [1] https://opendev.org/openstack/diskimage-builder/src/branch/master/ \ # diskimage_builder/elements/pip-and-virtualenv/ \ # install.d/pip-and-virtualenv-source-install/04-install-pip # [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823 diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 580560ccab..11062eab2b 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -19,9 +19,9 @@ # # In order to function correctly, the environment in which the # script runs must have -# * network access to the review.openstack.org Gerrit API +# * network access to the review.opendev.org Gerrit API # working directory -# * network access to https://git.openstack.org/cgit +# * network access to https://opendev.org/ import functools import logging diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 1bd7392b9d..2b6aa4c2e8 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -35,7 +35,7 @@ FILES=$TOP_DIR/files # done by openstack-infra diskimage-builder elements as part of image # preparation [1]. This prevents any network access, which can be # unreliable in CI situations. 
-# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip +# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)" From 99e7445fb245deaa67d831a23c4e4ee21c13c855 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 21 Jun 2019 09:38:24 -0400 Subject: [PATCH 1103/1936] Adjust repo namespace for pbr This is a follow up for change Ifcfce490edb3d77e4e436e002d35bc909e1a057c where the GIT_BASE was changed to the opendev URL to avoid redirects. The pbr namespace in stackrc was old and still getting redirected and this change fixes that. Change-Id: Ib444e928fa2ca7650670f97be6927202333a1dd7 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 4fc31a3d39..2661b5fa0b 100644 --- a/stackrc +++ b/stackrc @@ -498,7 +498,7 @@ GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git} GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} # pbr drives the setuptools configs -GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} +GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git} GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH} From 52c2886f7ede0e47f53b65fd1fd30f7d6ca7c086 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 20 Jun 2019 07:42:31 +0000 Subject: [PATCH 1104/1936] Add capability of adding additional network API extensions Currently, devstack has the NETWORK_API_EXTENSIONS var to define the network API extensions. NETWORK_API_EXTENSIONS defaults to 'all' for master and to a hard-coded list of extensions per release. Zuul jobs for network extensions (for example neutron-fwaas) need to add some extra extensions to the default list. To do so, they currently have to duplicate all the default extensions and then add the extra ones. The situation is even more difficult because the default extensions list varies from release to release, so jobs have to keep updating NETWORK_API_EXTENSIONS per release. This commit defines a new var, ADDITIONAL_NETWORK_API_EXTENSIONS, which takes extra extensions and appends them to the default list. This way Zuul jobs do not need to duplicate the default extensions. Change-Id: I7270c9b9e047a851970439522c0356c9089a5b74 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 9f1b677eba..3eb9803f6d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -618,6 +618,9 @@ function configure_tempest { # Remove disabled extensions network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS) fi + if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then + network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS" + fi iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # Swift API Extensions local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} From 8ab64b3236c93c8449edf80165017898b83f1269 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 17 Nov 2017 19:52:29 +0100 Subject: [PATCH 1105/1936] Drop signing_dir option from configure_auth_token_middleware This is no longer being used due to Keystone PKI tokens no longer being implemented. In order to not break backward compatibility we create a new function that is to be used instead and deprecate the old one.
Modify the old function to ignore the 3rd argument and display a deprecation warning. Adjust callers to no longer create and set that directory, calling the new function instead. Change-Id: Id0dec1ba72467cce5cacfcfdb2bc0af2bd3a3610 --- lib/cinder | 13 ++----------- lib/glance | 24 +++--------------------- lib/keystone | 17 +++++++++++------ lib/neutron | 17 +++-------------- lib/neutron-legacy | 14 ++------------ lib/nova | 13 ++----------- lib/placement | 12 +----------- lib/swift | 7 +------ 8 files changed, 25 insertions(+), 92 deletions(-) diff --git a/lib/cinder b/lib/cinder index ed8349aad6..32e38c44da 100644 --- a/lib/cinder +++ b/lib/cinder @@ -51,7 +51,6 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -217,7 +216,7 @@ function configure_cinder { inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR + configure_keystone_authtoken_middleware $CINDER_CONF cinder iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -314,7 +313,7 @@ function configure_cinder { fi # Set nova credentials (used for os-assisted-snapshots) - configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova + configure_keystone_authtoken_middleware $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" @@ -380,13 +379,6 @@ function create_cinder_accounts { fi } -# create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* -} - # init_cinder() - Initialize database and volume group function init_cinder { if is_service_enabled $DATABASE_BACKENDS; then @@ -415,7 +407,6 @@ function init_cinder { fi mkdir -p $CINDER_STATE_PATH/volumes - create_cinder_cache_dir } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index d630c9a3b0..54d3276433 100644 --- a/lib/glance +++ b/lib/glance @@ -44,7 +44,6 @@ fi GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} -GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -97,20 +96,14 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # kill instances (nova) # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR } # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR - # We run this here as this configures cache dirs for the auth middleware - # which is used in the api server and not in the registry. The api - # Server is configured through this function and not init_glance. 
- create_glance_cache_dir - # Set non-default configuration options for registry iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS @@ -120,7 +113,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry + configure_keystone_authtoken_middleware $GLANCE_REGISTRY_CONF glance iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" @@ -132,7 +125,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api + configure_keystone_authtoken_middleware $GLANCE_API_CONF glance iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -279,23 +272,12 @@ function create_glance_accounts { fi } -# create_glance_cache_dir() - Part of the configure_glance() process -function create_glance_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact - rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/* -} - # init_glance() - Initialize databases, etc. function init_glance { # Delete existing images rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - # Delete existing cache - rm -rf $GLANCE_CACHE_DIR - mkdir -p $GLANCE_CACHE_DIR - # (Re)create glance database recreate_database glance diff --git a/lib/keystone b/lib/keystone index 02e28222b7..5bd552f557 100644 --- a/lib/keystone +++ b/lib/keystone @@ -397,18 +397,17 @@ function create_service_user { fi } -# Configure the service to use the auth token middleware. +# Configure a service to use the auth token middleware. # -# configure_auth_token_middleware conf_file admin_user signing_dir [section] +# configure_keystone_authtoken_middleware conf_file admin_user IGNORED [section] # # section defaults to keystone_authtoken, which is where auth_token looks in # the .conf file. If the paste config file is used (api-paste.ini) then # provide the section name for the auth_token filter. 
-function configure_auth_token_middleware { +function configure_keystone_authtoken_middleware { local conf_file=$1 local admin_user=$2 - local signing_dir=$3 - local section=${4:-keystone_authtoken} + local section=${3:-keystone_authtoken} iniset $conf_file $section auth_type password iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI @@ -419,10 +418,16 @@ function configure_auth_token_middleware { iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section cafile $SSL_BUNDLE_FILE - iniset $conf_file $section signing_dir $signing_dir iniset $conf_file $section memcached_servers localhost:11211 } +# configure_auth_token_middleware conf_file admin_user IGNORED [section] +# TODO(frickler): old function for backwards compatibility, remove in U cycle +function configure_auth_token_middleware { + echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead" + configure_keystone_authtoken_middleware $1 $2 $4 +} + # init_keystone() - Initialize databases, etc. function init_keystone { if is_service_enabled ldap; then diff --git a/lib/neutron b/lib/neutron index 947c491ec1..e1fd10ce1f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -36,7 +36,6 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) # Distributed Virtual Router (DVR) configuration @@ -62,7 +61,6 @@ NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini @@ -200,8 +198,8 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken - configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova + configure_keystone_authtoken_middleware $NEUTRON_CONF neutron + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova # Configure VXLAN # TODO(sc68cal) not hardcode? @@ -292,7 +290,7 @@ function configure_neutron_new { # TODO(dtroyer): remove the v2.0 hard code below iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT + configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT fi # Format logging @@ -389,13 +387,6 @@ function create_neutron_accounts_new { fi } -# create_neutron_cache_dir() - Part of the init_neutron() process -function create_neutron_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* -} - # init_neutron() - Initialize databases, etc. 
function init_neutron_new { @@ -405,8 +396,6 @@ function init_neutron_new { # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads time_stop "dbsync" - - create_neutron_cache_dir } # install_neutron() - Collect source and prepare diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 87edc5ab81..dbd6e2c06b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -71,7 +71,6 @@ GITDIR["python-neutronclient"]=$DEST/python-neutronclient NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then @@ -841,13 +840,13 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME # Configuration for neutron notifications to nova. iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova # Configure plugin neutron_plugin_configure_service @@ -933,15 +932,6 @@ function _neutron_setup_rootwrap { fi } -# Configures keystone integration for neutron service -function _neutron_setup_keystone { - local conf_file=$1 - local section=$2 - - create_neutron_cache_dir - configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section -} - function _neutron_setup_interface_driver { # ovs_use_veth needs to be set before the plugin configuration diff --git a/lib/nova b/lib/nova index 8220e0f10b..a394a64eef 100644 --- a/lib/nova +++ b/lib/nova @@ -46,7 +46,6 @@ fi NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} -NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf @@ -215,7 +214,7 @@ function cleanup_nova { sudo rm -rf $NOVA_INSTANCES_PATH/* fi - sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + sudo rm -rf $NOVA_STATE_PATH # NOTE(dtroyer): This really should be called from here but due to the way # nova abuses the _cleanup() function we're moving it @@ -443,7 +442,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT fi - configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR + configure_keystone_authtoken_middleware $NOVA_CONF nova fi if is_service_enabled cinder; then @@ -690,13 +689,6 @@ function conductor_conf { echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" } -# create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* -} - # create_nova_keys_dir() - Part of the init_nova() process function create_nova_keys_dir { # Create keys dir @@ -738,7 +730,6 @@ function init_nova { done fi - create_nova_cache_dir create_nova_keys_dir if [[ "$NOVA_BACKEND" == "LVM" ]]; then diff --git a/lib/placement b/lib/placement index 
a89cd26939..785b0ddfca 100644 --- a/lib/placement +++ b/lib/placement @@ -29,7 +29,6 @@ set +o xtrace PLACEMENT_DIR=$DEST/placement PLACEMENT_CONF_DIR=/etc/placement PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf -PLACEMENT_AUTH_CACHE_DIR=${PLACEMENT_AUTH_CACHE_DIR:-/var/cache/placement} PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone} # Placement virtual environment if [[ ${USE_VENV} = True ]]; then @@ -64,7 +63,6 @@ function is_placement_enabled { function cleanup_placement { sudo rm -f $(apache_site_config_for placement-api) remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" - sudo rm -f $PLACEMENT_AUTH_CACHE_DIR/* } # _config_placement_apache_wsgi() - Set WSGI config files @@ -99,7 +97,7 @@ function create_placement_conf { iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY - configure_auth_token_middleware $PLACEMENT_CONF placement $PLACEMENT_AUTH_CACHE_DIR + configure_keystone_authtoken_middleware $PLACEMENT_CONF placement setup_logging $PLACEMENT_CONF } @@ -127,19 +125,11 @@ function create_placement_accounts { "$placement_api_url" } -# create_placement_cache_dir() - Create directories for keystone cache -function create_placement_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $PLACEMENT_AUTH_CACHE_DIR - rm -f $PLACEMENT_AUTH_CACHE_DIR/* -} - # init_placement() - Create service user and endpoints function init_placement { recreate_database placement $PLACEMENT_BIN_DIR/placement-manage db sync create_placement_accounts - create_placement_cache_dir } # install_placement() - Collect source and prepare diff --git a/lib/swift b/lib/swift index d9a7878652..5be9e3575e 100644 --- a/lib/swift +++ b/lib/swift @@ -47,7 +47,6 @@ else SWIFT_BIN_DIR=$(get_python_exec_prefix) fi -SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} @@ -452,7 +451,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory - configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken + configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False @@ -735,10 +734,6 @@ function init_swift { $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42 $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42 } && popd >/dev/null - - # Create cache dir - sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR - rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift { From 8b31dce38b630ba3ed1883bcf91a90d296ae44f2 Mon Sep 17 00:00:00 2001 From: Carlos Goncalves Date: Fri, 21 Jun 2019 13:13:40 +0200 Subject: [PATCH 1106/1936] Fix rdo-release install The URL for rdo-release package is version-less and redirects to latest stable version. This becomes problematic when stacking older stable versions as dependencies might not be met or newer and incompatibile ones might get installed. 
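As an illustrative sketch (the branch name is only an example), the versioned package URL is now derived from the target branch:

    # e.g. TARGET_BRANCH=stable/stein gives rdo_release=stein
    rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
    yum_install https://rdoproject.org/repos/openstack-$rdo_release/rdo-release-$rdo_release.rpm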
Closes-Bug: #1833696 Change-Id: Icb07dcb4c9a3950a3c31a3a8dcb8d0b4c713fdb1 --- stack.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 9bda79a36b..80a7d0e7f5 100755 --- a/stack.sh +++ b/stack.sh @@ -331,7 +331,13 @@ function _install_rdo { # Per the point above, it's a bunch of repos so starts getting a # little messy... if ! is_package_installed rdo-release ; then - yum_install https://rdoproject.org/repos/rdo-release.rpm + if [[ "$TARGET_BRANCH" == "master" ]]; then + yum_install https://rdoproject.org/repos/rdo-release.rpm + else + # Get latest rdo-release-$rdo_release RPM package version + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + yum_install https://rdoproject.org/repos/openstack-$rdo_release/rdo-release-$rdo_release.rpm + fi fi # Also enable optional for RHEL7 proper. Note this is a silent From 45ded1cca01bfa31dcb3ad1591125e4890d6f562 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 3 Jul 2019 06:12:23 +0000 Subject: [PATCH 1107/1936] Updated from generate-devstack-plugins-list Change-Id: I1b4c9571b8f3dece270865fb336e73aca3a36a76 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ea8c31807d..1c1c5a5a25 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -204,7 +204,6 @@ x/trio2o `https://opendev.org/x/trio2o `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ -zuul/nodepool `https://opendev.org/zuul/nodepool `__ ======================================== === From d7d902f6b60298d56359a07a63b2355373e54956 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Fri, 24 May 2019 20:09:28 +0000 Subject: [PATCH 1108/1936] Configure console proxy ports in nova-cpu.conf In change I8934d0b9392f2976347391c8a650ad260f337762, we began configuring console proxy ports for multiple cells in the nova controller config files to avoid "Address already in use" errors from port collisions when running multiple cells on a single host. This correspondingly configures the console proxy ports in the nova compute config file based on what cell we're in, according to the NOVA_CPU_CELL variable. The base_url config for serial console is also added where the default was previously used. The url is taken from the config option default in the nova code: nova/conf/serial_console.py [1]. [1] https://github.com/openstack/nova/blob/8f00b5d/nova/conf/serial_console.py#L54 Change-Id: Id885fc5a769bce8111f1052a1b55d26be817c890 Closes-Bug: #1830417 --- lib/nova | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index a394a64eef..18e7a7d98a 100644 --- a/lib/nova +++ b/lib/nova @@ -572,23 +572,34 @@ function configure_placement_nova_compute { } function configure_console_compute { + # If we are running multiple cells (and thus multiple console proxies) on a + # single host, we offset the ports to avoid collisions. We need to + # correspondingly configure the console proxy port for nova-compute and we + # can use the NOVA_CPU_CELL variable to know which cell we are for + # calculating the offset. + # Stagger the offset based on the total number of possible console proxies + # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # all are enabled. 
+ local offset + offset=$(((NOVA_CPU_CELL - 1) * 4)) + # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then # Use the old URL when installing novnc packages. - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"} elif vercmp ${NOVNC_BRANCH} "<" "1.0.0"; then - # Use the old URL when installing older novnc source. - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + # Use the old URL when installing older novnc source. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"} else # Use the new URL when building >=v1.0.0 from source. - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_lite.html"} + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} fi iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} + XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"} iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"} iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi @@ -615,6 +626,7 @@ function configure_console_compute { if is_service_enabled n-sproxy; then iniset $NOVA_CPU_CONF serial_console enabled True + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/" fi } From 3446481a23d2c878297b4f535c7b9b44d07ed5fb Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 3 Jul 2019 13:16:04 +0000 Subject: [PATCH 1109/1936] Update docs about python-systemd pkg The package has been renamed in order to avoid the namespace collision with systemd, update our doc accordingly. Change-Id: I1b9a434d9bb6a7d9dc38ef965017ed9f8773d595 Closes-Bug: 1825949 --- doc/source/systemd.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 1bc9911879..7168768504 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -208,7 +208,8 @@ into the ``systemd`` namespace, which can cause some issues. the one you want. - ``systemd`` - a python 3 only library, not what you want. - ``python-systemd`` - another library you don't want. Installing it - on a system will break ansible's ability to run. + on a system will break ansible's ability to run. The package has now + been renamed to ``cysystemd``, which avoids the namespace collision. If we were using user units, the ``[Service]`` - ``Group=`` parameter From 70fca49de75a77fd766d5b72f8757e4f7c0a7a48 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 26 Jun 2019 09:57:02 +0200 Subject: [PATCH 1110/1936] Add and switch to the newly created opensuse-15 nodeset opensuse-150 nodeset is referring to openSUSE 15.0, which is still in maintenance but openSUSE 15.1 has been released already. "opensuse-15" is going to refer to the "latest openSUSE 15.x" build released and working for OpenStack going forward, so add this nodeset and use it by default going forward. 
Change-Id: Ic3f4d6998a66da5226bc95088d7e3c83dfe737ce --- .zuul.yaml | 12 ++++++------ stack.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3c516dd350..bcfd0c1a48 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -39,10 +39,10 @@ - controller - nodeset: - name: devstack-single-node-opensuse-150 + name: devstack-single-node-opensuse-15 nodes: - name: controller - label: opensuse-150 + label: opensuse-15 groups: - name: tempest nodes: @@ -532,10 +532,10 @@ voting: false - job: - name: devstack-platform-opensuse-150 + name: devstack-platform-opensuse-15 parent: tempest-full-py3 - description: openSUSE 15.0 platform test - nodeset: devstack-single-node-opensuse-150 + description: openSUSE 15.x platform test + nodeset: devstack-single-node-opensuse-15 voting: false - job: @@ -627,7 +627,7 @@ - devstack-ipv6: voting: false - devstack-platform-centos-7 - - devstack-platform-opensuse-150 + - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-xenial - devstack-multinode diff --git a/stack.sh b/stack.sh index 9bda79a36b..20938ada94 100755 --- a/stack.sh +++ b/stack.sh @@ -224,7 +224,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f28|f29|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f28|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 705e9cb5dc8b63c902f588d29271686501be0c6e Mon Sep 17 00:00:00 2001 From: Vanou Ishii Date: Tue, 2 Jul 2019 00:40:24 -0400 Subject: [PATCH 1111/1936] Fix error in configure_nova_hypervisor with hardware Ironic node Trying to deploy OpenStack environment consisting of ironic nova hypervisor & hardware Ironic node (not VM Ironic node) with devstack got failed. Devstack error says error occurred while calling configure_libvirt in configure_nova_hypervisor. This happens because libvirt related packages are not installed when specifying "VIRT_DRIVER=ironic" and "IRONIC_IS_HARDWARE=True". To fix this problem, this commit add "if" statement to check Ironic node is hardware or not using "is_ironic_hardware" function in "function-common" file. Change-Id: I1113478175fadec79d0f8bf6ae842ed86e5e686b Closes-Bug: #1834985 --- lib/nova_plugins/hypervisor-ironic | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 1279256055..9bc04e2122 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -36,7 +36,9 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - configure_libvirt + if ! 
is_ironic_hardware; then + configure_libvirt + fi LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver From 764ccd07a7f30ecf6549bad618bc05ffb97fae35 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 11 Jul 2019 09:47:48 -0700 Subject: [PATCH 1112/1936] Remove Swift from default DISABLED_PYTHON3_PACKAGES As of https://review.opendev.org/#/c/653548/ this is no longer necessary. Change-Id: Iab43d77c3444fa97f3339f0e5fa4ad24e87e3fd6 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 2661b5fa0b..4ac8c0338e 100644 --- a/stackrc +++ b/stackrc @@ -131,7 +131,7 @@ export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. -export DISABLED_PYTHON3_PACKAGES="swift" +export DISABLED_PYTHON3_PACKAGES="" # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that From d51baee40d0e50675d8055cab67b262ef0fc9f1f Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 12 Jul 2019 11:51:17 -0400 Subject: [PATCH 1113/1936] Add NOVA_SHUTDOWN_TIMEOUT variable This adds a variable to control the [DEFAULT]/shutdown_timeout config in nova to control whether or not a guest should have a graceful shutdown of the OS or if it should just stop immediately (no timeout). Since devstack uses CirrOS images by default, the default value for the NOVA_SHUTDOWN_TIMEOUT variable is 0 which should speed up tempest runs. The default in nova.conf [1] is 60 seconds. [1] https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.shutdown_timeout Change-Id: Ida83f70a1c4e61e5248f2bd42b4c24f7ac6d2310 Related-Bug: #1829896 --- lib/nova | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova b/lib/nova index 29bce5a819..f423b570cd 100644 --- a/lib/nova +++ b/lib/nova @@ -153,6 +153,11 @@ ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} # Other options include "versioned" and "both". NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} +# Timeout for servers to gracefully shutdown the OS during operations +# like shelve, rescue, stop, rebuild. Defaults to 0 since the default +# image in devstack is CirrOS. +NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} + # Functions # --------- @@ -401,6 +406,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager From dc01a8ab63aff1be170fb59c293ed4bddd03749a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sun, 14 Jul 2019 22:33:13 +0200 Subject: [PATCH 1114/1936] Switch TLS tests to TLSv1.2+ only This would more likely match a relevant production deployment. 
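As a rough way to confirm the behaviour after stacking, an old-protocol handshake should now be refused while a TLSv1.2 handshake still succeeds. This is only an illustrative check, not part of the change itself; the host and port are assumptions (an SSL-enabled Apache/tls-proxy endpoint on the controller, shown here as 10.0.0.5:443) and must be adjusted to the actual deployment:

    # expected to fail the handshake once only TLSv1.2+ is allowed
    openssl s_client -connect 10.0.0.5:443 -tls1_1 < /dev/null
    # expected to negotiate successfully
    openssl s_client -connect 10.0.0.5:443 -tls1_2 < /dev/null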
Change-Id: I4ee2ff0c00a8e33fd069a782b32eed5fef62c01b --- files/apache-keystone.template | 1 + files/apache-neutron.template | 1 + lib/tls | 1 + 3 files changed, 3 insertions(+) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..480fe06a9c 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -38,6 +38,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public diff --git a/files/apache-neutron.template b/files/apache-neutron.template index c7796b93bf..358e87f5da 100644 --- a/files/apache-neutron.template +++ b/files/apache-neutron.template @@ -24,6 +24,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /networking %NEUTRON_BIN%/neutron-api diff --git a/lib/tls b/lib/tls index 0032449e13..6f2a65a75b 100644 --- a/lib/tls +++ b/lib/tls @@ -536,6 +536,7 @@ $listen_string SSLEngine On SSLCertificateFile $DEVSTACK_CERT + SSLProtocol -all +TLSv1.3 +TLSv1.2 # Disable KeepAlive to fix bug #1630664 a.k.a the # ('Connection aborted.', BadStatusLine("''",)) error From 4106e46b2ed042ebeec533ac9022baa5d623ae06 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 24 Jul 2019 06:12:42 +0000 Subject: [PATCH 1115/1936] Updated from generate-devstack-plugins-list Change-Id: Ie46214ed8a5a5887f04805b9157b76ebf44f9616 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 1c1c5a5a25..5cbe4ed505 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -186,7 +186,6 @@ x/networking-vpp `https://opendev.org/x/networking-vpp < x/networking-vsphere `https://opendev.org/x/networking-vsphere `__ x/neutron-classifier `https://opendev.org/x/neutron-classifier `__ x/nova-dpm `https://opendev.org/x/nova-dpm `__ -x/nova-lxd `https://opendev.org/x/nova-lxd `__ x/nova-mksproxy `https://opendev.org/x/nova-mksproxy `__ x/oaktree `https://opendev.org/x/oaktree `__ x/omni `https://opendev.org/x/omni `__ From 420d3df48125a276974741a4f14a50df55184b8d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 25 Jul 2019 06:48:39 +0000 Subject: [PATCH 1116/1936] Make 'devstack-ipv6' job as voting 'devstack-ipv6' job set the devstack to deploy the services on IPv6. As part of community goal 'Support IPv6-Only Deployments'[1], this is going to be the base job for all project specific or tempest IPv6 jobs. Running this as voting make sure any devstack setting or changes would not break the IPv6 jobs. 
Story: #2005477 Task: #35923 [1] https://storyboard.openstack.org/#!/story/2005477 Change-Id: Id6580e8b29b6b04e34c2c1eca3125fa08920eb1d --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 2dd0b9f4ef..ee855bdf7f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -624,8 +624,7 @@ jobs: - devstack - devstack-xenial - - devstack-ipv6: - voting: false + - devstack-ipv6 - devstack-platform-centos-7 - devstack-platform-opensuse-150 - devstack-platform-fedora-latest @@ -666,6 +665,7 @@ jobs: - devstack - devstack-xenial + - devstack-ipv6 - devstack-multinode - devstack-multinode-xenial - devstack-unit-tests From b0b80d76e1b8787fbfa66aedaf700c2abe44e22d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 24 Jul 2019 10:31:27 +0000 Subject: [PATCH 1117/1936] Remove VNC server listen addresses setting to IPv4 from base job The 'devstack' job sets the VNC listen addresses 'VNCSERVER_LISTEN' and 'VNCSERVER_PROXYCLIENT_ADDRESS' to IPv4 values, which forces the 'devstack-ipv6' job to either unset them or override them with IPv6 values. Let's remove these settings from the base job and let lib/nova set them based on the IP version configured by the job. The 'devstack-ipv6' base job will be used to define the IPv6-only jobs in the Tempest and project side gates. Change-Id: Iea469128b15298aee61245e702d20603c8d376fb Story: #2005477 Task: #35923 --- .zuul.yaml | 4 ---- lib/nova | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index ee855bdf7f..048d69d3e0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -382,8 +382,6 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true - VNCSERVER_LISTEN: 0.0.0.0 - VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP devstack_local_conf: post-config: $NEUTRON_CONF: @@ -479,8 +477,6 @@ GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true - VNCSERVER_LISTEN: 0.0.0.0 - VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP - job: name: devstack-ipv6 diff --git a/lib/nova b/lib/nova index f423b570cd..7b364a6b36 100644 --- a/lib/nova +++ b/lib/nova @@ -601,8 +601,8 @@ function configure_console_compute { if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} + VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LOCAL_HOST} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$NOVA_SERVICE_LOCAL_HOST} iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else From 352d58a7afd9e2261e639af78e4fb4c99d8f9f81 Mon Sep 17 00:00:00 2001 From: Graham Hayes Date: Mon, 20 Jul 2015 16:28:52 +0100 Subject: [PATCH 1118/1936] Only modify folders created by devstack Running chown and chmod on files and folders not created by devstack causes a few issues: * On nfs mounted directories it can take an extremely long time to chown -R some of the git repos, especially if any tox commands have been run on the host * chown can cause the host files to get into a weird state if nfs is set up wrong. If files and folders are pre-existing we should assume they are in the correct state, and not modify them.
Fix setup-devstack-log-dir to create the logs directory with correct permissions in the first place. Change-Id: I5ebdaded3ffd0a5bc70c5e9ab5b18daefb358f58 Signed-off-by: Graham Hayes --- roles/setup-devstack-log-dir/tasks/main.yaml | 3 +++ roles/sync-devstack-data/tasks/main.yaml | 11 +++++++++++ stack.sh | 17 +++++++++++------ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml index b9f38dfacb..d8e8cfe70a 100644 --- a/roles/setup-devstack-log-dir/tasks/main.yaml +++ b/roles/setup-devstack-log-dir/tasks/main.yaml @@ -2,4 +2,7 @@ file: path: '{{ devstack_base_dir }}/logs' state: directory + mode: 0755 + owner: stack + group: stack become: yes diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml index e62be87ccd..a1d37c3951 100644 --- a/roles/sync-devstack-data/tasks/main.yaml +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -46,3 +46,14 @@ dest: "{{ devstack_data_base_dir }}/data/" mode: push when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the data folder and subfolders have the correct permissions + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + recurse: yes + when: 'inventory_hostname in groups["subnode"]|default([])' diff --git a/stack.sh b/stack.sh index c0216f4ce9..3c316448e1 100755 --- a/stack.sh +++ b/stack.sh @@ -365,9 +365,12 @@ DEST=${DEST:-/opt/stack} # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) -sudo mkdir -p $DEST -safe_chown -R $STACK_USER $DEST -safe_chmod 0755 $DEST +# If directory exists do not modify the permissions. +if [[ ! -d $DEST ]]; then + sudo mkdir -p $DEST + safe_chown -R $STACK_USER $DEST + safe_chmod 0755 $DEST +fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then @@ -376,9 +379,11 @@ fi # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR -safe_chmod 0755 $DATA_DIR +if [[ ! -d $DATA_DIR ]]; then + sudo mkdir -p $DATA_DIR + safe_chown -R $STACK_USER $DATA_DIR + safe_chmod 0755 $DATA_DIR +fi # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves From 97096e0a29fca0bc5194a8b5d7950bc244963267 Mon Sep 17 00:00:00 2001 From: Jan Gutter Date: Fri, 26 Jul 2019 17:46:44 +0200 Subject: [PATCH 1119/1936] Fix benign epmd@0.0.0.0.socket failure * The restart loop for rabbitmq-server can trigger socket activation of epmd without rabbitmq-server running. This can lead to 'systemctl status' reporting 'State: degraded' with no simple way to reset to 'State: running'. * It's important to note that this socket activation failure is benign and is not an indicator of system failure. Change-Id: Iede4f5ebeffb59644dee4a17b6331b3cdd04d146 Signed-off-by: Jan Gutter --- lib/rpc_backend | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 1c7c82fcd3..743b4ae170 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -66,7 +66,12 @@ EOF sudo systemctl restart epmd.socket epmd.service fi if is_fedora || is_suse; then - sudo systemctl enable rabbitmq-server + # NOTE(jangutter): If rabbitmq is not running (as in a fresh + # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with + # socket activation. 
This fails the first time and does not get + # cleared. It is benign, but the workaround is to start rabbitmq a + # bit earlier for RPM based distros. + sudo systemctl --now enable rabbitmq-server fi fi } From 0fc6b2c5a87a1884bf3cff086081b3a253cd9302 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Tue, 30 Jul 2019 17:52:55 +0200 Subject: [PATCH 1120/1936] Update api-ref location The api documentation is now published on docs.openstack.org instead of developer.openstack.org. Update all links that are changed to the new location. Note that redirects will be set up as well but let's point now to the new location. For details, see: http://lists.openstack.org/pipermail/openstack-discuss/2019-July/007828.html Change-Id: I8a6c3403192d1416cb66cc9e92ec827b339f1270 --- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 07a9bb33cc..1cd85b4de9 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -6,7 +6,7 @@ providing load balancing services for OpenStack. This guide will show you how to create a devstack with `Octavia API`_ enabled. -.. _Octavia API: https://developer.openstack.org/api-ref/load-balancer/v2/index.html +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html Phase 1: Create DevStack + 2 nova instances -------------------------------------------- From a54919180b1316b998739f8b2ba0cc4b79c6b759 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 31 Jul 2019 12:18:39 -0400 Subject: [PATCH 1121/1936] Un-quote metadata address if it is IPv6 In lib/neutron-legacy, the Nova metadata host address is un-quoted if it is IPv6, i.e. 2001:db8::1, not [2001:db8::1]. We should be doing the same in lib/neutron. Change-Id: I80c96603a41ef9d289712ef15b464859aa9257be --- lib/neutron | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index a6504e8dc2..0d23d97fcf 100644 --- a/lib/neutron +++ b/lib/neutron @@ -54,6 +54,7 @@ NEUTRON_DHCP_BINARY="neutron-dhcp-agent" NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini +NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini @@ -296,7 +297,7 @@ function configure_neutron_new { cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $SERVICE_HOST + iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS # TODO(ihrachys) do we really need to set rootwrap for metadata agent? configure_root_helper_options $NEUTRON_META_CONF From 99bcaf5721b89a7e74b6ac8888d82e3f83b04ef4 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 31 Jul 2019 12:39:49 +0000 Subject: [PATCH 1122/1936] Add 'tempest-ipv6-only' job on devstack gate We recently added the 'tempest-ipv6-only' job on tempest side which will use the devstack base job 'devstack-ipv6' job and add more verification and testing for IPv6 deployment. Let's add that job on devstack gate also to avoid any break due to devstack changes. 
Change-Id: Ib2c85ec262b027351872e2b5a39b06a4ba1b880a Story: #2005477 Task: #35923 --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 875aed390e..9853798963 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -657,6 +657,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - tempest-ipv6-only: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ gate: jobs: - devstack @@ -682,6 +686,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - tempest-ipv6-only: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. From d33cdd01f83b891b010e0fd238f1816910f3fd77 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 1 Aug 2019 14:58:37 +0200 Subject: [PATCH 1123/1936] Add options to configure cache in nova This patch adds new options: * CACHE_BACKEND - with default "dogpile.cache.memcached" * MEMCACHE_SERVERS - with default "localhost:11211" to make it possible to configure various cache backends for Nova and Keystone. It also adds the options: * KEYSTONE_ENABLE_CACHE - True by default * NOVA_ENABLE_CACHE - True by default to allow enabling and disabling the cache in those projects' config files. The default values configured there are the same as the values that were previously hardcoded in the Keystone config. Nova now also has this cache enabled by default. Change-Id: I9082be077b59acd3a39910fa64e29147cb5c2dd7 Closes-Bug: #1836642 --- lib/keystone | 9 ++++++--- lib/nova | 12 ++++++++++-- stackrc | 4 ++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lib/keystone b/lib/keystone index 5bd552f557..9ceb829264 100644 --- a/lib/keystone +++ b/lib/keystone @@ -131,6 +131,9 @@ KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} # however may not be suitable for real production. KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} +# Cache settings +KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} + # Functions # --------- @@ -213,9 +216,9 @@ function configure_keystone { iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND" # Enable caching - iniset $KEYSTONE_CONF cache enabled "True" - iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached" - iniset $KEYSTONE_CONF cache memcache_servers localhost:11211 + iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE + iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND + iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications diff --git a/lib/nova b/lib/nova index e3f8655b1a..0a2becb0dc 100644 --- a/lib/nova +++ b/lib/nova @@ -91,6 +91,7 @@ NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} +NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive @@ -416,8 +417,8 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT bindir "/usr/bin" fi - # only setup database connections if there are services that - # require them running on the host. The ensures that n-cpu doesn't + # only setup database connections and cache backend if there are services + # that require them running on the host.
The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. if is_service_enabled n-api n-cond n-sched; then # If we're in multi-tier cells mode, we want our control services pointing @@ -434,6 +435,13 @@ function create_nova_conf { iniset $NOVA_CONF database connection `database_connection_url $db` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` + + # Cache related settings + # Those settings aren't really needed in n-cpu thus it is configured + # only on nodes which runs controller services + iniset $NOVA_CONF cache enabled $NOVA_ENABLE_CACHE + iniset $NOVA_CONF cache backend $CACHE_BACKEND + iniset $NOVA_CONF cache memcache_servers $MEMCACHE_SERVERS fi if is_service_enabled n-api; then diff --git a/stackrc b/stackrc index 4ac8c0338e..10117f2700 100644 --- a/stackrc +++ b/stackrc @@ -770,6 +770,10 @@ ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE # etcd is always required, so place it into list of pre-cached downloads EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION" +# Cache settings +CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"} +MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"} + # Detect duplicate values in IMAGE_URLS for image_url in ${IMAGE_URLS//,/ }; do if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then From 474f535a14598133728fedda884b437b1ce6e5e2 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 8 Aug 2019 09:15:11 +1000 Subject: [PATCH 1124/1936] oscwrap: make a little quieter A huge part of the logs is irrelevant bash aliases captured by the openstack client timing wrapper from the run of "openstack complete", which is only helpful on interactive systems where you'll interact with the command line. Call it directly to avoid capturing the logs. While we're here, turn off tracing inside the oscwrap function, which is called frequently. It's not useful for debugging. Change-Id: I1cb5399fe7ee6f0e547a9cfff70396aa2007632e --- functions-common | 5 +++++ stack.sh | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index e234523fdd..a13d611415 100644 --- a/functions-common +++ b/functions-common @@ -2354,6 +2354,10 @@ function time_stop { } function oscwrap { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local out local rc local start @@ -2368,6 +2372,7 @@ function oscwrap { echo $((end - start)) >> $OSCWRAP_TIMER_FILE echo "$out" + $xtrace return $rc } diff --git a/stack.sh b/stack.sh index 3c316448e1..a19799501b 100755 --- a/stack.sh +++ b/stack.sh @@ -1475,7 +1475,10 @@ fi # =============== # Prepare bash completion for OSC -openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null +# Note we use "command" to avoid the timing wrapper +# which isn't relevant here and floods logs +command openstack complete \ + | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null # If cinder is configured, set global_filter for PV devices if is_service_enabled cinder; then From 6c7337e80ee10ed180df284e492d36fa1f60ebc2 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Wed, 7 Aug 2019 14:34:56 +0100 Subject: [PATCH 1125/1936] Make stop/disable of apparmor work on all SLE systems The existing code to disable apparmor on SUSE systems only worked for recent openSUSE / SLE releases. On SLE12 (at least), aa-enabled and aa-teardown are not available, so instead use systemd's interface for stop/disable. 
However on newer releases, systemctl stop apparmor is a no-op: https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 https://gitlab.com/apparmor/apparmor/merge_requests/81 https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 So we still need to call aa-teardown if it's available. Change-Id: I8d99c8d743cc1935324e2e4fcb67efaa5241199e --- tools/fixup_stuff.sh | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 037928f298..d7b824c048 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -230,12 +230,24 @@ function fixup_suse { return fi - # Disable apparmor profiles in openSUSE distros - # to avoid issues with haproxy and dnsmasq - if [ -x /usr/sbin/aa-enabled ] && sudo /usr/sbin/aa-enabled -q; then - sudo systemctl disable apparmor + # Deactivate and disable apparmor profiles in openSUSE and SLE + # distros to avoid issues with haproxy and dnsmasq. In newer + # releases, systemctl stop apparmor is actually a no-op, so we + # have to use aa-teardown to make sure we've deactivated the + # profiles: + # + # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 + # https://gitlab.com/apparmor/apparmor/merge_requests/81 + # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 + if sudo systemctl is-active -q apparmor; then + sudo systemctl stop apparmor + fi + if [ -x /usr/sbin/aa-teardown ]; then sudo /usr/sbin/aa-teardown fi + if sudo systemctl is-enabled -q apparmor; then + sudo systemctl disable apparmor + fi # Since pip10, pip will refuse to uninstall files from packages # that were created with distutils (rather than more modern From 0a3288c1b40a4338df351a3fef9a346e78e12191 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Fri, 9 Aug 2019 15:57:50 +0000 Subject: [PATCH 1126/1936] Set console server host/address in nova-cpu.conf for multi-host Currently, the console server host and listen address on the compute host is always being set to localhost. This works fine in a single node all-in-one deployment, but will not work properly when nova-compute is running on a separate host in a multi-node deployment. This sets the console server host and listen address on the compute host to the nova host IP and service listen address instead of the localhost. Co-Authored-By: Matt Riedemann Closes-Bug: #1669468 Change-Id: Id8b0b4159b98c7ff3c85ec3daa03d556d9897ce9 --- lib/nova | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 7cc408d764..fb106d7e29 100644 --- a/lib/nova +++ b/lib/nova @@ -88,7 +88,6 @@ NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} @@ -597,6 +596,11 @@ function configure_console_compute { local offset offset=$(((NOVA_CPU_CELL - 1) * 4)) + # Use the host IP instead of the service host because for multi-node, the + # service host will be the controller only. 
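The computed defaults can still be overridden per node when they are not what an environment needs. A minimal sketch of such an override in a compute subnode's local.conf, using made-up management addresses purely for illustration:

    [[local|localrc]]
    # this compute node's management IP (hypothetical value)
    VNCSERVER_PROXYCLIENT_ADDRESS=10.0.0.21
    VNCSERVER_LISTEN=10.0.0.21
    SPICESERVER_PROXYCLIENT_ADDRESS=10.0.0.21
    SPICESERVER_LISTEN=10.0.0.21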
+ local default_proxyclient_addr + default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip) + # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then @@ -620,8 +624,8 @@ function configure_console_compute { if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LOCAL_HOST} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$NOVA_SERVICE_LOCAL_HOST} + VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else @@ -631,8 +635,8 @@ function configure_console_compute { if is_service_enabled n-spice; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} iniset $NOVA_CPU_CONF spice enabled true iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN" iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" From 8dd89e52d04da2230914deceaee7c4d41e73ccc2 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Sun, 11 Aug 2019 16:00:12 +0200 Subject: [PATCH 1127/1936] Update docs building Switch to "modern" way of building docs using sphinx-build directly, remove now unsed parts from setup.cfg. Upgrade to openstackdocstheme 1.20 and remove obsolete variables from conf.py. Convert external links to internal RST links so that Sphinx can verify that they are correct. Replace redirected links with new targets. Use opendev.org instead of github.com where appropriate. Change-Id: Iedcc008b170821aa74acefc02ec6a243a0dc307c --- HACKING.rst | 2 +- doc/requirements.txt | 2 +- doc/source/conf.py | 26 ++++---------------- doc/source/configuration.rst | 4 ++- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- doc/source/guides/nova.rst | 4 +-- doc/source/guides/single-vm.rst | 2 +- doc/source/plugins.rst | 2 +- doc/source/systemd.rst | 2 +- doc/source/zuul_ci_jobs_migration.rst | 2 +- setup.cfg | 9 ------- tools/xen/README.md | 2 +- tox.ini | 3 ++- 13 files changed, 20 insertions(+), 42 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index f6951064ae..f0bb269cb3 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -189,7 +189,7 @@ to enforce basic guidelines, similar to pep8 and flake8 tools for Python. The list below is not complete for what bashate checks, nor is it all checked by bashate. So many lines of code, so little time. -.. _bashate: https://pypi.python.org/pypi/bashate +.. 
_bashate: https://pypi.org/project/bashate/ Whitespace Rules ---------------- diff --git a/doc/requirements.txt b/doc/requirements.txt index f65e9dfece..fffb83d96b 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -3,7 +3,7 @@ pbr>=2.0.0,!=2.1.0 Pygments docutils sphinx>=1.6.2 -openstackdocstheme>=1.11.0 +openstackdocstheme>=1.20.0 nwdiag blockdiag sphinxcontrib-blockdiag diff --git a/doc/source/conf.py b/doc/source/conf.py index e9708fae1d..9059f8c678 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,9 +11,6 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys -import os - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -26,13 +23,16 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'sphinx.ext.autodoc', 'zuul_sphinx', 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] +extensions = [ 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', + 'sphinxcontrib.blockdiag', + 'sphinxcontrib.nwdiag' ] # openstackdocstheme options repository_name = 'openstack-dev/devstack' bug_project = 'devstack' bug_tag = '' -html_last_updated_fmt = '%Y-%m-%d %H:%M' todo_include_todos = True @@ -119,11 +119,6 @@ # pixels large. #html_favicon = None -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" -html_last_updated_fmt = os.popen(git_cmd).read() - # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True @@ -167,17 +162,6 @@ # -- Options for LaTeX output -------------------------------------------------- -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 0105d5e636..45f4ffe6e9 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -259,6 +259,8 @@ changed from the default value. Logging ------- +.. _enable_logging: + Enable Logging ~~~~~~~~~~~~~~ @@ -495,7 +497,7 @@ should be specified in the configuration file so Tempest selects the default flavors instead. 
KVM on Power with QEMU 2.4 requires 512 MB to load the firmware - -`QEMU 2.4 - PowerPC `__ so users +`QEMU 2.4 - PowerPC `__ so users running instances on ppc64/ppc64le can choose one of the default created flavors as follows: diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 1cd85b4de9..669a70d0bb 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -39,7 +39,7 @@ Edit your ``/opt/stack/devstack/local.conf`` to look like # If you are enabling horizon, include the octavia dashboard # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git # If you are enabling barbican for TLS offload in Octavia, include it here. - # enable_plugin barbican https://github.com/openstack/barbican.git + # enable_plugin barbican https://opendev.org/openstack/barbican # If you have python3 available: # USE_PYTHON3=True diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 2271e2321b..5b427972c4 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -10,7 +10,7 @@ nova-serialproxy ================ In Juno, nova implemented a `spec -`_ +`_ to allow read/write access to the serial console of an instance via `nova-serialproxy `_. @@ -63,7 +63,7 @@ The service can be enabled by adding ``n-sproxy`` to Enabling the service is enough to be functional for a single machine DevStack. These config options are defined in `nova.conf.serial_console -`_. +`_. For more information on OpenStack configuration see the `OpenStack Compute Service Configuration Reference diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 8ebf2a638d..7dac18b333 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -78,7 +78,7 @@ As DevStack will refuse to run as root, this configures ``cloud-init`` to create a non-root user and run the ``start.sh`` script as that user. If you are using cloud-init and you have not -`enabled custom logging <../configuration.html#enable-logging>`_ of the stack +:ref:`enabled custom logging ` of the stack output, then the stack output can be found in ``/var/log/cloud-init-output.log`` by default. diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 2484569d35..a18a786c49 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -332,6 +332,6 @@ See Also ======== For additional inspiration on devstack plugins you can check out the -`Plugin Registry `_. +:doc:`Plugin Registry `. .. _service types authority: https://specs.openstack.org/openstack/service-types-authority/ diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 1bc9911879..15b3f75660 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -194,7 +194,7 @@ Telnet to that port to enter the pdb session:: See the `remote-pdb`_ home page for more options. -.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb +.. _`remote-pdb`: https://pypi.org/project/remote-pdb/ Known Issues ============ diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index dbb79893db..ed97d4cf34 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -190,7 +190,7 @@ DEVSTACK_GATE_NET_OVERLAY zuul-jobs A bridge called br-infra is set up for all jobs that inherit from multinode with - a dedicated `bridge role `_. + a dedicated `bridge role `_. 
DEVSTACK_GATE_FEATURE_MATRIX devstack-gate ``test_matrix_features`` variable of the test-matrix role in diff --git a/setup.cfg b/setup.cfg index 825d386026..4e27ad80d8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,14 +11,5 @@ classifier = License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[pbr] -warnerrors = True - [wheel] universal = 1 diff --git a/tools/xen/README.md b/tools/xen/README.md index 22263bb074..287301156e 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,3 +1,3 @@ Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there. -.. _os-xenapi: https://github.com/openstack/os-xenapi/ +.. _os-xenapi: https://opendev.org/x/os-xenapi/ diff --git a/tox.ini b/tox.ini index f643fdb930..d81107fe1a 100644 --- a/tox.ini +++ b/tox.ini @@ -41,7 +41,8 @@ whitelist_externals = bash setenv = TOP_DIR={toxinidir} commands = - python setup.py build_sphinx + sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html + [testenv:venv] basepython = python3 From 0fe25e31a8ff40d76279e55c731fd31a93f0d21c Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Thu, 20 Jun 2019 20:39:53 -0700 Subject: [PATCH 1128/1936] Add the IPv6 IP to the TLS cert For some crazy reason, we've forgotten about trying to use IPv6 addresses directly with the SSL certificates. So lets add some logic so clients can connect directly with the v6 IP. Change-Id: Ie8b8a2d99945f028bebe805b83bfd863b7b72d57 --- lib/tls | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tls b/lib/tls index 0032449e13..65ffeb937d 100644 --- a/lib/tls +++ b/lib/tls @@ -234,6 +234,9 @@ function init_cert { # see https://bugs.python.org/issue23239 TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" fi + if [[ -n "$HOST_IPV6" ]]; then + TLS_IP="$TLS_IP,IP:$HOST_IPV6" + fi fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" From d3a2fcf48597ab7486a193567a02e650188370dc Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Tue, 13 Aug 2019 19:27:06 +0200 Subject: [PATCH 1129/1936] Use list tables Reformat overlong table into a list-table to make it easier to edit. The change contains no wording changes besides giving titles to the list-tables. Fix formatting for setup-devstack-source-dirs so that the variable does not get displayed with a grey bar at https://docs.openstack.org/devstack/latest/zuul_roles.html#role-setup-devstack-source-dirs Change-Id: I7378d46c507b1d86f1d5319655a55f2a8c5a8f60 --- doc/source/zuul_ci_jobs_migration.rst | 256 +++++++++++--------- roles/setup-devstack-source-dirs/README.rst | 6 +- 2 files changed, 143 insertions(+), 119 deletions(-) diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index ed97d4cf34..17e7e16fb7 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -180,123 +180,147 @@ the same or a similar behaviour in Zuul v3 jobs. For localrc settings, devstack-gate defined a default value. In ansible jobs the default is either the value defined in the parent job, or the default from DevStack, if any. -============================================== ============= ================== -DevStack gate flag Repo New implementation -============================================== ============= ================== -OVERRIDE_ZUUL_BRANCH zuul override-checkout: - [branch] - in the job definition. 
-DEVSTACK_GATE_NET_OVERLAY zuul-jobs A bridge called - br-infra is set up for - all jobs that inherit - from multinode with - a dedicated `bridge role `_. -DEVSTACK_GATE_FEATURE_MATRIX devstack-gate ``test_matrix_features`` - variable of the - test-matrix role in - devstack-gate. This - is a temporary - solution, feature - matrix will go away. - In the future services - will be defined in - jobs only. -DEVSTACK_CINDER_VOLUME_CLEAR devstack *CINDER_VOLUME_CLEAR: true/false* - in devstack_localrc - in the job vars. -DEVSTACK_GATE_NEUTRON devstack True by default. To - disable, disable all - neutron services in - devstack_services in - the job definition. -DEVSTACK_GATE_CONFIGDRIVE devstack *FORCE_CONFIG_DRIVE: true/false* - in devstack_localrc - in the job vars. -DEVSTACK_GATE_INSTALL_TESTONLY devstack *INSTALL_TESTONLY_PACKAGES: true/false* - in devstack_localrc - in the job vars. -DEVSTACK_GATE_VIRT_DRIVER devstack *VIRT_DRIVER: [virt driver]* - in devstack_localrc - in the job vars. -DEVSTACK_GATE_LIBVIRT_TYPE devstack *LIBVIRT_TYPE: [libvirt type]* - in devstack_localrc - in the job vars. -DEVSTACK_GATE_TEMPEST devstack Defined by the job - tempest that is used. The - ``devstack`` job only - runs devstack. - The ``devstack-tempest`` - one triggers a Tempest - run as well. -DEVSTACK_GATE_TEMPEST_FULL tempest *tox_envlist: full* - in the job vars. -DEVSTACK_GATE_TEMPEST_ALL tempest *tox_envlist: all* - in the job vars. -DEVSTACK_GATE_TEMPEST_ALL_PLUGINS tempest *tox_envlist: all-plugin* - in the job vars. -DEVSTACK_GATE_TEMPEST_SCENARIOS tempest *tox_envlist: scenario* - in the job vars. -TEMPEST_CONCURRENCY tempest *tempest_concurrency: [value]* - in the job vars. This - is available only on - jobs that inherit from - ``devstack-tempest`` - down. -DEVSTACK_GATE_TEMPEST_NOTESTS tempest *tox_envlist: venv-tempest* - in the job vars. This - will create Tempest - virtual environment - but run no tests. -DEVSTACK_GATE_SMOKE_SERIAL tempest *tox_envlist: smoke-serial* - in the job vars. -DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION tempest *tox_envlist: full-serial* - in the job vars. - *TEMPEST_ALLOW_TENANT_ISOLATION: false* - in devstack_localrc in - the job vars. -============================================== ============= ================== +.. list-table:: **DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 + + * - DevStack gate flag + - Repo + - New implementation + * - OVERRIDE_ZUUL_BRANCH + - zuul + - override-checkout: [branch] in the job definition. + * - DEVSTACK_GATE_NET_OVERLAY + - zuul-jobs + - A bridge called br-infra is set up for all jobs that inherit + from multinode with a dedicated `bridge role + `_. + * - DEVSTACK_GATE_FEATURE_MATRIX + - devstack-gate + - ``test_matrix_features`` variable of the test-matrix role in + devstack-gate. This is a temporary solution, feature matrix + will go away. In the future services will be defined in jobs + only. + * - DEVSTACK_CINDER_VOLUME_CLEAR + - devstack + - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the + job vars. + * - DEVSTACK_GATE_NEUTRON + - devstack + - True by default. To disable, disable all neutron services in + devstack_services in the job definition. + * - DEVSTACK_GATE_CONFIGDRIVE + - devstack + - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_INSTALL_TESTONLY + - devstack + - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in + the job vars. 
+ * - DEVSTACK_GATE_VIRT_DRIVER + - devstack + - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_LIBVIRT_TYPE + - devstack + - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_TEMPEST + - devstack and tempest + - Defined by the job that is used. The ``devstack`` job only runs + devstack. The ``devstack-tempest`` one triggers a Tempest run + as well. + * - DEVSTACK_GATE_TEMPEST_FULL + - tempest + - *tox_envlist: full* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL + - tempest + - *tox_envlist: all* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS + - tempest + - *tox_envlist: all-plugin* in the job vars. + * - DEVSTACK_GATE_TEMPEST_SCENARIOS + - tempest + - *tox_envlist: scenario* in the job vars. + * - TEMPEST_CONCURRENCY + - tempest + - *tempest_concurrency: [value]* in the job vars. This is + available only on jobs that inherit from ``devstack-tempest`` + down. + * - DEVSTACK_GATE_TEMPEST_NOTESTS + - tempest + - *tox_envlist: venv-tempest* in the job vars. This will create + Tempest virtual environment but run no tests. + * - DEVSTACK_GATE_SMOKE_SERIAL + - tempest + - *tox_envlist: smoke-serial* in the job vars. + * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION + - tempest + - *tox_envlist: full-serial* in the job vars. + *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in + the job vars. + The following flags have not been migrated yet or are legacy and won't be migrated at all. -===================================== ====== ========================== -DevStack gate flag Status Details -===================================== ====== ========================== -DEVSTACK_GATE_TOPOLOGY WIP The topology depends on the base - job that is used and more - specifically on the nodeset - attached to it. The new job - format allows project to define - the variables to be passed to - every node/node-group that exists - in the topology. Named topologies - that include the nodeset and the - matching variables can be defined - in the form of base jobs. -DEVSTACK_GATE_GRENADE TBD Grenade Zuul V3 jobs will be - hosted in the grenade repo. -GRENADE_BASE_BRANCH TBD Grenade Zuul V3 jobs will be - hosted in the grenade repo. -DEVSTACK_GATE_NEUTRON_DVR TBD Depends on multinode support. -DEVSTACK_GATE_EXERCISES TBD Can be done on request. -DEVSTACK_GATE_IRONIC TBD This will probably be implemented - on ironic side. -DEVSTACK_GATE_IRONIC_DRIVER TBD This will probably be implemented - on ironic side. -DEVSTACK_GATE_IRONIC_BUILD_RAMDISK TBD This will probably be implemented - on ironic side. -DEVSTACK_GATE_POSTGRES Legacy This flag exists in d-g but the - only thing that it does is - capture postgres logs. This is - already supported by the roles in - post, so the flag is useless in - the new jobs. postgres itself can - be enabled via the - devstack_service job variable. -DEVSTACK_GATE_ZEROMQ Legacy This has no effect in d-g. -DEVSTACK_GATE_MQ_DRIVER Legacy This has no effect in d-g. -DEVSTACK_GATE_TEMPEST_STRESS_ARGS Legacy Stress is not in Tempest anymore. -DEVSTACK_GATE_TEMPEST_HEAT_SLOW Legacy This is not used anywhere. -DEVSTACK_GATE_CELLS Legacy This has no effect in d-g. -DEVSTACK_GATE_NOVA_API_METADATA_SPLIT Legacy This has no effect in d-g. -===================================== ====== ========================== +.. 
list-table:: **Not Migrated DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 + + * - DevStack gate flag + - Status + - Details + * - DEVSTACK_GATE_TOPOLOGY + - WIP + - The topology depends on the base job that is used and more + specifically on the nodeset attached to it. The new job format + allows project to define the variables to be passed to every + node/node-group that exists in the topology. Named topologies + that include the nodeset and the matching variables can be + defined in the form of base jobs. + * - DEVSTACK_GATE_GRENADE + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo. + * - GRENADE_BASE_BRANCH + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo. + * - DEVSTACK_GATE_NEUTRON_DVR + - TBD + - Depends on multinode support. + * - DEVSTACK_GATE_EXERCISES + - TBD + - Can be done on request. + * - DEVSTACK_GATE_IRONIC + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_IRONIC_DRIVER + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_POSTGRES + - Legacy + - This flag exists in d-g but the only thing that it does is + capture postgres logs. This is already supported by the roles + in post, so the flag is useless in the new jobs. postgres + itself can be enabled via the devstack_service job variable. + * - DEVSTACK_GATE_ZEROMQ + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_MQ_DRIVER + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS + - Legacy + - Stress is not in Tempest anymore. + * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW + - Legacy + - This is not used anywhere. + * - DEVSTACK_GATE_CELLS + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT + - Legacy + - This has no effect in d-g. diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst index 49d22c3c64..0aa048b7d2 100644 --- a/roles/setup-devstack-source-dirs/README.rst +++ b/roles/setup-devstack-source-dirs/README.rst @@ -10,7 +10,7 @@ into it. The devstack base directory. - .. zuul:rolevar:: devstack_sources_branch - :default: None +.. zuul:rolevar:: devstack_sources_branch + :default: None - The target branch to be setup (where available). + The target branch to be setup (where available). From 005004eded47ee840b78e6e0c94701c5bd253816 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 31 May 2019 13:50:55 -0400 Subject: [PATCH 1130/1936] Switch to fedora-29 for fedora-latest nodeset F28 is EOL as of 5.29.19, so change to use fedora-29 and remove it from stack.sh. Depends-on: https://review.opendev.org/#/c/662538/ Change-Id: I5ebdb68fcd01a1e63be4b3c0735a274783aad818 --- .zuul.yaml | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9853798963..a01e405f0b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -52,7 +52,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-28 + label: fedora-29 groups: - name: tempest nodes: diff --git a/stack.sh b/stack.sh index 3c316448e1..10f3dc2b28 100755 --- a/stack.sh +++ b/stack.sh @@ -224,7 +224,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! 
${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f28|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 5a30480ce188b4846a2f5c86ca38331b635d5b3b Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 16 Aug 2019 09:57:59 +0000 Subject: [PATCH 1131/1936] Update HACKING doc Some minor updates. Change-Id: I0d4e20e394f0fa420e896e0b1a90872ccf909b58 --- HACKING.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index f6951064ae..78462c29bd 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -10,7 +10,7 @@ and so is limited to Bash (version 4 and up) and compatible shells. Shell script was chosen because it best illustrates the steps used to set up and interact with OpenStack components. -DevStack's official repository is located on git.openstack.org at +DevStack's official repository is located on opendev.org at https://opendev.org/openstack/devstack. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). @@ -23,7 +23,7 @@ __ contribute_ .. _contribute: https://docs.openstack.org/infra/manual/developers.html __ lp_ -.. _lp: https://launchpad.net/~devstack +.. _lp: https://launchpad.net/devstack The `Gerrit review queue `__ @@ -163,7 +163,7 @@ Documentation The DevStack repo now contains all of the static pages of devstack.org in the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every -commit and updates devstack.org (now a redirect to docs.openstack.org/developer/devstack). +commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/). All of the scripts are processed with shocco_ to render them with the comments as text describing the script below. For this reason we tend to be a little From d331fa7a2d027e98c08aba442c05ba54abc7d6d8 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 29 Jul 2019 10:42:24 +0000 Subject: [PATCH 1132/1936] Set cinder's my_ip based on SERVICE_IP_VERSION value Devstack's lib/cinder sets my_ip on the cinder side but hard-codes it to HOST_IP[1]. That is no issue for an IPv4 env, but when you build or run the IPv6 job this option is still left pointing at the IPv4 address. my_ip should be set to HOST_IP or HOST_IPV6 based on the SERVICE_IP_VERSION value. As part of the Train community goal 'Support IPv6-Only Deployments', we will expand the 'devstack-tempest-ipv6' job to verify IPv6-only deployments, so we need to fix the my_ip setting.
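For illustration, with this change a node configured for IPv6 service addressing roughly as below (the addresses are made up) ends up with my_ip in cinder.conf set to the HOST_IPV6 value rather than the IPv4 HOST_IP:

    [[local|localrc]]
    SERVICE_IP_VERSION=6
    HOST_IPV6=2001:db8::10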
Closes-Bug: #1838250 Depends-On: https://review.opendev.org/#/c/677524/ [1]https://github.com/openstack/devstack/blob/6aeaceb0c4ef078d028fb6605cac2a37444097d8/lib/cinder#L231 Change-Id: I71c74e46467a5d3c1bf9c7d683f364cba7cf9d80 --- lib/cinder | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 32e38c44da..fd960535d9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -228,8 +228,11 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - + if [[ $SERVICE_IP_VERSION == 6 ]]; then + iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6" + else + iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" + fi iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) From f1a794e1326b79aa5c5e67530f2169479b9bf1f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Wed, 21 Aug 2019 10:49:57 +0200 Subject: [PATCH 1133/1936] flat_networks - Don't hardcode public network name Use the PUBLIC_NETWORK_NAME variable instead of hardcoding it when setting the [ml2_type_flat]/flat_networks option. Change-Id: I8bfc37089ec90eb06ee41d85744dad0f3f734c16 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 0d23d97fcf..ea20e04d08 100644 --- a/lib/neutron +++ b/lib/neutron @@ -225,7 +225,7 @@ function configure_neutron_new { iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public + iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} fi From e273c0433f40ba581703dae4378a74af40410c9f Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Tue, 13 Aug 2019 14:28:24 -0500 Subject: [PATCH 1134/1936] Set ksa retry conf options for n-cpu [ironic] We're trying to get nova to talk to ironic through openstacksdk and need to be able to specify retry limits/intervals there. We could reuse the existing conf options, but better to support the standard ones exposed from keystoneauth1 via [1] and [2]. Note that these will be ignored unless you have keystoneauth1 3.15.0 (for [1]) or 3.16.0 ([1] and [2]) and are building your adapter using ksa-derived conf options (see the Needed-By). 
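For clarity, the [ironic] section of nova.conf rendered by this change looks roughly like the sketch below; the first two options are the pre-existing ironicclient ones and the remaining four are the new openstacksdk/keystoneauth1 ones:

    [ironic]
    api_max_retries = 300
    api_retry_interval = 5
    connect_retries = 300
    connect_retry_delay = 5
    status_code_retries = 300
    status_code_retry_delay = 5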
Needed-By: https://review.opendev.org/642899 [1] https://review.opendev.org/#/c/666287/ [2] https://review.opendev.org/#/c/672930/ Change-Id: I79c416e25d635b0ffa419640b4bd91e36f78b1ab --- lib/nova_plugins/hypervisor-ironic | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 9bc04e2122..adcc278812 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -54,8 +54,14 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_name demo iniset $NOVA_CONF ironic region_name $REGION_NAME + # These are used with crufty legacy ironicclient iniset $NOVA_CONF ironic api_max_retries 300 iniset $NOVA_CONF ironic api_retry_interval 5 + # These are used with shiny new openstacksdk + iniset $NOVA_CONF ironic connect_retries 300 + iniset $NOVA_CONF ironic connect_retry_delay 5 + iniset $NOVA_CONF ironic status_code_retries 300 + iniset $NOVA_CONF ironic status_code_retry_delay 5 } # install_nova_hypervisor() - Install external components From 2468ceaa724aa5c8c44fb87ae223eb6687ff85f2 Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Thu, 25 Jul 2019 13:18:58 -0500 Subject: [PATCH 1135/1936] Merge (don't overwrite) $NOVA_CPU_CONF Per the referenced bug, $NOVA_CPU_CONF was previously being initialized by copying $NOVA_CONF, thereby trashing any values already configured in $NOVA_CPU_CONF. With this commit, we merge the values from $NOVA_CPU_CONF in after the copy. Note that this makes use of the merge_config_file function, which is defined in inc/meta-config, which wasn't being sourced from every code path that hit start_nova_compute; so this commit also moves that import from stack.sh to functions (next to the other imports from inc/, which makes sense anyway). Change-Id: Id3e2baa2221e13f512f8dcf1248e1e15b6a7597f Closes-Bug: #1802143 --- functions | 1 + lib/nova | 3 +++ stack.sh | 3 --- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 93035673b5..f33fd25fee 100644 --- a/functions +++ b/functions @@ -18,6 +18,7 @@ declare -r -g _DEVSTACK_FUNCTIONS=1 FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source ${FUNC_DIR}/functions-common source ${FUNC_DIR}/inc/ini-config +source ${FUNC_DIR}/inc/meta-config source ${FUNC_DIR}/inc/python source ${FUNC_DIR}/inc/rootwrap diff --git a/lib/nova b/lib/nova index f423b570cd..1ec1a785e6 100644 --- a/lib/nova +++ b/lib/nova @@ -838,7 +838,10 @@ function start_nova_compute { local compute_cell_conf=$NOVA_CONF + # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF... cp $compute_cell_conf $NOVA_CPU_CONF + # ...and then adding/overriding anything explicitly set in $NOVA_CPU_CONF + merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF' if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so diff --git a/stack.sh b/stack.sh index b8d597e11c..01469688eb 100755 --- a/stack.sh +++ b/stack.sh @@ -167,9 +167,6 @@ LAST_SPINNER_PID="" # Import common functions source $TOP_DIR/functions -# Import config functions -source $TOP_DIR/inc/meta-config - # Import 'public' stack.sh functions source $TOP_DIR/lib/stack From f92c346131db2c89b930b1a23f8489419a2217dc Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 22 Aug 2019 12:15:09 -0400 Subject: [PATCH 1136/1936] Fix MySQL log collection The mysql logs weren't being copied to logs and published. 
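As an illustration of what this enables, a local.conf override such as the following (the option and value are only an example) now survives into $NOVA_CPU_CONF instead of being wiped out by the copy of $NOVA_CONF:

    [[post-config|$NOVA_CPU_CONF]]
    [libvirt]
    # example override; any option intended for the compute cell conf works the same way
    cpu_mode = host-passthrough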
Change-Id: I20740b468e4b310ac07e77f3930cae92026942fb --- .zuul.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9853798963..1b43611358 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -240,8 +240,7 @@ '{{ stage_dir }}/etc': logs /var/log/rabbitmq: logs /var/log/postgresql: logs - /var/log/mysql.err: logs - /var/log/mysql.log: logs + /var/log/mysql: logs /var/log/libvirt: logs /etc/sudoers: logs /etc/sudoers.d: logs From 28c498150d89a8a414a50ad307a79b764514e424 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 22 Aug 2019 15:39:53 +0200 Subject: [PATCH 1137/1936] Select proper flavor_ref_alt for Tempest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently if user selects the default instance type for Tempest tests, some of resize-related tests may fail due to resize attempt into flavor with smaller disk size. It is because there is just simple check if flavor_ref and flavor_ref_alt (IDs) aren't the same. To ensure resize is really possible, there shall be additional verification introduced. Co-Authored-By: Michał Madarasz Change-Id: Iaa1bfa9cb76cbe54be658d2d70d97d99e7fb5be9 --- lib/tempest | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/tempest b/lib/tempest index 4a192a0790..96c9ced14a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -130,6 +130,8 @@ function configure_tempest { local available_flavors local flavors_ref local flavor_lines + local flavor_ref_size + local flavor_ref_alt_size local public_network_id local public_router_id local ssh_connect_method="floating" @@ -233,11 +235,24 @@ function configure_tempest { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref + flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}") # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. for f in ${flavors[@]:1}; do if [[ "$f" != "$flavor_ref" ]]; then + # + # NOTE(sdatko): Resize is only possible when target flavor + # is not smaller than the original one. For + # Tempest tests, in case there was a bigger + # flavor selected as default, e.g. m1.small, + # we need to perform additional check. + # + flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}") + if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then + continue + fi + flavor_ref_alt=$f break fi From 168ca7f0a474f1207ee01dab0ca2e70f34783e9c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 26 Aug 2019 10:11:03 +0100 Subject: [PATCH 1138/1936] Remove deprecated PostgreSQL database driver This was deprecated for removal in Pike. It's probably time to drop it. Note that the 'postgresql-devel'/'postgresql-server-dev-all' packages are retained since some packages still include 'psycopg2' in their general requirements. Change-Id: I51e8354e99972757253ce259e6c03c91da24398c Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 22 ++--- doc/source/zuul_ci_jobs_migration.rst | 5 +- functions | 3 +- lib/databases/postgresql | 137 -------------------------- stack.sh | 9 +- unstack.sh | 4 - 6 files changed, 14 insertions(+), 166 deletions(-) delete mode 100644 lib/databases/postgresql diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 45f4ffe6e9..62571e0760 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -326,29 +326,23 @@ a file, keep service logs and disable color in the stored files. 
Database Backend ---------------- -Multiple database backends are available. The available databases are defined -in the lib/databases directory. -``mysql`` is the default database, choose a different one by putting the -following in the ``localrc`` section:: +Support for the MySQL database backend is included. Addition database backends +may be available via external plugins. Enabling of disabling MySQL is handled +via the usual service functions and ``ENABLED_SERVICES``. For example, to +disable MySQL in ``local.conf``:: disable_service mysql - enable_service postgresql - -``mysql`` is the default database. RPC Backend ----------- -Support for a RabbitMQ RPC backend is included. Additional RPC -backends may be available via external plugins. Enabling or disabling -RabbitMQ is handled via the usual service functions and -``ENABLED_SERVICES``. - -Example disabling RabbitMQ in ``local.conf``:: +Support for a RabbitMQ RPC backend is included. Additional RPC backends may be +available via external plugins. Enabling or disabling RabbitMQ is handled via +the usual service functions and ``ENABLED_SERVICES``. For example, to disable +RabbitMQ in ``local.conf``:: disable_service rabbit - Apache Frontend --------------- diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index 17e7e16fb7..66f8251039 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -302,10 +302,7 @@ migrated at all. - This will probably be implemented on ironic side. * - DEVSTACK_GATE_POSTGRES - Legacy - - This flag exists in d-g but the only thing that it does is - capture postgres logs. This is already supported by the roles - in post, so the flag is useless in the new jobs. postgres - itself can be enabled via the devstack_service job variable. + - This has no effect in d-g. * - DEVSTACK_GATE_ZEROMQ - Legacy - This has no effect in d-g. diff --git a/functions b/functions index f33fd25fee..8eeb03203e 100644 --- a/functions +++ b/functions @@ -400,7 +400,8 @@ function upload_image { # initialized yet, just save the configuration selection and call back later # to validate it. # -# ``$1`` - the name of the database backend to use (mysql, postgresql, ...) 
+# ``$1`` - the name of the database backend to use (only mysql is currently +# supported) function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # No backends registered means this is likely called from ``localrc`` diff --git a/lib/databases/postgresql b/lib/databases/postgresql deleted file mode 100644 index 618834b550..0000000000 --- a/lib/databases/postgresql +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash -# -# lib/databases/postgresql -# Functions to control the configuration and operation of the **PostgreSQL** database backend - -# Dependencies: -# -# - DATABASE_{HOST,USER,PASSWORD} must be defined - -# Save trace setting -_XTRACE_PG=$(set +o | grep xtrace) -set +o xtrace - - -MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} - - -register_database postgresql - - -# Functions -# --------- - -function get_database_type_postgresql { - echo postgresql -} - -# Get rid of everything enough to cleanly change database backends -function cleanup_database_postgresql { - stop_service postgresql - if is_ubuntu; then - # Get ruthless with mysql - apt_get purge -y postgresql* - return - elif is_fedora || is_suse; then - uninstall_package postgresql-server - else - return - fi -} - -function recreate_database_postgresql { - local db=$1 - # Avoid unsightly error when calling dropdb when the database doesn't exist - psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db" - createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db -} - -function configure_database_postgresql { - local pg_conf pg_dir pg_hba check_role version - echo_summary "Configuring and starting PostgreSQL" - if is_fedora; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - if ! sudo [ -e $pg_hba ]; then - sudo postgresql-setup initdb - fi - elif is_ubuntu; then - version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` - if vercmp $version '>=' 9.3; then - if [ -z "`pg_lsclusters -h`" ]; then - echo 'No PostgreSQL clusters exist; will create one' - sudo pg_createcluster $version main --start - fi - fi - pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` - pg_hba=$pg_dir/pg_hba.conf - pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql - else - exit_distro_not_supported "postgresql configuration" - fi - # Listen on all addresses - sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf - # Set max_connections - sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf - # Do password auth from all IPv4 clients - sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba - # Do password auth for all IPv6 clients - sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba - restart_service postgresql - - # Create the role if it's not here or else alter it. 
- check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'") - if [[ ${check_role} == *HERE ]];then - sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" - else - sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" - fi -} - -function install_database_postgresql { - echo_summary "Installing postgresql" - deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" - local pgpass=$HOME/.pgpass - if [[ ! -e $pgpass ]]; then - cat < $pgpass -*:*:*:$DATABASE_USER:$DATABASE_PASSWORD -EOF - chmod 0600 $pgpass - else - sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass - fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - if is_fedora; then - sudo systemctl enable postgresql - fi - else - exit_distro_not_supported "postgresql installation" - fi -} - -function install_database_python_postgresql { - # Install Python client module - pip_install_gr psycopg2 - ADDITIONAL_VENV_PACKAGES+=",psycopg2" -} - -function database_connection_url_postgresql { - local db=$1 - echo "$BASE_SQL_CONN/$db?client_encoding=utf8" -} - - -# Restore xtrace -$_XTRACE_PG - -# Local variables: -# mode: shell-script -# End: diff --git a/stack.sh b/stack.sh index 9982c353c0..5064d5db1d 100755 --- a/stack.sh +++ b/stack.sh @@ -695,14 +695,11 @@ function read_password { # Database Configuration # ---------------------- -# To select between database backends, add the following to ``local.conf``: +# DevStack provides a MySQL database backend. Additional backends may be +# provided by external plugins and can be enabled using the usual service +# functions and ``ENABLED_SERVICES``. For example, to disable MySQL: # # disable_service mysql -# enable_service postgresql -# -# The available database backends are listed in ``DATABASE_BACKENDS`` after -# ``lib/database`` is sourced. ``mysql`` is the default. - if initialize_database_backends; then echo "Using $DATABASE_TYPE database backend" # Last chance for the database password. This must be handled here diff --git a/unstack.sh b/unstack.sh index ccea0ef585..07dc2b1418 100755 --- a/unstack.sh +++ b/unstack.sh @@ -147,10 +147,6 @@ if [[ -n "$UNSTACK_ALL" ]]; then stop_service mysql fi - if is_service_enabled postgresql; then - stop_service postgresql - fi - # Stop rabbitmq-server if is_service_enabled rabbit; then stop_service rabbitmq-server From 951e14d6dd534236e2710a48fb3d86d66f5c0228 Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Thu, 8 Aug 2019 16:20:19 +0000 Subject: [PATCH 1139/1936] Fix default security group in samples/local.sh The script currently complains about multiple default security groups. This obtains the default and uses it when creating security group rules. 
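Assuming a fresh devstack where only the one default group exists, the result can be sanity-checked with something like:

    default=$(openstack security group list -f value -c ID)
    openstack security group rule list $default

which should show the freshly added tcp/22 and icmp rules.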
Change-Id: I81e59eae5df79889ed1fb02d45af26e3a55aa0e9 --- samples/local.sh | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/samples/local.sh b/samples/local.sh index 9cd0bdcc17..a1c5c8143b 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -41,6 +41,13 @@ if is_service_enabled nova; then fi done + # Update security default group + # ----------------------------- + + # Add tcp/22 and icmp to default security group + default=$(openstack security group list -f value -c ID) + openstack security group rule create $default --protocol tcp --dst-port 22 + openstack security group rule create $default --protocol icmp # Create A Flavor # --------------- @@ -57,12 +64,4 @@ if is_service_enabled nova; then openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1 fi - - # Other Uses - # ---------- - - # Add tcp/22 and icmp to default security group - openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22 - openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp - fi From 16bccbcea410ce426f83b5086424080b5bfaf925 Mon Sep 17 00:00:00 2001 From: Peter Penchev Date: Mon, 9 Sep 2019 15:14:11 +0300 Subject: [PATCH 1140/1936] Revert "install LIBS_FROM_GIT using python 2 and 3 where appropriate" All the OpenStack projects should be able to run under Python 3 now so the fallback installation of the Python 2 libraries should not be needed any longer. This also avoids the problem of script files installed by the libraries sometimes being overwritten by the Python 2 version leading to incorrect execution later, as discussed in http://lists.openstack.org/pipermail/openstack-discuss/2019-September/009226.html This reverts commit a2eb89417fbb6d61526b1819cbe3d0a60537eedd. Change-Id: I1cdb7e4a209872f1620be556b7278879a4b86df5 --- inc/python | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/inc/python b/inc/python index ea8ff67e08..81b6a960a4 100644 --- a/inc/python +++ b/inc/python @@ -380,16 +380,6 @@ function setup_dev_lib { fi local name=$1 local dir=${GITDIR[$name]} - if python3_enabled; then - # Turn off Python 3 mode and install the package again, - # forcing a Python 2 installation. This ensures that all libs - # being used for development are installed under both versions - # of Python. - echo "Installing $name again without Python 3 enabled" - USE_PYTHON3=False - setup_develop $bindep $dir - USE_PYTHON3=True - fi setup_develop $bindep $dir } From ce396d374bc00f21671bcdae30e12c44cd1ef1eb Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 5 Sep 2019 08:51:33 +0000 Subject: [PATCH 1141/1936] Fix worlddump log collection All credit for figuring this out goes to frickler (and that was the hard bit so thank you!). The worlddump files were not being collected because they weren't in our log collection list. Add worlddump to this list so that we collect these files. One thing that makes this slightly complicated is the worlddump files are named with a timestamp and we can't have globs in our collection list. To address this we create a copy of the file with a -latest.txt suffix. This gives us a deterministic file name for log collection without using globs. Note we do not use a symlink here because some jobs gzip their log files (breaking symlinks) and others do not. This makes it painful to always have a valid link. Not having a valid link can break log collection. 
Hardlinks may be another option but simply making a copy is easier to manage as you don't have to worry about links preexisting and the dumpfiles are not that large. Change-Id: I96ae5f5290546ad25ca434c1106c01354d2d053c --- .zuul.yaml | 1 + tools/worlddump.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 1b43611358..197942906f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -233,6 +233,7 @@ '{{ devstack_log_dir }}/devstacklog.txt': logs '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs + '{{ devstack_log_dir }}/worlddump-latest.txt': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs '{{ stage_dir }}/apache': logs diff --git a/tools/worlddump.py b/tools/worlddump.py index 88af19d2e3..d1453ca076 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -25,6 +25,7 @@ import fnmatch import os import os.path +import shutil import subprocess import sys @@ -248,6 +249,14 @@ def main(): compute_consoles() guru_meditation_reports() var_core() + # Singular name for ease of log retrieval + copyname = os.path.join(opts.dir, 'worlddump') + if opts.name: + copyname += '-' + opts.name + copyname += '-latest.txt' + # We make a full copy to deal with jobs that may or may not + # gzip logs breaking symlinks. + shutil.copyfile(fname, copyname) if __name__ == '__main__': From ba50347526ea76b501dacf774ea6f0997361358c Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Wed, 21 Nov 2018 15:28:27 +0100 Subject: [PATCH 1142/1936] Update for f29 In Fedora 29 dstat was merged with pcp-system-tools (see https://pagure.io/fesco/issue/1956) Work around a iscsi bug with external packages until we can get the package fixed. Obsolete F27 Change-Id: I2f16658c5a3e22cac70912a0f3ad65cdd7071a1e --- files/rpms/cinder | 4 ++-- files/rpms/dstat | 3 ++- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/nova | 10 ++++++++++ 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 058c2354dc..e6b33dcc5e 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f25,f26,f27,f28 NOPRIME -targetcli # dist:rhel7,f25,f26,f27,f28 NOPRIME +scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29 NOPRIME +targetcli # dist:rhel7,f25,f26,f27,f28,f29 NOPRIME diff --git a/files/rpms/dstat b/files/rpms/dstat index 0d9da4434f..b058c277cb 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,2 +1,3 @@ -dstat +dstat # not:f29 +pcp-system-tools # dist:f29 python-psutil diff --git a/files/rpms/general b/files/rpms/general index c7863e4320..5bf1e9ac90 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f25,f26,f27,f28 +iptables-services # NOPRIME f25,f26,f27,f28,f29 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28 +java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index f69fc373d7..639d793756 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f25,f26,f27,f28 +kernel-modules # dist:f25,f26,f27,f28,f29 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index 
f2f5de69b0..be524d1367 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f25,f26,f27,f28 +rsync-daemon # dist:f25,f26,f27,f28,f29 sqlite xfsprogs xinetd diff --git a/lib/nova b/lib/nova index 22f0706b3d..c8e6164916 100644 --- a/lib/nova +++ b/lib/nova @@ -296,6 +296,16 @@ function configure_nova { fi fi + if is_fedora; then + # There is an iscsi-initiator bug where it inserts + # different whitespace that causes a bunch of output + # matching to fail. We have not been able to get + # fixed, yet :/ Exists in fedora 29 & 30 + # https://bugzilla.redhat.com/show_bug.cgi?id=1676365 + sudo dnf copr enable -y iwienand/iscsi-initiator-utils + sudo dnf update -y + fi + if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging # enabled. From 0c9a6cab9118fd4ebcdb5277ccf66756fe4f2ec8 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 19 Sep 2019 13:57:43 +0000 Subject: [PATCH 1143/1936] Enable accept_ra before enabling forwarding We need to enable accept_ra before we enable forwarding, otherwise existing addresses and routes may get dropped until the next RA is received, possibly causing connection errors in the meantime. Change-Id: I1fdeede59547de896ed89222ecf121fd9e6b810d --- lib/neutron_plugins/services/l3 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index ec289f6656..69536bbe58 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -395,8 +395,6 @@ function _neutron_configure_router_v6 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - # Ensure IPv6 forwarding is enabled on the host - sudo sysctl -w net.ipv6.conf.all.forwarding=1 # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. @@ -409,6 +407,8 @@ function _neutron_configure_router_v6 { # device name would be reinterpreted as a slash, causing an error. sudo sysctl -w net/ipv6/conf/$d/accept_ra=2 done + # Ensure IPv6 forwarding is enabled on the host + sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') From 1a46c898db9c16173013d95e2bc954992121077c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Sep 2019 08:11:08 +1000 Subject: [PATCH 1144/1936] Restrict iscsi package hack to Fedora I forgot in I2f16658c5a3e22cac70912a0f3ad65cdd7071a1e that "is_fedora" matches rhel/centos for historical reasons. Restrict the install to just the Fedora platforms by checking DISTRO matches Change-Id: Ica4a690a4f2894a03ceb8557a947ed2ea4a60e53 --- lib/nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index c8e6164916..c41f881fa1 100644 --- a/lib/nova +++ b/lib/nova @@ -296,11 +296,11 @@ function configure_nova { fi fi - if is_fedora; then + if is_fedora && [[ $DISTRO =~ f[0-9][0-9] ]]; then # There is an iscsi-initiator bug where it inserts # different whitespace that causes a bunch of output # matching to fail. 
We have not been able to get - # fixed, yet :/ Exists in fedora 29 & 30 + # fixed, yet :/ Exists in fedora 29 & 30 at least # https://bugzilla.redhat.com/show_bug.cgi?id=1676365 sudo dnf copr enable -y iwienand/iscsi-initiator-utils sudo dnf update -y From 893817d30af5edded6cae9005ac6f00712fd787d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 25 Sep 2019 08:30:07 +1000 Subject: [PATCH 1145/1936] generate-devstack-plugins-list: Retry on opendev.org 500 A 500 error from gitea can occasionally show up as a project dropping their devstack plugin (I543faced83a685d48706d004ae49800abfb89dc5). To avoid noise in the proposal jobs, implement a small retry loop for 500 errors. Change-Id: Ide23e4de819a2c751d887eeaa7f0b9d0437f8e2c --- tools/generate-devstack-plugins-list.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 11062eab2b..d39b8018ae 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -28,6 +28,9 @@ import json import requests +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + logging.basicConfig(level=logging.DEBUG) url = 'https://review.opendev.org/projects/' @@ -63,6 +66,12 @@ def has_devstack_plugin(session, proj): logging.debug("Found %d projects" % len(projects)) s = requests.Session() +# sometimes gitea gives us a 500 error; retry sanely +# https://stackoverflow.com/a/35636367 +retries = Retry(total=3, backoff_factor=1, + status_forcelist=[ 500 ]) +s.mount('https://', HTTPAdapter(max_retries=retries)) + found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) for project in found_plugins: From c6f588df02bfc01bde463cb63f19f90a5935839b Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 25 Sep 2019 10:31:09 -0400 Subject: [PATCH 1146/1936] Fix target branch for pbr and diskimage-builder The target branch was centralized in change I82aa19e739eeda3721bac1cb5153ad0bf2d1125a but there are two issues, pbr and diskimage-builder are using TARGET_BRANCH which gets changed to stable/* for each openstack stable branch that gets created for devstack, e.g. I861068ae1a9902cef61c52c70dda7bb42f4371a0, but pbr and diskimage-builder don't have stable branches so they should be using BRANCHLESS_TARGET_BRANCH i.e. master. 
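The new defaults are equivalent to explicitly setting, in the localrc section of local.conf:

    PBR_BRANCH=master
    DIB_BRANCH=master

and both remain overridable the same way if a different ref is ever needed.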
Change-Id: I47ac7a7e194ca6d613d0ccaebfd557346644c2df --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 10117f2700..4b049413be 100644 --- a/stackrc +++ b/stackrc @@ -499,7 +499,7 @@ GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} # pbr drives the setuptools configs GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git} -GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH} +GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH} ################## @@ -554,7 +554,7 @@ GITDIR["ironic-lib"]=$DEST/ironic-lib # diskimage-builder tool GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} -GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$TARGET_BRANCH} +GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH} GITDIR["diskimage-builder"]=$DEST/diskimage-builder # neutron-lib library containing neutron stable non-REST interfaces From 6eb2c5990d738612c4cec34daeb673092303a50a Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Wed, 25 Sep 2019 12:51:23 -0700 Subject: [PATCH 1147/1936] Fix six package on opensuse for pip 10 openSUE Leap 15.0 and 15.1 both provide python3-six version 1.11.0. Since version 1.12.0 was released, pip>=10 recognizes the version difference and tries to uninstall the distro-provided version and fails. This change adds another hack to remove the egg-info file for the six library so that pip can manage it directly. We also have to wait to install os-testr until after the fixup has happened since trying to install it triggers the issue. Change-Id: I4649abe06b5893a5251bfcdd4234abccde6ceda2 --- stack.sh | 6 +++--- tools/fixup_stuff.sh | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 11783fd5bc..b7b37e2cf6 100755 --- a/stack.sh +++ b/stack.sh @@ -796,9 +796,6 @@ if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi -# Install subunit for the subunit output stream -pip_install -U os-testr - TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install Python packages into a virtualenv so that we can track them @@ -816,6 +813,9 @@ fi source $TOP_DIR/tools/fixup_stuff.sh fixup_all +# Install subunit for the subunit output stream +pip_install -U os-testr + if [[ "$USE_SYSTEMD" == "True" ]]; then pip_install_gr systemd-python # the default rate limit of 1000 messages / 30 seconds is not diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index d7b824c048..d2989379fe 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -256,6 +256,7 @@ function fixup_suse { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info + sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info } # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has From 12e5ddcc2d0058ead4de76053db698ed21d4e813 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Wed, 25 Sep 2019 12:23:16 +0900 Subject: [PATCH 1148/1936] Add PDF documentation build This commit adds PDF documentation build target 'pdf-docs' that will build PDF versions of our docs. 
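Once in place, the PDF build should be runnable locally with something like:

    tox -e pdf-docs

with the result (doc-devstack.pdf, per the latex_documents target below) ending up under doc/build/pdf/.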
As per the Train community goal: https://governance.openstack.org/tc/goals/selected/train/pdf-doc-generation.html Change-Id: Iecb0fe5b957af7dae66bea04dfbd9c2fb4f74a99 Story: #2006070 Task: #35456 --- doc/source/conf.py | 2 +- doc/source/index.rst | 9 ++++----- tox.ini | 8 ++++++++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 9059f8c678..56043ba6f7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -165,7 +165,7 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'DevStack-doc.tex', u'DevStack Docs', + ('index', 'doc-devstack.tex', u'DevStack Docs', u'OpenStack DevStack Team', 'manual'), ] diff --git a/doc/source/index.rst b/doc/source/index.rst index 8f958585ee..6694022316 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -11,9 +11,8 @@ and how to go beyond this setup. Both should be a set of quick links to other documents to let people explore from there. -========== - DevStack -========== +DevStack +======== .. image:: assets/images/logo-blue.png @@ -32,7 +31,7 @@ The source is available at ``__. are dedicated to this purpose. Quick Start -=========== ++++++++++++ Install Linux ------------- @@ -153,7 +152,7 @@ with devstack, and help us by :doc:`contributing to the project `. Contents --------- +++++++++ .. toctree:: :glob: diff --git a/tox.ini b/tox.ini index d81107fe1a..26baa2a1c5 100644 --- a/tox.ini +++ b/tox.ini @@ -43,6 +43,14 @@ setenv = commands = sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html +[testenv:pdf-docs] +basepython = python3 +deps = {[testenv:docs]deps} +whitelist_externals = + make +commands = + sphinx-build -W -b latex doc/source doc/build/pdf + make -C doc/build/pdf [testenv:venv] basepython = python3 From 594885c80864710846f4fed35f19dc30a060f446 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 27 Sep 2019 16:45:09 -0400 Subject: [PATCH 1149/1936] Centralize and configure nova with cinder service user access Since Queens [1] nova has been able to be configured with cinder service user credentials for operating on cinder resources without a user auth token similar to things nova needs to do without a user auth token for working with neutron and placement resources. 
This change: - centralizes the nova [cinder] section configuration - adds the necessary auth configuration Needed by: https://review.opendev.org/549130/ [1] I3c35bba43fee81baebe8261f546c1424ce3a3383 Change-Id: I5640ee431f6856853f6b00ec7ed1ea21d05117dd --- lib/nova | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/lib/nova b/lib/nova index c41f881fa1..33d0a9f8a8 100644 --- a/lib/nova +++ b/lib/nova @@ -468,11 +468,7 @@ function create_nova_conf { fi if is_service_enabled cinder; then - if is_service_enabled tls-proxy; then - CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} - CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} - iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE - fi + configure_cinder_access fi if [ -n "$NOVA_STATE_PATH" ]; then @@ -520,8 +516,6 @@ function create_nova_conf { # don't let the conductor get out of control now that we're using a pure python db driver iniset $NOVA_CONF conductor workers "$API_WORKERS" - iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" - if is_service_enabled tls-proxy; then iniset $NOVA_CONF DEFAULT glance_protocol https iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True @@ -593,6 +587,29 @@ function configure_placement_nova_compute { iniset $conf placement region_name "$REGION_NAME" } +# Configure access to cinder. +function configure_cinder_access { + iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" + iniset $NOVA_CONF cinder auth_type "password" + iniset $NOVA_CONF cinder auth_url "$KEYSTONE_SERVICE_URI" + # NOTE(mriedem): This looks a bit weird but we use the nova user here + # since it has the admin role and the cinder user does not. This is + # similar to using the nova user in init_nova_service_user_conf. We need + # to use a user with the admin role for background tasks in nova to + # be able to GET block-storage API resources owned by another project + # since cinder has low-level "is_admin" checks in its DB API. + iniset $NOVA_CONF cinder username nova + iniset $NOVA_CONF cinder password "$SERVICE_PASSWORD" + iniset $NOVA_CONF cinder user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF cinder project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF cinder project_domain_name "$SERVICE_DOMAIN_NAME" + if is_service_enabled tls-proxy; then + CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} + CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} + iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE + fi +} + function configure_console_compute { # If we are running multiple cells (and thus multiple console proxies) on a # single host, we offset the ports to avoid collisions. We need to From c67a689fe56f55fa3cd288fba76f88d3aa5b9b8d Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 1 Oct 2019 20:14:22 +0200 Subject: [PATCH 1150/1936] Ensure that the zuul home can be traversed The default permissions for the zuul home directory are not the same in the various distributions. As /home/zuul contains the sources, a 700 default may be problematic when accessing those files, so make sure that the executable permissions are set. 
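On a typical node this amounts to something like:

    chmod a+x /home/zuul

but using the file module keeps it idempotent and driven off ansible_user_dir rather than a hard-coded path.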
Closes-Bug: 1846251 Change-Id: Ic9769e56274d7205844b86d3b5200a6415e4acad --- playbooks/pre.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index 4689a6354f..60f365aa7a 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -1,5 +1,12 @@ - hosts: all pre_tasks: + - name: Fix the permissions of the zuul home directory + # Make sure that the zuul home can be traversed, + # so that all users can access the sources placed there. + # Some distributions create it with 700 by default. + file: + path: "{{ ansible_user_dir }}" + mode: a+x - name: Gather minimum local MTU set_fact: local_mtu: > From deb3ff50f15bc89921ea548f5b53d49492c5ee65 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Wed, 24 Jul 2019 17:08:44 +0900 Subject: [PATCH 1151/1936] lib/horizon: no need to specify keystone v3 to API version keystone v3 is the default API version in horizon now, so there is no need to specify it in local_settings.py explicitly. This commit also makes minor changes in lib/horizon _horizon_config_set(). * Do not insert a blank line after each setting. * Use the local variable $file to specify the target file consistently. Change-Id: I5faea3e1f357726a256d2b48fc1afeabfead4998 --- lib/horizon | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/horizon b/lib/horizon index 293a627c78..b2bf7bcb49 100644 --- a/lib/horizon +++ b/lib/horizon @@ -43,8 +43,8 @@ function _horizon_config_set { local value=$4 if [ -z "$section" ]; then - sed -e "/^$option/d" -i $local_settings - echo -e "\n$option=$value" >> $file + sed -e "/^$option/d" -i $file + echo "$option = $value" >> $file elif grep -q "^$section" $file; then local line line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file) @@ -84,6 +84,9 @@ function configure_horizon { local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings + # Ensure local_setting.py file ends with EOL (newline) + echo >> $local_settings + _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\" _horizon_config_set $local_settings "" COMPRESS_OFFLINE True @@ -91,7 +94,6 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" - _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed From 5c6b3c32791f6a1b6e3646e739d41ae86d866d45 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 4 Oct 2019 09:31:53 +1000 Subject: [PATCH 1152/1936] Add override variable for LIBVIRT_TYPE Allow jobs to set LIBVIRT_TYPE, but retain the current default. This is for testing nested virt. Change-Id: Ife215db3f59f011574a50b0d1cbd5565a7408dfe --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index f7594d4ddc..cc94369988 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -219,8 +219,8 @@ VERBOSE_NO_TIMESTAMP: true NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true - # Gate jobs can't deal with nested virt. Disable it. - LIBVIRT_TYPE: qemu + # Gate jobs can't deal with nested virt. Disable it by default. + LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' devstack_services: # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false From dad5665036c83dfc25de28fe82ad7831b6f0915c Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 7 Oct 2019 17:21:46 +0000 Subject: [PATCH 1153/1936] Update DEVSTACK_SERIES to ussuri stable/train branch has been created now and current master is for ussuri. Change-Id: I2bb84cb4b32e344572a7c3e6f300c1aa19e486df --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 10117f2700..e8ee4e09bd 100644 --- a/stackrc +++ b/stackrc @@ -258,7 +258,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="train" +DEVSTACK_SERIES="ussuri" ############## # From 8c86e5a53e1c77fc17df7d979f6435b534021d13 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Thu, 17 Oct 2019 15:42:41 -0400 Subject: [PATCH 1154/1936] Cinder: only set volume_clear for LVM This only applies to the LVM driver (when using thick provisioning), and doesn't have any effect on other backends like NFS, so only write the conf entry for LVM. Change-Id: I722ba2fa0010d9887ed9b7fdd9e050cd4694768e --- lib/cinder | 3 --- lib/cinder_backends/lvm | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index fd960535d9..2e6e97a006 100644 --- a/lib/cinder +++ b/lib/cinder @@ -250,9 +250,6 @@ function configure_cinder { default_name=$be_name fi enabled_backends+=$be_name, - - iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR - done iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} if [[ -n "$default_name" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 497081c9e4..e03ef14c55 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -52,7 +52,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" - + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } # init_cinder_backend_lvm - Initialize volume group From b14665f0dde0d0862d8755a796b9f680e42f790b Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 17 Oct 2019 19:34:05 +0000 Subject: [PATCH 1155/1936] Revert "Remove deprecated PostgreSQL database driver" This reverts commit 168ca7f0a474f1207ee01dab0ca2e70f34783e9c. Removing postgresql support from devstack was unnecessary since it's not broken and not causing maintenance issues as far as I know. The commit being reverted said that pg support was deprecated in Pike but nothing in the docs or commit message refer to official deprecation of postgres support in devstack or openstack in general. Not to mention that there are still postgres-based jobs that will no longer work *and* the notification to the mailing list about doing this happened *after* it was already done [1] leaving stakeholders with no time to reply. 
[1] http://lists.openstack.org/pipermail/openstack-discuss/2019-October/010196.html Change-Id: Ie7036d37d79e6aba462b7c97f917e2e7aed108f9 --- doc/source/configuration.rst | 22 +++-- doc/source/zuul_ci_jobs_migration.rst | 5 +- functions | 3 +- lib/databases/postgresql | 137 ++++++++++++++++++++++++++ stack.sh | 9 +- unstack.sh | 4 + 6 files changed, 166 insertions(+), 14 deletions(-) create mode 100644 lib/databases/postgresql diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 62571e0760..45f4ffe6e9 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -326,23 +326,29 @@ a file, keep service logs and disable color in the stored files. Database Backend ---------------- -Support for the MySQL database backend is included. Addition database backends -may be available via external plugins. Enabling of disabling MySQL is handled -via the usual service functions and ``ENABLED_SERVICES``. For example, to -disable MySQL in ``local.conf``:: +Multiple database backends are available. The available databases are defined +in the lib/databases directory. +``mysql`` is the default database, choose a different one by putting the +following in the ``localrc`` section:: disable_service mysql + enable_service postgresql + +``mysql`` is the default database. RPC Backend ----------- -Support for a RabbitMQ RPC backend is included. Additional RPC backends may be -available via external plugins. Enabling or disabling RabbitMQ is handled via -the usual service functions and ``ENABLED_SERVICES``. For example, to disable -RabbitMQ in ``local.conf``:: +Support for a RabbitMQ RPC backend is included. Additional RPC +backends may be available via external plugins. Enabling or disabling +RabbitMQ is handled via the usual service functions and +``ENABLED_SERVICES``. + +Example disabling RabbitMQ in ``local.conf``:: disable_service rabbit + Apache Frontend --------------- diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index 66f8251039..17e7e16fb7 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -302,7 +302,10 @@ migrated at all. - This will probably be implemented on ironic side. * - DEVSTACK_GATE_POSTGRES - Legacy - - This has no effect in d-g. + - This flag exists in d-g but the only thing that it does is + capture postgres logs. This is already supported by the roles + in post, so the flag is useless in the new jobs. postgres + itself can be enabled via the devstack_service job variable. * - DEVSTACK_GATE_ZEROMQ - Legacy - This has no effect in d-g. diff --git a/functions b/functions index 8eeb03203e..f33fd25fee 100644 --- a/functions +++ b/functions @@ -400,8 +400,7 @@ function upload_image { # initialized yet, just save the configuration selection and call back later # to validate it. # -# ``$1`` - the name of the database backend to use (only mysql is currently -# supported) +# ``$1`` - the name of the database backend to use (mysql, postgresql, ...) 
function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # No backends registered means this is likely called from ``localrc`` diff --git a/lib/databases/postgresql b/lib/databases/postgresql new file mode 100644 index 0000000000..618834b550 --- /dev/null +++ b/lib/databases/postgresql @@ -0,0 +1,137 @@ +#!/bin/bash +# +# lib/databases/postgresql +# Functions to control the configuration and operation of the **PostgreSQL** database backend + +# Dependencies: +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +_XTRACE_PG=$(set +o | grep xtrace) +set +o xtrace + + +MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} + + +register_database postgresql + + +# Functions +# --------- + +function get_database_type_postgresql { + echo postgresql +} + +# Get rid of everything enough to cleanly change database backends +function cleanup_database_postgresql { + stop_service postgresql + if is_ubuntu; then + # Get ruthless with mysql + apt_get purge -y postgresql* + return + elif is_fedora || is_suse; then + uninstall_package postgresql-server + else + return + fi +} + +function recreate_database_postgresql { + local db=$1 + # Avoid unsightly error when calling dropdb when the database doesn't exist + psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db" + createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db +} + +function configure_database_postgresql { + local pg_conf pg_dir pg_hba check_role version + echo_summary "Configuring and starting PostgreSQL" + if is_fedora; then + pg_hba=/var/lib/pgsql/data/pg_hba.conf + pg_conf=/var/lib/pgsql/data/postgresql.conf + if ! sudo [ -e $pg_hba ]; then + sudo postgresql-setup initdb + fi + elif is_ubuntu; then + version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` + if vercmp $version '>=' 9.3; then + if [ -z "`pg_lsclusters -h`" ]; then + echo 'No PostgreSQL clusters exist; will create one' + sudo pg_createcluster $version main --start + fi + fi + pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` + pg_hba=$pg_dir/pg_hba.conf + pg_conf=$pg_dir/postgresql.conf + elif is_suse; then + pg_hba=/var/lib/pgsql/data/pg_hba.conf + pg_conf=/var/lib/pgsql/data/postgresql.conf + # initdb is called when postgresql is first started + sudo [ -e $pg_hba ] || start_service postgresql + else + exit_distro_not_supported "postgresql configuration" + fi + # Listen on all addresses + sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf + # Set max_connections + sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf + # Do password auth from all IPv4 clients + sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba + # Do password auth for all IPv6 clients + sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba + restart_service postgresql + + # Create the role if it's not here or else alter it. 
+ check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'") + if [[ ${check_role} == *HERE ]];then + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + else + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + fi +} + +function install_database_postgresql { + echo_summary "Installing postgresql" + deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" + local pgpass=$HOME/.pgpass + if [[ ! -e $pgpass ]]; then + cat < $pgpass +*:*:*:$DATABASE_USER:$DATABASE_PASSWORD +EOF + chmod 0600 $pgpass + else + sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass + fi + if is_ubuntu; then + install_package postgresql + elif is_fedora || is_suse; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" + fi +} + +function install_database_python_postgresql { + # Install Python client module + pip_install_gr psycopg2 + ADDITIONAL_VENV_PACKAGES+=",psycopg2" +} + +function database_connection_url_postgresql { + local db=$1 + echo "$BASE_SQL_CONN/$db?client_encoding=utf8" +} + + +# Restore xtrace +$_XTRACE_PG + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index c652f65403..b7b37e2cf6 100755 --- a/stack.sh +++ b/stack.sh @@ -695,11 +695,14 @@ function read_password { # Database Configuration # ---------------------- -# DevStack provides a MySQL database backend. Additional backends may be -# provided by external plugins and can be enabled using the usual service -# functions and ``ENABLED_SERVICES``. For example, to disable MySQL: +# To select between database backends, add the following to ``local.conf``: # # disable_service mysql +# enable_service postgresql +# +# The available database backends are listed in ``DATABASE_BACKENDS`` after +# ``lib/database`` is sourced. ``mysql`` is the default. + if initialize_database_backends; then echo "Using $DATABASE_TYPE database backend" # Last chance for the database password. This must be handled here diff --git a/unstack.sh b/unstack.sh index 07dc2b1418..ccea0ef585 100755 --- a/unstack.sh +++ b/unstack.sh @@ -147,6 +147,10 @@ if [[ -n "$UNSTACK_ALL" ]]; then stop_service mysql fi + if is_service_enabled postgresql; then + stop_service postgresql + fi + # Stop rabbitmq-server if is_service_enabled rabbit; then stop_service rabbitmq-server From a676c4029e46743ccf2e81ebd17cd306ffc4960d Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 2 Aug 2017 16:46:35 +0000 Subject: [PATCH 1156/1936] Revert "Generate deprecation warning for postgresql" Based on resolution [1], there's no clear indication that next steps involve the removal of the DB from Devstack or from the gate. [1] I332cef8ec4539520adcf37c6d2ea11488289fcfd This reverts commit d9aaae95f2b84170bf35e037715e4963d89f940c. 
Change-Id: I8410d65c0e0b24035aa035fac7560a686d53ec50 --- lib/databases/postgresql | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 618834b550..1f347f5548 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -95,7 +95,6 @@ function configure_database_postgresql { function install_database_postgresql { echo_summary "Installing postgresql" - deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" local pgpass=$HOME/.pgpass if [[ ! -e $pgpass ]]; then cat < $pgpass From 1d378dcf6d3699d99838050cc804c64a1862ba8f Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 23 Oct 2019 04:20:23 +0000 Subject: [PATCH 1157/1936] Remove n-novnc service requirement for TLS configuration When configuring TLS between the console proxy (where the n-novnc service runs) and the compute host, some configuration for QEMU needs to be done on the compute host. The existing code for this requires the n-novnc service to be running, which it is in a single node all-in-one deployment. However, when running in a multinode deployment, the n-novnc service runs only on the controller and not on the subnode. Yet, we need to configure QEMU on the subnode compute host as well. This removes the n-novnc service requirement to enable TLS QEMU configuration to occur on a compute subnode in a multinode deployment. Closes-Bug: #1849418 Change-Id: I8b6970e91ad7f52ff489cb9f776ca216d8f86aa4 --- lib/nova_plugins/functions-libvirt | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 463986944f..914ee7bcf7 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -150,21 +150,19 @@ EOF fi if is_nova_console_proxy_compute_tls_enabled ; then - if is_service_enabled n-novnc ; then - echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF - echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF - - sudo mkdir -p /etc/pki/libvirt-vnc - deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem - deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem - # OpenSSL 1.1.0 generates the key file with permissions: 600, by - # default and the deploy_int* methods use 'sudo cp' to copy the - # files, making them owned by root:root. - # Change ownership of everything under /etc/pki/libvirt-vnc to - # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key - # file. - sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc - fi + echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF + echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF + + sudo mkdir -p /etc/pki/libvirt-vnc + deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem + deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/libvirt-vnc to + # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key + # file. 
+ sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc fi # Service needs to be started on redhat/fedora -- do a restart for From da18895162bababea638b3f28c76cb9766e821b6 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 31 Oct 2019 14:49:23 -0400 Subject: [PATCH 1158/1936] Fix brctl calls Some distros no longer ship brctl, iproute2 should be used in its place. The linuxbridge agent plugin script was still using it, as was worlddump, which generates this warning on a failure: Running devstack worlddump.py /bin/sh: 1: brctl: not found Conditionalizing worlddump based on whether brctl is installed to make this go away. Change-Id: Iafbf4038bab08c261d45d117b12d4629ba32d65e --- lib/neutron_plugins/linuxbridge_agent | 2 +- tools/worlddump.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index fa3f86203d..1f1b0e8e52 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -48,7 +48,7 @@ function neutron_plugin_configure_dhcp_agent { function neutron_plugin_configure_l3_agent { local conf_file=$1 - sudo brctl addbr $PUBLIC_BRIDGE + sudo ip link add $PUBLIC_BRIDGE type bridge set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU } diff --git a/tools/worlddump.py b/tools/worlddump.py index d1453ca076..d5ff5d1ab5 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -165,7 +165,8 @@ def network_dump(): _header("Network Dump") _dump_cmd("bridge link") - _dump_cmd("brctl show") + if _find_cmd("brctl"): + _dump_cmd("brctl show") _dump_cmd("ip link show type bridge") ip_cmds = ["neigh", "addr", "link", "route"] for cmd in ip_cmds + ['netns']: From 6c15c38f1a9e8e833a29637598f373d7ea29bc62 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Wed, 6 Nov 2019 07:11:33 -0500 Subject: [PATCH 1159/1936] stackrc: Make REQUIREMENTS_DIR configurable In cases where global REQUIREMENTS_DIR is set, use it instead of overwriting it. This is particularly needed in cases where users of pip_install wrapper have the upper-constraints.txt at another location. Change-Id: I34e9f94548c575e1af5bca9655a3b7d1915375a8 Signed-off-by: Flavio Fernandes --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 3fcdadffa3..0f80143f39 100644 --- a/stackrc +++ b/stackrc @@ -240,7 +240,7 @@ WSGI_MODE=${WSGI_MODE:-"uwsgi"} GIT_BASE=${GIT_BASE:-https://opendev.org} # The location of REQUIREMENTS once cloned -REQUIREMENTS_DIR=$DEST/requirements +REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Which libraries should we install from git instead of using released # versions on pypi? From 0e02e7fd55276678ba839a098896b6142363147c Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Thu, 14 Nov 2019 14:01:27 -0500 Subject: [PATCH 1160/1936] Don't install glance default policy With Glance defining default policies in code, it's no longer necessary to install policy.json from the repo. 
Change-Id: I9f9160f5a2bf9fd77fb3807e12de219b7a49952d Depends-On: https://review.opendev.org/693129 --- lib/glance | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/glance b/lib/glance index 54d3276433..740bcabf6a 100644 --- a/lib/glance +++ b/lib/glance @@ -52,7 +52,6 @@ GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf -GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf @@ -220,7 +219,6 @@ function configure_glance { iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject - cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR From 277f29f68300a5598c02f48ebeb4039528435774 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 19 Nov 2019 10:09:11 +0000 Subject: [PATCH 1161/1936] Drop centos7 platform job The centos7 job is running with python2, which is no longer supported by nova, so we can drop it in master. Change-Id: Id9ef507dd6f4226d65c6ed3043666b0aa6a3bd1c --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc94369988..13b98eb923 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -520,13 +520,6 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. -- job: - name: devstack-platform-centos-7 - parent: tempest-full - description: Centos 7 platform test - nodeset: devstack-single-node-centos-7 - voting: false - - job: name: devstack-platform-opensuse-15 parent: tempest-full-py3 @@ -621,7 +614,6 @@ - devstack - devstack-xenial - devstack-ipv6 - - devstack-platform-centos-7 - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-xenial From 2d112db86b1b6861f6b4844bef324bda3a89248b Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 14 Nov 2019 11:35:29 +0100 Subject: [PATCH 1162/1936] Drop old neutron-grenade job This job is still running python 2.7. As we are dropping py2 support in Ussuri cycle, lets drop this job now. There is same job called "grenade-py3" which runs on python 3 already and this will be now used in project's CI. Depends-On: https://review.opendev.org/#/c/695036/ Change-Id: I5cd8e137a3ae06e49a4351629c5eb207c4e6bf1a --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc94369988..7c32298a7a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -636,7 +636,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-grenade: + - grenade-py3: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -678,7 +678,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-grenade: + - grenade-py3: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 39082a3b4ede7ba84fcc01a6d00e9cd66e2f582d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 2 Apr 2019 11:03:06 +0000 Subject: [PATCH 1163/1936] Handle localrc early enough in stackrc We need to source the environment overrides before they get evaluated. Otherwise e.g. USE_PYTHON3 is factually being ignored for some settings. 
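A simplified sketch of the old ordering in stackrc (not the literal file) shows the problem:

    # evaluated first, so the built-in default wins...
    export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
    if [[ ${USE_PYTHON3} == True ]]; then
        export VIRTUALENV_CMD="python3 -m venv"
    fi
    # ...because the user's overrides were only sourced down here
    source $RC_DIR/localrc

Moving the localrc/.localrc.auto sourcing above the derived settings lets overrides like USE_PYTHON3 actually take effect.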
Also fix creating python3 venvs by using the "virtualenv" command for that task. Change-Id: I16c78a7fef80372d9a1684c3256c5b50b052ecae --- stackrc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stackrc b/stackrc index 3fcdadffa3..ab55e13512 100644 --- a/stackrc +++ b/stackrc @@ -89,6 +89,15 @@ ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable NOVA_ENABLED_APIS=osapi_compute,metadata +# allow local overrides of env variables, including repo config +if [[ -f $RC_DIR/localrc ]]; then + # Old-style user-supplied config + source $RC_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + # New-style user-supplied config extracted from local.conf + source $RC_DIR/.localrc.auto +fi + # CELLSV2_SETUP - how we should configure services with cells v2 # # - superconductor - this is one conductor for the api services, and @@ -145,20 +154,11 @@ export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}} # Create a virtualenv with this if [[ ${USE_PYTHON3} == True ]]; then - export VIRTUALENV_CMD="python3 -m venv" + export VIRTUALENV_CMD="virtualenv -p python3" else export VIRTUALENV_CMD="virtualenv " fi -# allow local overrides of env variables, including repo config -if [[ -f $RC_DIR/localrc ]]; then - # Old-style user-supplied config - source $RC_DIR/localrc -elif [[ -f $RC_DIR/.localrc.auto ]]; then - # New-style user-supplied config extracted from local.conf - source $RC_DIR/.localrc.auto -fi - # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, # where logs are to be presented as browsable text files; hence color From 48b519b5c62be33388b76fd25eebb5673d1e2c18 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 1 Apr 2019 12:22:42 -0400 Subject: [PATCH 1164/1936] Change USE_PYTHON3=True by default Since Stein, gate jobs have been using bionic nodes so they are running with python 3.6, so it makes sense to also default devstack itself to run with python3 by default. Depends-On: https://review.opendev.org/688731 Change-Id: I52b03caee0ba700da3a15035201ea6cd91baa06b --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index ab55e13512..c796f3ae26 100644 --- a/stackrc +++ b/stackrc @@ -136,7 +136,7 @@ if [[ -r $RC_DIR/.localrc.password ]]; then fi # Control whether Python 3 should be used at all. -export USE_PYTHON3=$(trueorfalse False USE_PYTHON3) +export USE_PYTHON3=$(trueorfalse True USE_PYTHON3) # Explicitly list services not to run under Python 3. See # disable_python3_package to edit this variable. From 279a7589b03db69fd1b85d947cd0171dacef94ee Mon Sep 17 00:00:00 2001 From: "Jens Harbott (frickler)" Date: Mon, 16 Apr 2018 12:08:30 +0000 Subject: [PATCH 1165/1936] Revert "Do not use pip 10 or higher" This reverts commit f99d1771ba1882dfbb69186212a197edae3ef02c. 
Added workarounds that might want to get split into their own patch before merging: - Don't install python-psutil - Don't run peakmem_tracker Change-Id: If4fb16555e15082a4d97cffdf3cfa608a682997d --- .zuul.yaml | 4 ---- files/debs/dstat | 1 - files/rpms-suse/dstat | 1 - files/rpms/dstat | 1 - tools/cap-pip.txt | 1 - tools/install_pip.sh | 6 +++--- 6 files changed, 3 insertions(+), 11 deletions(-) delete mode 100644 tools/cap-pip.txt diff --git a/.zuul.yaml b/.zuul.yaml index 7c32298a7a..19dee584fc 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -320,14 +320,12 @@ dstat: true etcd3: true mysql: true - peakmem_tracker: true rabbit: true group-vars: subnode: devstack_services: # Shared services dstat: true - peakmem_tracker: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -394,7 +392,6 @@ dstat: true etcd3: true mysql: true - peakmem_tracker: true rabbit: true tls-proxy: true # Keystone services @@ -450,7 +447,6 @@ # This list replaces the test-matrix. # Shared services dstat: true - peakmem_tracker: true tls-proxy: true # Nova services n-cpu: true diff --git a/files/debs/dstat b/files/debs/dstat index 0d9da4434f..2b643b8b1b 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1,2 +1 @@ dstat -python-psutil diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat index 0d9da4434f..2b643b8b1b 100644 --- a/files/rpms-suse/dstat +++ b/files/rpms-suse/dstat @@ -1,2 +1 @@ dstat -python-psutil diff --git a/files/rpms/dstat b/files/rpms/dstat index b058c277cb..d7b272a93b 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,3 +1,2 @@ dstat # not:f29 pcp-system-tools # dist:f29 -python-psutil diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt deleted file mode 100644 index f5278d7c86..0000000000 --- a/tools/cap-pip.txt +++ /dev/null @@ -1 +0,0 @@ -pip!=8,<10 diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 2b6aa4c2e8..dcd546629f 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -89,9 +89,9 @@ function install_get_pip { die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded fi - sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt - if python3_enabled; then - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt + sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP + if ! python3_enabled; then + sudo -H -E python $LOCAL_PIP fi } From 2e6677869925c86c01cae883b3dde6cccad81d30 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 20 Nov 2019 10:41:34 +1100 Subject: [PATCH 1166/1936] Drop Xenial support With the goals of Ussuri being Python 3.6 [1], the python 3.5 environment on Xenial is too old. Remove testing and the most obvious bits of support from devstack. Also drop claimed support for artful, which is long EOL. 
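For anyone still on a removed platform, the existing escape hatch in stack.sh remains usable (illustrative invocation):

    FORCE=yes ./stack.sh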
[1] https://governance.openstack.org/tc/reference/runtimes/ussuri.html Change-Id: Iefcca99904dde76b34efbbfc0e04515dfa5a09e5 --- .zuul.yaml | 29 ------------------------- files/debs/nova | 5 ++--- lib/nova_plugins/functions-libvirt | 18 +++------------ stack.sh | 4 ++-- stackrc | 3 --- tools/fixup_stuff.sh | 35 ++---------------------------- 6 files changed, 9 insertions(+), 85 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc94369988..8f64620426 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -491,14 +491,6 @@ # https://bugs.launchpad.net/devstack/+bug/1794929 USE_PYTHON3: true -- job: - name: devstack-xenial - parent: devstack - nodeset: openstack-single-node-xenial - description: | - Simple singlenode test to verify functionality on devstack - side running on Xenial. - - job: name: devstack-multinode parent: devstack @@ -507,15 +499,6 @@ Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. -- job: - name: devstack-multinode-xenial - parent: devstack - nodeset: openstack-two-node-xenial - description: | - Simple multinode test to verify multinode functionality on devstack - side running on Xenial. - This is not meant to be used as a parent job. - # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid @@ -541,13 +524,6 @@ nodeset: devstack-single-node-fedora-latest voting: false -- job: - name: devstack-platform-xenial - parent: tempest-full-py3 - description: Ubuntu Xenial platform test - nodeset: openstack-single-node-xenial - voting: false - - job: name: devstack-tox-base parent: devstack @@ -619,14 +595,11 @@ check: jobs: - devstack - - devstack-xenial - devstack-ipv6 - devstack-platform-centos-7 - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - - devstack-platform-xenial - devstack-multinode - - devstack-multinode-xenial - devstack-unit-tests - openstack-tox-bashate - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: @@ -664,10 +637,8 @@ gate: jobs: - devstack - - devstack-xenial - devstack-ipv6 - devstack-multinode - - devstack-multinode-xenial - devstack-unit-tests - openstack-tox-bashate - neutron-grenade-multinode: diff --git a/files/debs/nova b/files/debs/nova index 5e14aec836..e5110e9c75 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -10,9 +10,8 @@ iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports libmysqlclient-dev -libvirt-bin # dist:xenial NOPRIME -libvirt-clients # not:xenial NOPRIME -libvirt-daemon-system # not:xenial NOPRIME +libvirt-clients # NOPRIME +libvirt-daemon-system # NOPRIME libvirt-dev # NOPRIME mysql-server # NOPRIME parted diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 914ee7bcf7..35666393ca 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -24,17 +24,10 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Currently fairly specific to OpenStackCI hosts DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) -# Only Xenial is left with libvirt-bin. 
Everywhere else is libvirtd -if is_ubuntu && [ ${DISTRO} == "xenial" ]; then - LIBVIRT_DAEMON=libvirt-bin -else - LIBVIRT_DAEMON=libvirtd -fi - # Enable coredumps for libvirt # Bug: https://bugs.launchpad.net/nova/+bug/1643911 function _enable_coredump { - local confdir=/etc/systemd/system/${LIBVIRT_DAEMON}.service.d + local confdir=/etc/systemd/system/libvirtd.service.d local conffile=${confdir}/coredump.conf # Create a coredump directory, and instruct the kernel to save to @@ -61,12 +54,7 @@ EOF function install_libvirt { if is_ubuntu; then - install_package qemu-system - if [[ ${DISTRO} == "xenial" ]]; then - install_package libvirt-bin libvirt-dev - else - install_package libvirt-clients libvirt-daemon-system libvirt-dev - fi + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev # uninstall in case the libvirt version changed pip_uninstall libvirt-python pip_install_gr libvirt-python @@ -167,7 +155,7 @@ EOF # Service needs to be started on redhat/fedora -- do a restart for # sanity after fiddling the config. - restart_service $LIBVIRT_DAEMON + restart_service libvirtd # Restart virtlogd companion service to ensure it is running properly # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455 diff --git a/stack.sh b/stack.sh index b7b37e2cf6..d0206ebac0 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (16.04 Xenial or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** +# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/stackrc b/stackrc index 3fcdadffa3..15150df43b 100644 --- a/stackrc +++ b/stackrc @@ -656,9 +656,6 @@ case "$VIRT_DRIVER" in ;; esac -# By default, devstack will use Ubuntu Cloud Archive. -ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE) - # Images # ------ diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index d2989379fe..eb8a76f276 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -71,15 +71,9 @@ function fixup_keystone { # Ubuntu Repositories #-------------------- -# We've found that Libvirt on Xenial is flaky and crashes enough to be -# a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to -# get newer Libvirt. -# Make it possible to switch this based on an environment variable as -# libvirt 2.5.0 doesn't handle nested virtualization quite well and this -# is required for the trove development environment. -# Also enable universe since it is missing when installing from ISO. +# Enable universe for bionic since it is missing when installing from ISO. 
function fixup_ubuntu { - if [[ "$DISTRO" != "xenial" && "$DISTRO" != "bionic" ]]; then + if [[ "$DISTRO" != "bionic" ]]; then return fi @@ -88,31 +82,6 @@ function fixup_ubuntu { # Enable universe sudo add-apt-repository -y universe - - if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then - return - fi - # Use UCA for newer libvirt. - if [[ -f /etc/ci/mirror_info.sh ]] ; then - # If we are on a nodepool provided host and it has told us about where - # we can find local mirrors then use that mirror. - source /etc/ci/mirror_info.sh - - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/queens main" - else - # Otherwise use upstream UCA - sudo add-apt-repository -y cloud-archive:queens - fi - - # Disable use of libvirt wheel since a cached wheel build might be - # against older libvirt binary. Particularly a problem if using - # the openstack wheel mirrors, but can hit locally too. - # TODO(clarkb) figure out how to use upstream wheel again. - iniset -sudo /etc/pip.conf "global" "no-binary" "libvirt-python" - - # Force update our APT repos, since we added UCA above. - REPOS_UPDATED=False - apt_get_update } # Python Packages From 8579f583077e5df3358605e4cdfe418b1d46f53b Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 4 Nov 2019 11:37:54 +0000 Subject: [PATCH 1167/1936] Dump the upper constraints for tempest in a temporary file This will avoid the creation of an unneeded file in the "tempest" repository directory. TrivialFix Change-Id: Id3f46b3537cd3232cb29c42808bde44c667565f1 --- lib/tempest | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 96c9ced14a..ea5033a29d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -607,8 +607,11 @@ function configure_tempest { fi # The requirements might be on a different branch, while tempest needs master requirements. - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt - tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt + rm -f $tmp_u_c_m # Auth: iniset $TEMPEST_CONFIG auth tempest_roles "member" @@ -697,8 +700,11 @@ function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements. - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt - tox -evenv-tempest -- pip install -c u-c-m.txt $TEMPEST_PLUGINS + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS + rm -f $tmp_u_c_m echo "Checking installed Tempest plugins:" tox -evenv-tempest -- tempest list-plugins fi From d54a1c6869653c4af1d0dbd76e31b20879c675b5 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 10 Sep 2019 12:05:06 +0200 Subject: [PATCH 1168/1936] Add possibility to configure manually MYSQL_SERVICE_NAME This variable can be now set in Devstack's config file and in such case Devstack will not set it automatically to value most likely correct for the distro. 
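For example (an illustrative local.conf fragment; any value matching the distro's packaged service name works):

    [[local|localrc]]
    MYSQL_SERVICE_NAME=mariadb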
By default this value is empty string and in such case Devstack will work in exactly same way as it was before this patch and will determine automatically what name should be used there. In addition in case of Ubuntu package $MYSQL_SERVICE_NAME-server will be now installed instead of mysql-server always. This will allow to easy configure e.g. CI job which will run using Mariadb instead of Mysql on Ubuntu. Change-Id: I25af0b54ad235b08c6c399b4125c737acf57ee2e --- lib/databases/mysql | 44 ++++++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 4d0f5f3e45..4e3cc72bdb 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -15,15 +15,17 @@ MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} register_database mysql -MYSQL_SERVICE_NAME=mysql -if is_fedora && ! is_oraclelinux; then - MYSQL_SERVICE_NAME=mariadb -elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then - # Older mariadb packages on SLES 12 provided mysql.service. The - # newer ones on SLES 12 and 15 use mariadb.service; they also - # provide a mysql.service symlink for backwards-compatibility, but - # let's not rely on that. - MYSQL_SERVICE_NAME=mariadb +if [[ -z "$MYSQL_SERVICE_NAME" ]]; then + MYSQL_SERVICE_NAME=mysql + if is_fedora && ! is_oraclelinux; then + MYSQL_SERVICE_NAME=mariadb + elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then + # Older mariadb packages on SLES 12 provided mysql.service. The + # newer ones on SLES 12 and 15 use mariadb.service; they also + # provide a mysql.service symlink for backwards-compatibility, but + # let's not rely on that. + MYSQL_SERVICE_NAME=mariadb + fi fi # Functions @@ -92,8 +94,23 @@ function configure_database_mysql { # because the package might have been installed already. sudo mysqladmin -u root password $DATABASE_PASSWORD || true + # In case of Mariadb, giving hostname in arguments causes permission + # problems as it expects connection through socket + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + local cmd_args="-uroot -p$DATABASE_PASSWORD " + else + local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 " + fi + + # In mariadb e.g. on Ubuntu socket plugin is used for authentication + # as root so it works only as sudo. To restore old "mysql like" behaviour, + # we need to change auth plugin for root user + if [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" + sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + fi # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" # Now update ``my.cnf`` for some local needs and restart the mysql service @@ -148,8 +165,11 @@ MYSQL_PRESEED [client] user=$DATABASE_USER password=$DATABASE_PASSWORD -host=$MYSQL_HOST EOF + + if ! 
is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then + echo "host=$MYSQL_HOST" >> $HOME/.my.cnf + fi chmod 0600 $HOME/.my.cnf fi # Install mysql-server @@ -159,7 +179,7 @@ EOF install_package mariadb-server sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then - install_package mysql-server + install_package $MYSQL_SERVICE_NAME-server else exit_distro_not_supported "mysql installation" fi From 40f7579bb8ebd8082dc5f55785c0ae4644b062ec Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 26 Nov 2019 15:20:03 +0000 Subject: [PATCH 1169/1936] lib/tempest: Do not rely on six for image_size_in_gib input() should work on both python versions for what we need. I understand the concern about eval() on python2 but, in the case it's used we should be fine, plus, python2 is being removed from OpenStack projects. Change-Id: I86a7c31374986f81132bc4f49aee0a76b90e6553 Signed-off-by: Lucas Alvares Gomes --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 96c9ced14a..7d0a98275f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -107,7 +107,7 @@ function remove_disabled_extensions { function image_size_in_gib { local size size=$(openstack image show $1 -c size -f value) - echo $size | python -c "import math; import six; print(int(math.ceil(float(int(six.moves.input()) / 1024.0 ** 3))))" + echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } # configure_tempest() - Set config files, create data dirs, etc From 7ddbece508413592cbb7540408b3edbf04ca8d04 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 3 Dec 2019 14:35:03 -0800 Subject: [PATCH 1170/1936] Enable libvirt virtio rng device We've seen jobs where tests fail due to what appears to be rng starvation. Enable virtio rng device to try and alleviate this. Change-Id: I70d800cdc45b6008f775110f22c0000736421529 --- functions | 6 +++--- lib/nova | 20 ++++++++++---------- lib/tempest | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/functions b/functions index f33fd25fee..8ea634e753 100644 --- a/functions +++ b/functions @@ -292,7 +292,7 @@ function upload_image { local disk_format="" local container_format="" local unpack="" - local img_property="" + local img_property="--property hw_rng_model=virtio" case "$image_fname" in *.tar.gz|*.tgz) # Extract ami and aki files @@ -364,11 +364,11 @@ function upload_image { esac if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then - img_property="--property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" + img_property="$img_property --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" fi if is_arch "aarch64"; then - img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'" + img_property="$img_property --property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'" fi if [ "$container_format" = "bare" ]; then diff --git a/lib/nova b/lib/nova index c41f881fa1..58a236a525 100644 --- a/lib/nova +++ b/lib/nova @@ -1107,19 +1107,19 @@ function create_flavors { if is_service_enabled n-api; then if ! 
openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then # Note that danms hates these flavors and apologizes for sdague - openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 cirros256 - openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M - openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G - openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G - openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G + openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256 + openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M + openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G + openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G + openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G fi if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then - openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny - openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small - openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium - openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large - openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge + openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny + openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small + openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium + openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large + openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge fi fi } diff --git a/lib/tempest b/lib/tempest index 96c9ced14a..1bb9b52645 100644 --- a/lib/tempest +++ b/lib/tempest @@ -203,13 +203,13 @@ function configure_tempest { if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 m1.nano + openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. 
disk=$(image_size_in_gib $image_uuid_alt) - openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 m1.micro + openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else From ede8b1269cb03178c31f700200309dd76b00dd5a Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 9 Dec 2019 14:21:21 +0900 Subject: [PATCH 1171/1936] mysql: Don't bother to change auth plugin on centos This partially reverts the previous change [1], which broke networking-midonet jobs. [1] https://review.opendev.org/#/c/681201/ Closes-Bug: #1855516 Change-Id: I0255c6acce72a8376dbc6d8f8d0314a7dabf019c --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 4e3cc72bdb..420a86e04e 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -105,7 +105,7 @@ function configure_database_mysql { # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" fi From f7a7076facbe8ef7c77863abf977af5cdafdeb6e Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 11 Dec 2019 09:51:58 +0100 Subject: [PATCH 1172/1936] Include dnsmasq-utils package on all Ubuntu versions This package provides dhcp_release tool but in files/debs/neutron-common it was listed to be installed only on Ubuntu Precise. The same file is also in Nova's packages but there is no restriction to Ubuntu Precise only there. So on all Neutron jobs it was fine but on Ironic's job where Nova wasn't enabled, this package was not installed and caused problems in Neutron DHCP agent. Change-Id: Idd0711cfe6d43f21754a2f0c230cd094ea33cb27 Closes-Bug: #1855910 --- files/debs/neutron-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/debs/neutron-common b/files/debs/neutron-common index e30f678f7a..b269f6330b 100644 --- a/files/debs/neutron-common +++ b/files/debs/neutron-common @@ -1,6 +1,6 @@ acl dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:precise +dnsmasq-utils # for dhcp_release ebtables haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables From d6a7b73fc8d01d8118031f086d0ad20ab6186059 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Tue, 12 Nov 2019 15:54:32 +0200 Subject: [PATCH 1173/1936] Create OVS bridge even if OVS_BRIDGE_MAPPINGS is not empty in complex cases when mapping is defined in local.conf OVS bridge can be created automatically. Change-Id: I2e5e1068e77291d1d199cd698cec4946480c7601 --- lib/neutron_plugins/openvswitch_agent | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index b65a2587c2..100961196d 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -41,8 +41,10 @@ function neutron_plugin_configure_plugin_agent { # Setup physical network bridge mappings. Override # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. 
- if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + fi # Configure bridge manually with physical interface as port for multi-node _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE From 56b2e7fe8b676d0fd94f511529c731d9a6af5c4a Mon Sep 17 00:00:00 2001 From: Eyal Date: Thu, 19 Dec 2019 13:32:55 +0200 Subject: [PATCH 1174/1936] fix a command typo Change-Id: I01787641c887bfc0f4620b90b4ff488958dac53e --- .../templates/devstack.journal.README.txt.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 index 598eb7f3db..fe36653102 100644 --- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -10,7 +10,7 @@ locally. After downloading the file: $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal Note this binary is not in the regular path. On Debian/Ubuntu -platforms, you will need to have the "sytemd-journal-remote" package +platforms, you will need to have the "systemd-journal-remote" package installed. It should result in something like: From f0dd9996cc635709276f51e4d94c5ebbf5d5b49f Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Mon, 26 Nov 2018 07:38:54 +0000 Subject: [PATCH 1175/1936] Fix DevStack to configure tempest's service_availability Tempest's service_availability config option includes all the service availability which is further used by tests to take decision of skip or run the test. For example, [service_availability].nova is true then, compute test will run or if [service_availability].aodh is false then, all aodh related tests either in aodh tempest plugin or any other plugins will be skipped. Now question is what is the best way to set the each service availability for tempest or tempest plugins tests. We have 2 category of service here- 1. Service tested by Tempest (nova, cinder, keystone, glance, swift, neutron) (let's say type1 service) 2. Services tested by Tempest plugins (all other than above list) (let's say type2 service) We need the standard way to set both type of service so that we can maintain the setting of service_availability config options in consistent way. As discussed on bug#1743688/ and review https://review.openstack.org/#/c/536723/, we will use devstack lib/tempest to set the type1 service which is services test owned by Tempest and type2 service setting will be done by devstack plugins of those service. For example - [service_availability].ironic will be set by ironic's devstack plugin. because that is best place we know ironic is installed and available. To do that we need: 1. Add setting of [service_availability].* in devstack plugins 2. Remove setting of type2 service from devstack lib/tempest This commit does the second part and all depends-on patches handle the first part. 
Related-Bug: #1743688 Change-Id: If3aec9fd1c61e2bb53233be437b97b811dc82414 --- lib/tempest | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index fba8826a2d..00e946e549 100644 --- a/lib/tempest +++ b/lib/tempest @@ -543,11 +543,15 @@ function configure_tempest { # ``service_available`` # - # this tempest service list needs to be all the services that - # tempest supports, otherwise we can have an erroneous set of + # this tempest service list needs to be the services that + # tempest own, otherwise we can have an erroneous set of # defaults (something defaulting true in Tempest, but not listed here). + # services tested by tempest plugins needs to be set on service devstack + # plugin side as devstack cannot keep track of all the tempest plugins + # services. Refer Bug#1743688 for more details. + # 'horizon' is also kept here as no devtack plugin for horizon. local service - local tempest_services="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove" + local tempest_services="key,glance,nova,neutron,cinder,swift,horizon" for service in ${tempest_services//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" From 7634c780027ad05e416c2a0c5ac688751aba4be6 Mon Sep 17 00:00:00 2001 From: Soniya Vyas Date: Thu, 26 Dec 2019 16:59:56 +0530 Subject: [PATCH 1176/1936] Removal of deprecated command and deprecated optional argument lib/tempest uses 'tempest-account-generator' which is deprecated 4 years back. In addition to above, lib/tempest also uses 'os-tenant-name' which is also deprecated. Use of 'tempest account-generator' and 'os-project-name' should be done now. Signed-off by: Soniya Vyas Change-Id: I624e1dc57a3d3533322fb298c01f70241d0400ed --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index a91fe01fa3..ce0886bbd9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -617,9 +617,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth tempest_roles "member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then - tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml else - tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml fi iniset $TEMPEST_CONFIG auth use_dynamic_credentials False iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml" From f8aa74bc045110b1ea53f3e358836b4b76faeb92 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Fri, 27 Dec 2019 09:30:57 -0800 Subject: [PATCH 1177/1936] Enable access rules tempest tests In Train, the access rules API was introduced in keystone. This change enables testing it in tempest. 
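The resulting tempest.conf fragment (shown for illustration) is:

    [identity-feature-enabled]
    access_rules = True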
Depends-on: https://review.opendev.org/699519 Change-Id: I2af21868cbf584a6881c6208bc2afc3bdb323ab9 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index a91fe01fa3..c8a1ea0ec7 100644 --- a/lib/tempest +++ b/lib/tempest @@ -336,6 +336,10 @@ function configure_tempest { # so remove this once Tempest no longer supports Pike. iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True + # In Train and later, access rules for application credentials are enabled + # by default so remove this once Tempest no longer supports Stein. + iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. From 4143ce6fc95ccd9bde88c22baaa3eeaba58183f8 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 13 Jan 2020 16:05:11 +1100 Subject: [PATCH 1178/1936] Create virtualenv with abstracted VIRTUALENV_CMD Just calling "virtualenv" makes a Python 2 based environment; setuptools just dropped Python 2 support (as Python 2 reached EOL in Jan 2020) so this has now become a breakage. Although the Python 2 path won't work, use the abstracted command. This should stop us having to revisit this for any future cleanups (or switing to venv, etc). Change-Id: I531e971b78491a9276753c0d86b04c4adbd224aa --- lib/infra | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/infra b/lib/infra index cf003cce01..b983f2b739 100644 --- a/lib/infra +++ b/lib/infra @@ -29,7 +29,7 @@ GITDIR["pbr"]=$DEST/pbr # install_infra() - Collect source and prepare function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" - [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV + [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR From bcb2c30c317834f62cab470dc0b58670d945a6ef Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 13 Jan 2020 16:31:20 +1100 Subject: [PATCH 1179/1936] Remove TRACK_DEPENDS This was added in 2012 with I89677fd54635e82b10ab674ddeb9ffb3f1a755f0, but I can not see it being used anywhere currently. It's use of virtualenv's has become problematic in a python2 deprecated world, but since it is not used, remove it to avoid further confusion. 
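For reference, based only on the code removed below, the feature was invoked as:

    TRACK_DEPENDS=True ./stack.sh

which built a virtualenv in $DEST/.venv and wrote a pip-freeze diff to $DEST/requires.diff.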
Change-Id: I65d44d24f449436ca6229928eee2c5a021793055 --- functions-common | 8 +------- inc/python | 51 ++++++++++++++++++++---------------------------- stack.sh | 24 ----------------------- 3 files changed, 22 insertions(+), 61 deletions(-) diff --git a/functions-common b/functions-common index a13d611415..6be07b42bb 100644 --- a/functions-common +++ b/functions-common @@ -27,7 +27,6 @@ # - ``RECLONE`` # - ``REQUIREMENTS_DIR`` # - ``STACK_USER`` -# - ``TRACK_DEPENDS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` # @@ -44,7 +43,6 @@ declare -A -g GITREPO declare -A -g GITBRANCH declare -A -g GITDIR -TRACK_DEPENDS=${TRACK_DEPENDS:-False} KILL_PATH="$(which kill)" # Save these variables to .stackenv @@ -2075,11 +2073,7 @@ function _safe_permission_operation { return 0 fi - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi + sudo_cmd="sudo" $xtrace $sudo_cmd $@ diff --git a/inc/python b/inc/python index 81b6a960a4..fd43cef932 100644 --- a/inc/python +++ b/inc/python @@ -175,7 +175,7 @@ function disable_python3_package { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, -# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, +# ``PIP_UPGRADE``, ``*_proxy``, # Usage: # pip_install pip_arguments function pip_install { @@ -219,37 +219,28 @@ function pip_install { # this works (for now...) local package_dir=${!#%\[*\]} - if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then - # TRACK_DEPENDS=True installation creates a circular dependency when - # we attempt to install virtualenv into a virtualenv, so we must global - # that installation. - source $DEST/.venv/bin/activate - local cmd_pip=$DEST/.venv/bin/pip + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" else - if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then - local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip - local sudo_pip="env" - else - local cmd_pip - cmd_pip=$(get_pip_command $PYTHON2_VERSION) - local sudo_pip="sudo -H" - if python3_enabled; then - # Special case some services that have experimental - # support for python3 in progress, but don't claim support - # in their classifier - echo "Check python version for : $package_dir" - if python3_disabled_for ${package_dir##*/}; then - echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" - else - # For everything that is not explicitly blacklisted with - # DISABLED_PYTHON3_PACKAGES, assume it supports python3 - # and we will let pip sort out the install, regardless of - # the package being local or remote. 
- echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - fi + local cmd_pip + cmd_pip=$(get_pip_command $PYTHON2_VERSION) + local sudo_pip="sudo -H" + if python3_enabled; then + # Special case some services that have experimental + # support for python3 in progress, but don't claim support + # in their classifier + echo "Check python version for : $package_dir" + if python3_disabled_for ${package_dir##*/}; then + echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" + else + # For everything that is not explicitly blacklisted with + # DISABLED_PYTHON3_PACKAGES, assume it supports python3 + # and we will let pip sort out the install, regardless of + # the package being local or remote. + echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) fi fi fi diff --git a/stack.sh b/stack.sh index d0206ebac0..089510fa48 100755 --- a/stack.sh +++ b/stack.sh @@ -796,19 +796,6 @@ if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi -TRACK_DEPENDS=${TRACK_DEPENDS:-False} - -# Install Python packages into a virtualenv so that we can track them -if [[ $TRACK_DEPENDS = True ]]; then - echo_summary "Installing Python packages into a virtualenv $DEST/.venv" - pip_install -U virtualenv - - rm -rf $DEST/.venv - virtualenv --system-site-packages $DEST/.venv - source $DEST/.venv/bin/activate - $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip -fi - # Do the ugly hacks for broken packages and distros source $TOP_DIR/tools/fixup_stuff.sh fixup_all @@ -997,17 +984,6 @@ fi # osc commands. Alias dies with stack.sh. install_oscwrap -if [[ $TRACK_DEPENDS = True ]]; then - $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip - if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then - echo "Detect some changes for installed packages of pip, in depend tracking mode" - cat $DEST/requires.diff - fi - echo "Ran stack.sh in depend tracking mode, bailing out now" - exit 0 -fi - - # Syslog # ------ From d02fa6f856ac5951b8a879c23b57d5a752f28918 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 14 Jan 2020 15:47:51 +0000 Subject: [PATCH 1180/1936] do not gzip legacy service logs This change removes the .gz extension from the service and syslog logs exported via journalctl. This change nolonger gzip compresses the exported logs so that they can be rendered in the browser directly when served from swift. 
Change-Id: I4557a737cb13b9c2406056be08ab8a32ddd45162 --- roles/export-devstack-journal/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index cbec4447b8..ef839edaaf 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -14,7 +14,7 @@ name="" for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') - journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz + journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt done - name: Export legacy syslog.txt @@ -29,7 +29,7 @@ -t sudo \ --no-pager \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | gzip - > {{ stage_dir }}/logs/syslog.txt.gz + > {{ stage_dir }}/logs/syslog.txt # TODO: convert this to ansible # - make a list of the above units From 85c5ec11c9526e0429163db5701d3ea8c096a324 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Wed, 15 Jan 2020 10:58:29 +0900 Subject: [PATCH 1181/1936] Cleanup VM instances during unstack Currently VMs created by a previous DevStack run still exists even after re-run stack.sh. This leads to a failure in launching a VM after the second run of stack.sh. We need to check the reason of the failure by nova compute log and clean up remaining VMs. It is annoying. IIRC we cleaned up existing VMs. While I failed to identify which commit changed this behavior, I believe it is worth recovering it. This commit changes unstack.sh to call cleanup_nova. cleanup_cinder() already cleans up LVM volumes and some of them may be used by VMs, so I believe it is reasonable to clean up VMs in unstack.sh. Change-Id: I9fcbc5105e443037fada1ef6a76a078145964256 --- unstack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/unstack.sh b/unstack.sh index ccea0ef585..276111edb9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -99,6 +99,7 @@ run_phase unstack if is_service_enabled nova; then stop_nova + cleanup_nova fi if is_service_enabled placement; then From d8dec362baa2bf7f6ffe1c47352fdbe032eaf20a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 15 Jan 2020 18:30:36 +0100 Subject: [PATCH 1182/1936] Run Glance initialization when Glance is enabled, not just registry Per [1] Glance registry should not be required to run since Queens. [1] https://specs.openstack.org/openstack/glance-specs/specs/queens/approved/glance/deprecate-registry.html Change-Id: I5477c8769ff4ae151d4d6ccb5e5d8dd5788909b0 Closes-bug: #1859847 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 089510fa48..dee84fa8e2 100755 --- a/stack.sh +++ b/stack.sh @@ -1155,7 +1155,7 @@ fi # Glance # ------ -if is_service_enabled g-reg; then +if is_glance_enabled; then echo_summary "Configuring Glance" init_glance fi @@ -1280,7 +1280,7 @@ fi # scripts as userdata. # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` -if is_service_enabled g-reg; then +if is_glance_enabled; then echo_summary "Uploading images" From d7dfcdb4674daae8a294848b1de6fa87c5d7d4eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 15 Jan 2020 18:36:20 +0100 Subject: [PATCH 1183/1936] Stop enabling g-reg by default Per [1] Glance registry should not be required to run since Queens. 
[1] https://specs.openstack.org/openstack/glance-specs/specs/queens/approved/glance/deprecate-registry.html Change-Id: I93325cbd26dbc6a30062d9ba83acab248897b18e Depends-on: https://review.opendev.org/702707 Related-bug: #1859847 --- .zuul.yaml | 1 - stackrc | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cbb9d99866..08513c23e9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -401,7 +401,6 @@ key: true # Glance services g-api: true - g-reg: true # Nova services n-api: true n-api-meta: true diff --git a/stackrc b/stackrc index 2d3a599a36..9fd7f75335 100644 --- a/stackrc +++ b/stackrc @@ -69,7 +69,7 @@ if ! isset ENABLED_SERVICES ; then # Placement service needed for Nova ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova - ENABLED_SERVICES+=,g-api,g-reg + ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol # Neutron From 248d4bb8d2205de38e56ef1f92a4bf0870400a85 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 28 Nov 2019 12:57:12 +0000 Subject: [PATCH 1184/1936] Stop configuring '[DEFAULT] firewall_driver' for nova This option has default to the 'NoopFirewallDriver' for some time and will soon be removed. Stop configuring it entirely. Change-Id: I4dbc0015cf26d7edf51d0d5fd978ccd3a1ad1b79 Signed-off-by: Stephen Finucane --- lib/neutron | 2 -- lib/neutron-legacy | 5 ----- lib/neutron_plugins/nuage | 2 -- lib/nova_plugins/hypervisor-ironic | 2 -- lib/nova_plugins/hypervisor-libvirt | 2 -- lib/nova_plugins/hypervisor-openvz | 2 -- lib/nova_plugins/hypervisor-xenserver | 3 --- 7 files changed, 18 deletions(-) diff --git a/lib/neutron b/lib/neutron index 888b5e864e..a86d83e170 100644 --- a/lib/neutron +++ b/lib/neutron @@ -365,8 +365,6 @@ function configure_neutron_nova_new { iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY iniset $conf neutron region_name "$REGION_NAME" - iniset $conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver - # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf diff --git a/lib/neutron-legacy b/lib/neutron-legacy index dbd6e2c06b..f0bdcf1da7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -384,11 +384,6 @@ function create_nova_conf_neutron { iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $conf neutron region_name "$REGION_NAME" - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - fi - # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index f39c7c4f5b..8c75e15048 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -11,8 +11,6 @@ function neutron_plugin_create_nova_conf { local conf="$1" NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER } function neutron_plugin_install_agent_packages { diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index adcc278812..113e2a75ea 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -39,10 +39,8 @@ function configure_nova_hypervisor { if ! 
is_ironic_hardware; then configure_libvirt fi - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER # ironic section iniset $NOVA_CONF ironic auth_type password diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 3d676b9b8d..7d3ace8c1c 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -45,8 +45,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" # Power architecture currently does not support graphical consoles. if is_arch "ppc64"; then iniset $NOVA_CONF vnc enabled "false" diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz index 58ab5c11ac..57dc45c1c5 100644 --- a/lib/nova_plugins/hypervisor-openvz +++ b/lib/nova_plugins/hypervisor-openvz @@ -38,8 +38,6 @@ function cleanup_nova_hypervisor { function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" iniset $NOVA_CONF DEFAULT connection_type "openvz" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" } # install_nova_hypervisor() - Install external components diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index ccab18dc97..511ec1bc09 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -61,9 +61,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER" iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD" iniset $NOVA_CONF DEFAULT flat_injected "False" - # Need to avoid crash due to new firewall support - XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" local dom0_ip dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) From f9ff151549bfa57cfeec524cf06ae3d65c8ab0a8 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 16 Jan 2020 10:52:52 +0000 Subject: [PATCH 1185/1936] Stop configuring '[DEFAULT] use_neutron' for nova This has now been removed and even prior to removal defaulted to True. Change-Id: I847a873d833a4dbee96afa1d2726fea2b8045eeb Signed-off-by: Stephen Finucane --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index a86d83e170..9e6a80cf08 100644 --- a/lib/neutron +++ b/lib/neutron @@ -354,7 +354,6 @@ function configure_neutron_rootwrap { # if not passed $NOVA_CONF is used. 
function configure_neutron_nova_new { local conf=${1:-$NOVA_CONF} - iniset $conf DEFAULT use_neutron True iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" iniset $conf neutron username neutron diff --git a/lib/neutron-legacy b/lib/neutron-legacy index f0bdcf1da7..3d39d41b7e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -373,7 +373,6 @@ function configure_mutnauq { function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} - iniset $conf DEFAULT use_neutron True iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI" iniset $conf neutron username "$Q_ADMIN_USERNAME" From ec3543a02883c3d9b288128e0a6cb941315e72cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Thu, 16 Jan 2020 19:58:37 +0100 Subject: [PATCH 1186/1936] Init Glance database only on the node with the database backend Since [1] Glance init depends on either g-api or g-reg being enabled. This broke multinode g-api deployments with singlenode database backend. This commit aligns Glance with other services w.r.t when to apply database init. [1] d8dec362baa2bf7f6ffe1c47352fdbe032eaf20a Change-Id: Idc07764d6ba3a828f19691f56c73cbe9179c2673 Closes-bug: #1860021 --- lib/glance | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/glance b/lib/glance index 740bcabf6a..b4cab2ad74 100644 --- a/lib/glance +++ b/lib/glance @@ -276,16 +276,18 @@ function init_glance { rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - # (Re)create glance database - recreate_database glance + if is_service_enabled $DATABASE_BACKENDS; then + # (Re)create glance database + recreate_database glance - time_start "dbsync" - # Migrate glance database - $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync + time_start "dbsync" + # Migrate glance database + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync - # Load metadata definitions - $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs - time_stop "dbsync" + # Load metadata definitions + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs + time_stop "dbsync" + fi } # install_glanceclient() - Collect source and prepare From 6b6bdc7111aa6f953a29c5ffc8854039336ce660 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 9 Oct 2019 16:13:20 +0100 Subject: [PATCH 1187/1936] inc/python: Remove ability to mark packages as non-Python3 Everything in OpenStack *must* be Python 3 supporting now, which means it's time to remove the functionality that allows us to blacklist packages that didn't support Python 3. Change-Id: I7c8cf538ec88bd4056b0109f19671e3d65f5da3a Signed-off-by: Stephen Finucane --- inc/python | 80 +++----------------------------------------- stackrc | 4 --- tests/test_python.sh | 25 -------------- 3 files changed, 5 insertions(+), 104 deletions(-) delete mode 100755 tests/test_python.sh diff --git a/inc/python b/inc/python index fd43cef932..32dd72594f 100644 --- a/inc/python +++ b/inc/python @@ -85,60 +85,8 @@ function pip_install_gr_extras { pip_install $clean_name[$extras] } -# python3_enabled_for() assumes the service(s) specified as arguments are -# enabled for python 3 unless explicitly disabled. See python3_disabled_for(). -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. 
-# -# python3_enabled_for dir [dir ...] -function python3_enabled_for { - local xtrace - xtrace=$(set +o | grep xtrace) - set +o xtrace - - local enabled=1 - local dirs=$@ - local dir - for dir in ${dirs}; do - if ! python3_disabled_for "${dir}"; then - enabled=0 - fi - done - - $xtrace - return $enabled -} - -# python3_disabled_for() checks if the service(s) specified as arguments are -# disabled by the user in ``DISABLED_PYTHON3_PACKAGES``. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# Uses global ``DISABLED_PYTHON3_PACKAGES`` -# python3_disabled_for dir [dir ...] -function python3_disabled_for { - local xtrace - xtrace=$(set +o | grep xtrace) - set +o xtrace - - local enabled=1 - local dirs=$@ - local dir - for dir in ${dirs}; do - [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0 - done - - $xtrace - return $enabled -} - # enable_python3_package() -- no-op for backwards compatibility # -# For example: -# enable_python3_package nova -# # enable_python3_package dir [dir ...] function enable_python3_package { local xtrace @@ -150,25 +98,15 @@ function enable_python3_package { $xtrace } -# disable_python3_package() adds the services passed as argument to -# the ``DISABLED_PYTHON3_PACKAGES`` list. -# -# For example: -# disable_python3_package swift +# disable_python3_package() -- no-op for backwards compatibility # -# Uses global ``DISABLED_PYTHON3_PACKAGES`` # disable_python3_package dir [dir ...] function disable_python3_package { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace - local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}" - local dir - for dir in $@; do - disabled_svcs+=",$dir" - done - DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs") + echo "It is no longer possible to call disable_python3_package()." $xtrace } @@ -231,17 +169,9 @@ function pip_install { # support for python3 in progress, but don't claim support # in their classifier echo "Check python version for : $package_dir" - if python3_disabled_for ${package_dir##*/}; then - echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES" - else - # For everything that is not explicitly blacklisted with - # DISABLED_PYTHON3_PACKAGES, assume it supports python3 - # and we will let pip sort out the install, regardless of - # the package being local or remote. - echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - fi + echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior" + sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) fi fi diff --git a/stackrc b/stackrc index 9fd7f75335..b31fb395c7 100644 --- a/stackrc +++ b/stackrc @@ -138,10 +138,6 @@ fi # Control whether Python 3 should be used at all. export USE_PYTHON3=$(trueorfalse True USE_PYTHON3) -# Explicitly list services not to run under Python 3. See -# disable_python3_package to edit this variable. -export DISABLED_PYTHON3_PACKAGES="" - # When Python 3 is supported by an application, adding the specific # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. 
diff --git a/tests/test_python.sh b/tests/test_python.sh
deleted file mode 100755
index 1f5453c4c7..0000000000
--- a/tests/test_python.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Tests for DevStack INI functions
-
-TOP=$(cd $(dirname "$0")/.. && pwd)
-
-source $TOP/functions-common
-source $TOP/inc/python
-
-source $TOP/tests/unittest.sh
-
-echo "Testing Python 3 functions"
-
-# Initialize variables manipulated by functions under test.
-export DISABLED_PYTHON3_PACKAGES=""
-
-assert_true "should be enabled by default" python3_enabled_for testpackage1
-
-assert_false "should not be disabled yet" python3_disabled_for testpackage2
-
-disable_python3_package testpackage2
-assert_equal "$DISABLED_PYTHON3_PACKAGES" "testpackage2" "unexpected result"
-assert_true "should be disabled" python3_disabled_for testpackage2
-
-report_results

From 98f3bbe509c2de9efaf4f3fc1b5dbc42d7a67987 Mon Sep 17 00:00:00 2001
From: Stephen Finucane
Date: Fri, 17 Jan 2020 17:41:22 +0000
Subject: [PATCH 1188/1936] Revert "Stop enabling g-reg by default"

This reverts commit d7dfcdb4674daae8a294848b1de6fa87c5d7d4eb. A subsequent
change that depends on this, d8dec362baa2bf7f6ffe1c47352fdbe032eaf20a, has
knock-on effects for devstack-gate and needs to be reverted. Revert this
first.

Change-Id: Ic5402f57052648e10eacf3c3de67d2cdd2d42f63
Signed-off-by: Stephen Finucane
Partial-bug: #1860021
---
 .zuul.yaml | 1 +
 stackrc    | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.zuul.yaml b/.zuul.yaml
index 08513c23e9..cbb9d99866 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -401,6 +401,7 @@
             key: true
             # Glance services
             g-api: true
+            g-reg: true
             # Nova services
             n-api: true
             n-api-meta: true
diff --git a/stackrc b/stackrc
index 9fd7f75335..2d3a599a36 100644
--- a/stackrc
+++ b/stackrc
@@ -69,7 +69,7 @@ if ! isset ENABLED_SERVICES ; then
     # Placement service needed for Nova
     ENABLED_SERVICES+=,placement-api,placement-client
     # Glance services needed for Nova
-    ENABLED_SERVICES+=,g-api
+    ENABLED_SERVICES+=,g-api,g-reg
     # Cinder
     ENABLED_SERVICES+=,c-sch,c-api,c-vol
     # Neutron

From 48d1f028c43dd26aab852715e451e1ec08421a2f Mon Sep 17 00:00:00 2001
From: Stephen Finucane
Date: Fri, 17 Jan 2020 17:23:11 +0000
Subject: [PATCH 1189/1936] Revert "Run Glance initialization when Glance is enabled, not just registry"

This reverts commit d8dec362baa2bf7f6ffe1c47352fdbe032eaf20a. This has
knock-on effects for devstack-gate, which configures g-api on subnodes but
not mysql, resulting in failures.

A longer term fix would be to either a) stop configuring g-api on subnodes
if we can determine it's not necessary or b) only configure the database if
on the main node. However, both options are subject to debate so for now
just unclog the gate.

Change-Id: I58baa3b6c63c648836ae8152c2d6d7ceff11a388
Signed-off-by: Stephen Finucane
Closes-bug: #1860021
---
 stack.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stack.sh b/stack.sh
index dee84fa8e2..089510fa48 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1155,7 +1155,7 @@ fi
 # Glance
 # ------
 
-if is_glance_enabled; then
+if is_service_enabled g-reg; then
     echo_summary "Configuring Glance"
     init_glance
 fi
@@ -1280,7 +1280,7 @@ fi
 # scripts as userdata.
 # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
-if is_glance_enabled; then
+if is_service_enabled g-reg; then
 
     echo_summary "Uploading images"

From 8dd6f153d6b8dfd8ae976e7f1c9df06019a159b9 Mon Sep 17 00:00:00 2001
From: Carlos Goncalves
Date: Mon, 20 Jan 2020 07:24:17 +0100
Subject: [PATCH 1190/1936] Add LIBVIRT_CPU_MODE to set CPU mode

In some cases, the hypervisor presents to the guest OS a named CPU model
that is similar to the host CPU and adds extra features to approximate the
host model. However, this does not guarantee that all features will be
precisely matched.

This patch adds LIBVIRT_CPU_MODE to allow users to define the CPU mode they
want to use, for example "host-passthrough".

Change-Id: I83792c776b50d1d22584be2a37cc6a166f09c72b
---
 lib/nova                            | 1 +
 lib/nova_plugins/hypervisor-libvirt | 2 +-
 stackrc                             | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/lib/nova b/lib/nova
index 7557a51196..0893ed7084 100644
--- a/lib/nova
+++ b/lib/nova
@@ -259,6 +259,7 @@ function configure_nova {
     if [ ! -e /dev/kvm ]; then
         echo "WARNING: Switching to QEMU"
         LIBVIRT_TYPE=qemu
+        LIBVIRT_CPU_MODE=none
         if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
             # https://bugzilla.redhat.com/show_bug.cgi?id=753589
             sudo setsebool virt_use_execmem on
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 7d3ace8c1c..d1b3d784b7 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -39,7 +39,7 @@ function cleanup_nova_hypervisor {
 function configure_nova_hypervisor {
     configure_libvirt
     iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
-    iniset $NOVA_CONF libvirt cpu_mode "none"
+    iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
     # Do not enable USB tablet input devices to avoid QEMU CPU overhead.
     iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
diff --git a/stackrc b/stackrc
index 2d3a599a36..d581e48530 100644
--- a/stackrc
+++ b/stackrc
@@ -625,6 +625,7 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
 case "$VIRT_DRIVER" in
     ironic|libvirt)
         LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+        LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
         if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
             # The groups change with newer libvirt. Older Ubuntu used
             # 'libvirtd', but now uses libvirt like Debian. Do a quick check

From 2e45f2c267c9ababdbdfc4c505b329398391c5f9 Mon Sep 17 00:00:00 2001
From: Ghanshyam
Date: Sat, 18 Jan 2020 19:59:29 -0600
Subject: [PATCH 1191/1936] Adding nova-live-migration job in devstack gate

nova-live-migration is a legacy job and relies on devstack-gate + devstack
settings, so any change in devstack can break it.

Example bug: 1860021

We can remove this job once it is migrated to zuulv3 native.

Change-Id: Ie34d4dc1ab30ced8161796fe32628db07de86cc9
Related-bug: #1860021
---
 .zuul.yaml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.zuul.yaml b/.zuul.yaml
index e86805f13d..3468f69e28 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -622,6 +622,15 @@
         irrelevant-files:
           - ^.*\.rst$
           - ^doc/.*$
+      # NOTE(gmann): Remove this job from devstack pipeline once it is
+      # migrated to zuulv3 native. This is legacy job and rely on
+      # devstack-gate + devstack setting so any change in devstack can
+      # break it.
+      - nova-live-migration:
+          voting: false
+          irrelevant-files:
+            - ^.*\.rst$
+            - ^doc/.*$
     gate:
       jobs:
         - devstack

From e18325ca67dcb70b01a29563432291e1baf9f46e Mon Sep 17 00:00:00 2001
From: "Dr.
Jens Harbott" Date: Wed, 22 Jan 2020 05:54:06 +0000 Subject: [PATCH 1192/1936] Fix pip uncap fallout for nova and barbican Need to make PyYAML overridable on Ubuntu, it is a dependency for e.g. cloud-init, so we cannot remove it. Depends-On: https://review.opendev.org/703792 Change-Id: I4423dfb2c30299903b52a2bb06d846dd487f5b8b --- tools/fixup_stuff.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index eb8a76f276..7e5ae75831 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -82,6 +82,14 @@ function fixup_ubuntu { # Enable universe sudo add-apt-repository -y universe + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -f /usr/lib/python3/dist-packages/PyYAML-*.egg-info } # Python Packages From 09e860fc2c306774076c1814ba3ab7c44404066d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Sun, 19 Jan 2020 12:41:14 +0100 Subject: [PATCH 1193/1936] Run Glance initialization when Glance is enabled, not just registry (v2) Per [1] Glance registry should not be required to run since Queens. v2 improves on v1 [2] (now reverted [3]) by applying minor comments from reviews so far and ensuring nova-live-migration job does not see a change in behavior and hence does not break [4]. [5] tried to fix the issue but it did only partially, regarding the database but not the image upload [6]. This patch ensures double cirros image upload does not happen as well. 
[1] https://specs.openstack.org/openstack/glance-specs/specs/queens/approved/glance/deprecate-registry.html [2] https://review.opendev.org/702707 [3] https://review.opendev.org/703131 [4] https://bugs.launchpad.net/devstack/+bug/1860021 [5] https://review.opendev.org/702960 [6] https://bugs.launchpad.net/devstack/+bug/1860021/comments/16 Change-Id: I61538acd6bd4c7b3da26c4084225b220d7d1aa2c Closes-bug: #1859847 Related-bug: #1860021 --- lib/glance | 18 ++++++++---------- stack.sh | 7 ++++--- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/lib/glance b/lib/glance index b4cab2ad74..740bcabf6a 100644 --- a/lib/glance +++ b/lib/glance @@ -276,18 +276,16 @@ function init_glance { rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - if is_service_enabled $DATABASE_BACKENDS; then - # (Re)create glance database - recreate_database glance + # (Re)create glance database + recreate_database glance - time_start "dbsync" - # Migrate glance database - $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync + time_start "dbsync" + # Migrate glance database + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync - # Load metadata definitions - $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs - time_stop "dbsync" - fi + # Load metadata definitions + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs + time_stop "dbsync" } # install_glanceclient() - Collect source and prepare diff --git a/stack.sh b/stack.sh index 089510fa48..fe71eae36c 100755 --- a/stack.sh +++ b/stack.sh @@ -1155,7 +1155,8 @@ fi # Glance # ------ -if is_service_enabled g-reg; then +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Configuring Glance" init_glance fi @@ -1280,8 +1281,8 @@ fi # scripts as userdata. # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` -if is_service_enabled g-reg; then - +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Uploading images" for image_url in ${IMAGE_URLS//,/ }; do From 89cb80d2120a7247dcc8b1f6a073cf9c9e488806 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Sat, 18 Jan 2020 15:41:17 +0000 Subject: [PATCH 1194/1936] Revert "Revert "Stop enabling g-reg by default"" This reverts commit 98f3bbe509c2de9efaf4f3fc1b5dbc42d7a67987. This is no longer necessary as proper fix [1] is now applied. [1] https://review.opendev.org/703288 Change-Id: Ibc40f79b1daf30246ed24790e9b305caea497cb2 Related-bug: #1859847 Related-bug: #1860021 --- .zuul.yaml | 1 - stackrc | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3468f69e28..130b84a149 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -398,7 +398,6 @@ key: true # Glance services g-api: true - g-reg: true # Nova services n-api: true n-api-meta: true diff --git a/stackrc b/stackrc index 412aca8f07..b31fb395c7 100644 --- a/stackrc +++ b/stackrc @@ -69,7 +69,7 @@ if ! 
isset ENABLED_SERVICES ; then # Placement service needed for Nova ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova - ENABLED_SERVICES+=,g-api,g-reg + ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol # Neutron From 78cf6f642aea91385dfbe6e1fa594e9f5373e69c Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Tue, 15 Oct 2019 19:45:09 +0000 Subject: [PATCH 1195/1936] Always install python3 and its dev package Some distros do not install python3/python3-devel with the minimal install. F29 doesn't install -devel, and neither Centos 7 or 8 install either. This patch ensures that these packages get installed. Ideally, PYTHON3_VERSION would be set *after* ensuring that python3 was installed, but it gets a little tricky with all of the includes. This sets it to 3.6 as nothing uses 3.5 anymore. Change-Id: I7bdfc408b7c18273639ec26eade475856ac43593 --- inc/python | 2 ++ stack.sh | 5 ++++- stackrc | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/inc/python b/inc/python index 32dd72594f..bf3aacaf8f 100644 --- a/inc/python +++ b/inc/python @@ -463,6 +463,8 @@ function install_python3 { apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev elif is_suse; then install_package python3-devel python3-dbm + elif is_fedora; then + install_package python3 python3-devel fi } diff --git a/stack.sh b/stack.sh index 089510fa48..de1499ddd6 100755 --- a/stack.sh +++ b/stack.sh @@ -415,8 +415,11 @@ fi # Ensure python is installed # -------------------------- -is_package_installed python || install_package python +install_python3 +if ! python3_enabled; then + is_package_installed python || install_package python +fi # Configure Logging # ----------------- diff --git a/stackrc b/stackrc index 412aca8f07..6cc328ea60 100644 --- a/stackrc +++ b/stackrc @@ -142,7 +142,7 @@ export USE_PYTHON3=$(trueorfalse True USE_PYTHON3) # version of Python 3 to this variable will install the app using that # version of the interpreter instead of 2.7. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" -export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}} +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.6}} # Just to be more explicit on the Python 2 version to use. _DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)" From afd346a0a10d016e6ab95bdba3932eb6df8f636b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 23 Jan 2020 13:13:05 +1100 Subject: [PATCH 1196/1936] devstack-plugins-list: skip openstack/openstack The proposal job to update the plugin list has been failing for a long time as it gets a 500 error from gitea on the openstack/openstack repo. This is an odd "superrepo" with all projects as submodules; thus openstack/openstack/devstack is actually a project, not the directory with a plugin in it. Skip this repo (gitea shouldn't return a 500, but that's another thing...) Regenerate the list manually for this run. 
Change-Id: I6ed65bcb720d4cb10702cbf66106120e001ec35f --- doc/source/plugin-registry.rst | 2 +- tools/generate-devstack-plugins-list.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 5cbe4ed505..904400ece6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -147,7 +147,6 @@ x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin- x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ x/devstack-plugin-nfs `https://opendev.org/x/devstack-plugin-nfs `__ -x/devstack-plugin-sheepdog `https://opendev.org/x/devstack-plugin-sheepdog `__ x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ x/fenix `https://opendev.org/x/fenix `__ @@ -203,6 +202,7 @@ x/trio2o `https://opendev.org/x/trio2o `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-tempest-plugin `https://opendev.org/x/whitebox-tempest-plugin `__ ======================================== === diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index d39b8018ae..3ac9c4d9aa 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -45,9 +45,14 @@ def is_in_wanted_namespace(proj): # only interested in openstack or x namespace (e.g. not retired - # stackforge, etc) + # stackforge, etc). + # + # openstack/openstack "super-repo" of openstack projects as + # submodules, that can cause gitea to 500 timeout and thus stop + # this script. Skip it. if proj.startswith('stackforge/') or \ - proj.startswith('stackforge-attic/'): + proj.startswith('stackforge-attic/') or \ + proj == "openstack/openstack": return False else: return True From 97d857593759f96f036a7e0a528cb6f2159e0fc7 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Thu, 19 Dec 2019 07:38:45 -0600 Subject: [PATCH 1197/1936] Remove Sheepdog plugin information The Sheepdog project has been defunct for awhile now, and the Sheepdog driver and os-brick connector is now being removed from Cinder. This cleans up plugin references for the driver. 
Change-Id: Ieb2d9cf653b2d3a4af30cab26b8428a7c7edff98 Signed-off-by: Sean McGinnis --- lib/cinder_plugins/sheepdog | 41 ------------------------------------- 1 file changed, 41 deletions(-) delete mode 100644 lib/cinder_plugins/sheepdog diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog deleted file mode 100644 index 558de46c6d..0000000000 --- a/lib/cinder_plugins/sheepdog +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/sheepdog -# Configure the sheepdog driver - -# Enable with: -# -# CINDER_DRIVER=sheepdog - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" -} - -# Restore xtrace -$_XTRACE_CINDER_SHEEPDOG - -# Local variables: -# mode: shell-script -# End: From 6808a3468dd157f9f0676f972aea54add4e33063 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 20 Jan 2020 15:52:33 +0000 Subject: [PATCH 1198/1936] Clean up remainders of USE_SYSTEMD and logging setup We dropped the ability to override USE_SYSTEMD to False when we deleted screen support in [0], so we can also clean up any conditionals based on it. Also clean up the logging setup functions, dropping local vars for parameters that we don't actually support anymore. [0] I8c27182f60b0f5310b3a8bf5feb02beb7ffbb26a Change-Id: I5cbce9f2c42e111761e8689447b3f8cbb7ea2eb5 --- clean.sh | 10 ++++------ functions | 43 ++++++++++++++++--------------------------- stack.sh | 25 ++++++++++--------------- stackrc | 4 +--- 4 files changed, 31 insertions(+), 51 deletions(-) diff --git a/clean.sh b/clean.sh index d6c6b40608..685a719cfb 100755 --- a/clean.sh +++ b/clean.sh @@ -123,12 +123,10 @@ if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then sudo rm -rf $LOGDIR fi -# Clean out the systemd user unit files if systemd was used. -if [[ "$USE_SYSTEMD" = "True" ]]; then - sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete - # Make systemd aware of the deletion. - $SYSTEMCTL daemon-reload -fi +# Clean out the systemd unit files. +sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete +# Make systemd aware of the deletion. +$SYSTEMCTL daemon-reload # Clean up venvs DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" diff --git a/functions b/functions index 8ea634e753..6da6bc52fa 100644 --- a/functions +++ b/functions @@ -635,40 +635,29 @@ function vercmp { # This sets up defaults we like in devstack for logging for tracking # down issues, and makes sure everything is done the same between # projects. +# NOTE(jh): Historically this function switched between three different +# functions: setup_systemd_logging, setup_colorized_logging and +# setup_standard_logging_identity. Since we always run with systemd now, +# this could be cleaned up, but the other functions may still be in use +# by plugins. Since deprecations haven't worked in the past, we'll just +# leave them in place. 
function setup_logging { - local conf_file=$1 - local other_cond=${2:-"False"} - if [[ "$USE_SYSTEMD" == "True" ]]; then - setup_systemd_logging $conf_file - elif [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then - setup_colorized_logging $conf_file - else - setup_standard_logging_identity $conf_file - fi + setup_systemd_logging $1 } # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. -# The last two parameters are optional and can be used to specify -# non-default value for project and user format variables. -# Defaults are respectively 'project_name' and 'user_name' -# -# setup_colorized_logging something.conf SOMESECTION function setup_colorized_logging { local conf_file=$1 - local conf_section="DEFAULT" - local project_var="project_name" - local user_var="user_name" # Add color to logging output - iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$project_var")s %("$user_var")s%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } function setup_systemd_logging { local conf_file=$1 - local conf_section="DEFAULT" # NOTE(sdague): this is a nice to have, and means we're using the # native systemd path, which provides for things like search on # request-id. 
However, there may be an eventlet interaction here, @@ -676,16 +665,16 @@ function setup_systemd_logging { USE_JOURNAL=$(trueorfalse False USE_JOURNAL) local pidstr="" if [[ "$USE_JOURNAL" == "True" ]]; then - iniset $conf_file $conf_section use_journal "True" + iniset $conf_file DEFAULT use_journal "True" # if we are using the journal directly, our process id is already correct else pidstr="(pid=%(process)d) " fi - iniset $conf_file $conf_section logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" + iniset $conf_file DEFAULT logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" - iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s" + iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" } function setup_standard_logging_identity { diff --git a/stack.sh b/stack.sh index 089510fa48..f00bceb21d 100755 --- a/stack.sh +++ b/stack.sh @@ -803,13 +803,11 @@ fixup_all # Install subunit for the subunit output stream pip_install -U os-testr -if [[ "$USE_SYSTEMD" == "True" ]]; then - pip_install_gr systemd-python - # the default rate limit of 1000 messages / 30 seconds is not - # sufficient given how verbose our logging is. - iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" - sudo systemctl restart systemd-journald -fi +pip_install_gr systemd-python +# the default rate limit of 1000 messages / 30 seconds is not +# sufficient given how verbose our logging is. +iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" +sudo systemctl restart systemd-journald # Virtual Environment # ------------------- @@ -1521,14 +1519,11 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo fi -# If USE_SYSTEMD is enabled, tell the user about using it. -if [[ "$USE_SYSTEMD" == "True" ]]; then - echo - echo "Services are running under systemd unit files." - echo "For more information see: " - echo "https://docs.openstack.org/devstack/latest/systemd.html" - echo -fi +echo +echo "Services are running under systemd unit files." +echo "For more information see: " +echo "https://docs.openstack.org/devstack/latest/systemd.html" +echo # Useful info on current state cat /etc/devstack-version diff --git a/stackrc b/stackrc index 2d3a599a36..6d8f594309 100644 --- a/stackrc +++ b/stackrc @@ -109,9 +109,7 @@ CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" -# Whether to use SYSTEMD to manage services, we only do this from -# Queens forward. -USE_SYSTEMD="True" +# Whether to use user specific units for running services or global ones. 
 USER_UNITS=$(trueorfalse False USER_UNITS)
 if [[ "$USER_UNITS" == "True" ]]; then
     SYSTEMD_DIR="$HOME/.local/share/systemd/user"

From 29bf8523965b504624f9450f3dffe863994b4077 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?=
Date: Fri, 24 Jan 2020 11:52:13 +0100
Subject: [PATCH 1199/1936] Do not try finding pip for python2 when not required

[1] stopped installing pip for py2 when py3 is being used. This patch makes
sure we check only for py3 pip then.

Also removed some no-longer-relevant comments and made uninstall behave the
same. The check for pip>=6 was removed too. See also [2].

[1] 279a7589b03db69fd1b85d947cd0171dacef94ee
[2] http://lists.openstack.org/pipermail/openstack-discuss/2020-January/012182.html

Change-Id: I36ee53e57e468d760b80a7e621b90899867a8efd
---
 inc/python | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/inc/python b/inc/python
index 32dd72594f..37e8399f4a 100644
--- a/inc/python
+++ b/inc/python
@@ -162,16 +162,14 @@ function pip_install {
         local sudo_pip="env"
     else
         local cmd_pip
-        cmd_pip=$(get_pip_command $PYTHON2_VERSION)
         local sudo_pip="sudo -H"
         if python3_enabled; then
-            # Special case some services that have experimental
-            # support for python3 in progress, but don't claim support
-            # in their classifier
-            echo "Check python version for : $package_dir"
-            echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior"
+            echo "Using python $PYTHON3_VERSION to install $package_dir because python3_enabled=True"
             sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
             cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+        else
+            echo "Using python $PYTHON2_VERSION to install $package_dir because python3_enabled=False"
+            cmd_pip=$(get_pip_command $PYTHON2_VERSION)
         fi
     fi
@@ -179,16 +177,6 @@ function pip_install {
     # Always apply constraints
     cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
 
-    # FIXME(dhellmann): Need to force multiple versions of pip for
-    # packages like setuptools?
-    local pip_version
-    pip_version=$(python -c "import pip; \
-        print(pip.__version__.split('.')[0])")
-    if (( pip_version<6 )); then
-        die $LINENO "Currently installed pip version ${pip_version} does not" \
-            "meet minimum requirements (>=6)."
-    fi
-
     $xtrace
 
     # Also install test requirements
@@ -225,8 +213,13 @@ function pip_uninstall {
         local sudo_pip="env"
     else
         local cmd_pip
-        cmd_pip=$(get_pip_command $PYTHON2_VERSION)
         local sudo_pip="sudo -H"
+        if python3_enabled; then
+            sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
+            cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+        else
+            cmd_pip=$(get_pip_command $PYTHON2_VERSION)
+        fi
     fi
     # don't error if we can't uninstall, it might not be there
     $sudo_pip $cmd_pip uninstall -y $name || /bin/true

From 4dc02f97e7a750bf97ec71ce7fedd49e96dc5bdb Mon Sep 17 00:00:00 2001
From: Witek Bedyk
Date: Fri, 24 Jan 2020 18:52:25 +0100
Subject: [PATCH 1200/1936] Remove fixup_python_packages

The hack has been around for pip 1.4.1 and older. It should be safe to
remove it by now. In fact it causes problems in my Ubuntu Bionic VM when
trying to overwrite the httplib2 library installed from the distro package.
Change-Id: I34b826f4e8f10f8d44b888120f19fcc7ba501b3d --- tools/fixup_stuff.sh | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 7e5ae75831..a5c1667518 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -5,16 +5,6 @@ # fixup_stuff.sh # # All distro and package specific hacks go in here -# -# - prettytable 0.7.2 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - httplib2 0.8 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - Fedora: -# - set selinux not enforcing -# - uninstall firewalld (f20 only) # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone @@ -101,32 +91,6 @@ function get_package_path { echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") } - -# Pre-install affected packages so we can fix the permissions -# These can go away once we are confident that pip 1.4.1+ is available everywhere - -function fixup_python_packages { - # Fix prettytable 0.7.2 permissions - # Don't specify --upgrade so we use the existing package if present - pip_install 'prettytable>=0.7' - PACKAGE_DIR=$(get_package_path prettytable) - # Only fix version 0.7.2 - dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) - if [[ -d $dir ]]; then - sudo chmod +r $dir/* - fi - - # Fix httplib2 0.8 permissions - # Don't specify --upgrade so we use the existing package if present - pip_install httplib2 - PACKAGE_DIR=$(get_package_path httplib2) - # Only fix version 0.8 - dir=$(echo $PACKAGE_DIR-0.8*) - if [[ -d $dir ]]; then - sudo chmod +r $dir/* - fi -} - function fixup_fedora { if ! is_fedora; then return @@ -268,7 +232,6 @@ function fixup_virtualenv { function fixup_all { fixup_keystone fixup_ubuntu - fixup_python_packages fixup_fedora fixup_suse fixup_virtualenv From 19e4d972884e422d691c4cc1fe9e7ed3a03a2c01 Mon Sep 17 00:00:00 2001 From: Federico Ressi Date: Fri, 24 Jan 2020 11:44:46 +0100 Subject: [PATCH 1201/1936] Install psutil required by tools/mlock_report.py script 'tools/mlock_report.py' script requires 'psutil' package to be installed. This ensures it is done before memory_peak service is started. 
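
As a quick manual sanity check (an illustrative sketch only, not part of this
change and not the actual tools/mlock_report.py), one can confirm that psutil
is importable by the python3 interpreter DevStack uses and see which processes
currently hold locked (VmLck) memory:

    # verify psutil is available to python3 (the interpreter the tracker relies on)
    python3 -c 'import psutil; print("psutil", psutil.__version__)'
    # list /proc status entries whose VmLck counter is non-zero
    grep -H '^VmLck:' /proc/[0-9]*/status 2>/dev/null | grep -v ' 0 kB'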
Partial-Bug: #1860753 Change-Id: I7b2b6eaf9856c6057e1a4a0054d15074150a6cb6 --- lib/dstat | 9 +++++++++ stack.sh | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/lib/dstat b/lib/dstat index fe38d75585..f5bd2bb948 100644 --- a/lib/dstat +++ b/lib/dstat @@ -9,6 +9,7 @@ # ``stack.sh`` calls the entry points in this order: # +# - install_dstat # - start_dstat # - stop_dstat @@ -16,6 +17,14 @@ _XTRACE_DSTAT=$(set +o | grep xtrace) set +o xtrace +# install_dstat() - Install prerequisites for dstat services +function install_dstat { + if is_service_enabled memory_tracker; then + # Install python libraries required by tools/mlock_report.py + pip_install_gr psutil + fi +} + # start_dstat() - Start running processes function start_dstat { # A better kind of sysstat, with the top process per time slice diff --git a/stack.sh b/stack.sh index fe71eae36c..2a6e470f7e 100755 --- a/stack.sh +++ b/stack.sh @@ -867,6 +867,13 @@ if is_service_enabled tls-proxy; then init_cert fi +# Dstat +# ----- + +# Install dstat services prerequisites +install_dstat + + # Check Out and Install Source # ---------------------------- From e727dd56821231677e398133a3f991dcc3931ff4 Mon Sep 17 00:00:00 2001 From: Witek Bedyk Date: Mon, 27 Jan 2020 16:00:12 +0100 Subject: [PATCH 1202/1936] Remove conflicting packages in Ubuntu Following packages conflict with pip installed versions: * httplib2 * pyasn1-modules Change-Id: Ic4f70f839765e67394509cc543560aac7f50e287 --- tools/fixup_stuff.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a5c1667518..ae555d5861 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -79,6 +79,8 @@ function fixup_ubuntu { # manifest of what to remove. However, in most cases, simply # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency + sudo rm -f /usr/lib/python3/dist-packages/httplib2-*.egg-info + sudo rm -f /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info sudo rm -f /usr/lib/python3/dist-packages/PyYAML-*.egg-info } From 169f5dee475f49ca18cd5f803287797a6c1ee5c0 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 6 Jan 2020 13:45:33 +0000 Subject: [PATCH 1203/1936] libvirt: Support the use of the virt-preview repo when using Fedora The virt-preview repo provides the latest rawhide versions of QEMU, Libvirt and other virt tools for older releases of Fedora. This repo is extremely useful when testing features in OpenStack that rely on these latest builds well in advance of them landing in full Fedora, CentOS or RHEL releases. This change adds a ``ENABLE_FEDORA_VIRT_PREVIEW_REPO`` configurable to control when this repo is enabled and used when deploying on Fedora. Change-Id: I5c3e1b7b632fd73310c462530990cdb0e0c0ceea --- lib/nova_plugins/functions-libvirt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 35666393ca..03df258e03 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -24,6 +24,10 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Currently fairly specific to OpenStackCI hosts DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) +# Enable the Fedora Virtualization Preview Copr repo that provides the latest +# rawhide builds of QEMU, Libvirt and other virt tools. 
+ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO) + # Enable coredumps for libvirt # Bug: https://bugs.launchpad.net/nova/+bug/1643911 function _enable_coredump { @@ -61,6 +65,12 @@ function install_libvirt { #pip_install_gr elif is_fedora || is_suse; then + # Optionally enable the virt-preview repo when on Fedora + if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then + # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/ + sudo dnf copr enable -y @virtmaint-sig/virt-preview + fi + # Note that in CentOS/RHEL this needs to come from the RDO # repositories (qemu-kvm-ev ... which provides this package) # as the base system version is too old. We should have From f6597b1b461b758c3984ad607d3a9559d273a66d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 29 Jan 2020 15:46:35 +0000 Subject: [PATCH 1204/1936] Undeprecate neutron-legacy scripts Work on the new neutron scripts has stalled and they aren't in a useable state yet. Given the ongoing decline in contributions, let us acknowledge this and undeprecate the neutron-legacy scripts so that people can continue to use them without feeling guilty about it. Change-Id: I4bce19da861abf18ddb89d82fd312c5e49a4ee7c --- .zuul.yaml | 15 +++------------ lib/neutron-legacy | 2 -- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 130b84a149..7801297557 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -405,22 +405,15 @@ n-cpu: true n-novnc: true n-sch: true + # Placement service placement-api: true # Neutron services - # We need to keep using the neutron-legacy based services for - # now until all issues with the new lib/neutron code are solved q-agt: true q-dhcp: true q-l3: true q-meta: true q-metering: true q-svc: true - # neutron-api: true - # neutron-agent: true - # neutron-dhcp: true - # neutron-l3: true - # neutron-metadata-agent: true - # neutron-metering: true # Swift services s-account: true s-container: true @@ -449,12 +442,10 @@ tls-proxy: true # Nova services n-cpu: true + # Placement services placement-client: true # Neutron services - # We need to keep using the neutron-legacy based services for - # now until all issues with the new lib/neutron code are solved q-agt: true - # neutron-agent: true # Cinder services c-bak: true c-vol: true @@ -464,7 +455,7 @@ # s-*: false horizon: false tempest: false - # Test matrix emits ceilometer but ceilomenter is not installed in the + # Test matrix emits ceilometer but ceilometer is not installed in the # integrated gate, so specifying the services has not effect. # ceilometer-*: false devstack_localrc: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3d39d41b7e..8fb8858eb8 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -58,8 +58,6 @@ # Neutron Network Configuration # ----------------------------- -deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future" - if is_service_enabled tls-proxy; then Q_PROTOCOL="https" fi From e1c0406d10380f6ad3620fa9e12df8499a1010ac Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Wed, 29 Jan 2020 15:39:17 -0600 Subject: [PATCH 1205/1936] Support TEMPEST_BRANCH with tag name TEMPEST_BRANCH which is mostly set as master so that Tempest master is run to test the env. With stable branch going to EM state and Tempest master might not work due to incompatibility of code or requirements. In that case we pin the Tempest so that older Tempest can be used for their testing. 
Until now, for ocata, pike and queens we used a gerrit-style ref to pin
Tempest, which is not the preferred way. We should be able to use a tag name
in TEMPEST_BRANCH.

This commit explicitly checks out the tag set in TEMPEST_BRANCH, as git_clone
does not check out the tag directly unless RECLONE is true or the tempest dir
does not exist. After this, a stable branch or job can set the tag directly by
name. For example: TEMPEST_BRANCH=23.0.0.

Change-Id: Ic777e4b56c4932dde135ac909cb5c6f4a7d5cc78
---
 lib/tempest | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lib/tempest b/lib/tempest
index ce0886bbd9..ec61c184a0 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -686,6 +686,11 @@ function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
     pip_install 'tox!=2.8.0'
     pushd $TEMPEST_DIR
+    # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
+    # is tag name not master. git_clone would not checkout tag because
+    # TEMPEST_DIR already exist until RECLONE is true.
+    git checkout $TEMPEST_BRANCH
+
     tox -r --notest -efull
     # NOTE(mtreinish) Respect constraints in the tempest full venv, things that
     # are using a tox job other than full will not be respecting constraints but

From 3555b48ff45a109e6456923d597fa174084387ab Mon Sep 17 00:00:00 2001
From: Federico Ressi
Date: Fri, 24 Jan 2020 06:49:03 +0100
Subject: [PATCH 1206/1936] Switch to python3 for memory_peak service

When it is started, the 'memory_peak' service uses the python command instead
of python3, while psutil (a required package) is most probably installed into
the python3 environment (as we are dropping python2.7 support).

Closes-Bug: #1860753
Change-Id: Ia2b7e2e33d784560443131e2965f520b361a54e3
---
 tools/memory_tracker.sh | 2 +-
 tools/mlock_report.py   | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)
 mode change 100755 => 100644 tools/mlock_report.py

diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 63f25ca2de..6c36534f01 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,7 @@
 set -o errexit
 
-PYTHON=${PYTHON:-python}
+PYTHON=${PYTHON:-python3}
 
 # time to sleep between checks
 SLEEP_TIME=20
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
old mode 100755
new mode 100644
index 07716b04d6..b15a0bf80b
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # This tool lists processes that lock memory pages from swapping to disk.
 
 import re

From 7611d3dfd2e1f68bf11027756fd5b217d5ae9640 Mon Sep 17 00:00:00 2001
From: Carlos Camacho
Date: Thu, 30 Jan 2020 14:39:51 +0100
Subject: [PATCH 1207/1936] Add -r option when removing egg-info files/folders

We are hitting this error:

+ tools/fixup_stuff.sh:fixup_ubuntu:82 : sudo rm -f /usr/lib/python3/dist-packages/httplib2-0.11.3.egg-info
rm: cannot remove '/usr/lib/python3/dist-packages/httplib2-0.11.3.egg-info': Is a directory

This patch adds the -r option to allow removing folders.

Change-Id: Ib7bb8b0a3dcf747bcc06da1a2fb17fa9d8808484
---
 tools/fixup_stuff.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index ae555d5861..15b3ab767d 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -79,9 +79,9 @@ function fixup_ubuntu {
     # manifest of what to remove. However, in most cases, simply
    # overwriting works.
So this hacks around those packages that # have been dragged in by some other system dependency - sudo rm -f /usr/lib/python3/dist-packages/httplib2-*.egg-info - sudo rm -f /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info - sudo rm -f /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info } # Python Packages From 64b3c5f468110e00a30216186e590e87f81bc328 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 14 Feb 2020 11:07:30 -0600 Subject: [PATCH 1208/1936] Drop the tempest-full py2 job We have dropped the tempest-full from Tempest gate and made this to run on py2 explicitly which is nothing but for stable gates. - I75868d5c9b6630fe78958ff89e58a0aced09a6b3 This job is not supposed to run on ussuri onwards master gate as everything will be python3-only. Change-Id: I372bde6a1753884efaf15da5fab48f1bddb4dab5 --- .zuul.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 130b84a149..40978e6bc2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -577,7 +577,6 @@ - project: templates: - - integrated-gate - integrated-gate-py3 - publish-openstack-docs-pti check: From d05efc6ae92cea0a86ea5b0a848a99b5f8d51870 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 17 Feb 2020 12:32:12 +0100 Subject: [PATCH 1209/1936] Remove networking-calico from plugin-registry.rst Networking-calico has been removed from openstack repositories, remove it from docs. Change-Id: I58d2964ee52d66fe89aa84c1d88c4d8d4d349b0d --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 904400ece6..652bf0b1cc 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,7 +75,6 @@ openstack/murano `https://opendev.org/openstack/murano < openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ -openstack/networking-calico `https://opendev.org/openstack/networking-calico `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw `__ From 2dcbc28abab7eb51b9e3fd549fab49d42c48e90f Mon Sep 17 00:00:00 2001 From: Federico Ressi Date: Wed, 5 Feb 2020 11:29:51 +0100 Subject: [PATCH 1210/1936] Install versioned python RPMs on RedHat distros Set default python3 version as 3 for cases python3 is not installed before running DevStack. Implements installation of required python3x package for RedHat family distros with package name depending on configurable ${PYTHON3_VERSION}. 
Examples:

  3   => python3 python3-devel (default one)
  3.6 => python36 python36-devel
  3.7 => python37 python37-devel

This should help in situations where more than one python is available for a
given platform and DevStack is asked to pick one by its full 3.x version.

Change-Id: I49d86bc9193165e0a41e8e8720be37d81a4e7ee0
---
 inc/python | 6 +++++-
 stackrc    | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/inc/python b/inc/python
index 9d9f38b5a9..cf61389709 100644
--- a/inc/python
+++ b/inc/python
@@ -457,7 +457,11 @@ function install_python3 {
     elif is_suse; then
         install_package python3-devel python3-dbm
     elif is_fedora; then
-        install_package python3 python3-devel
+        if [ "$os_VENDOR" = "Fedora" ]; then
+            install_package python${PYTHON3_VERSION//.}
+        else
+            install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel
+        fi
     fi
 }
diff --git a/stackrc b/stackrc
index e11a34c8cc..4e33b6838f 100644
--- a/stackrc
+++ b/stackrc
@@ -142,7 +142,7 @@ export USE_PYTHON3=$(trueorfalse True USE_PYTHON3)
 # version of Python 3 to this variable will install the app using that
 # version of the interpreter instead of 2.7.
 _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
-export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.6}}
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}}

From 21a10d3499abd6e9cd1950b3615502bef5ad6b45 Mon Sep 17 00:00:00 2001
From: Federico Ressi
Date: Fri, 31 Jan 2020 07:43:30 +0100
Subject: [PATCH 1211/1936] Use python3 as default python command

Now that Python 2 is becoming unsupported, new distros like CentOS 8 and
RHEL8 have stopped providing a 'python' package, forcing the user to decide
which alternative to use by installing the 'python2' or 'python3.x' package
and then setting the python alternative. This change is intended to use the
python3 command as much as possible and to make it the default 'python'
alternative where needed.
The final goals motivating this change are: - stop using python2 as much as possible - help adding support for CentOS 8 and RHEL8 Change-Id: I1e90db987c0bfa6206c211e066be03ea8738ad3f --- inc/python | 25 +++++++++++++++++++++++++ stack.sh | 19 ++++++++----------- tests/test_worlddump.sh | 2 +- tools/generate-devstack-plugins-list.py | 2 +- tools/install_prereqs.sh | 6 ------ tools/outfilter.py | 4 ++-- tools/update_clouds_yaml.py | 2 +- tools/worlddump.py | 25 ++++++++++++++++++------- 8 files changed, 56 insertions(+), 29 deletions(-) mode change 100755 => 100644 tools/outfilter.py mode change 100755 => 100644 tools/worlddump.py diff --git a/inc/python b/inc/python index cf61389709..52ad56520a 100644 --- a/inc/python +++ b/inc/python @@ -450,6 +450,31 @@ function python3_enabled { fi } +# Provide requested python version and sets PYTHON variable +function install_python { + # NOTE: install_python function should finally just do what install_python3 + # does as soon Python 2 support has been dropped + if python3_enabled; then + install_python3 + export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || + which python3 2>/dev/null) + if [[ "${DISTRO}" =~ (rhel8) ]]; then + # Use Python 3 as default python command so that we have only one + # python alternative to use on the system for either python and + # python3 + sudo alternatives --set python "${PYTHON}" + else + # Install anyway Python 2 for legacy scripts that still requires + # python instead of python3 command + install_package python + fi + else + echo "WARNING - Python 2 support has been deprecated in favor of Python 3" + install_package python + export PYTHON=$(which python 2>/dev/null) + fi +} + # Install python3 packages function install_python3 { if is_ubuntu; then diff --git a/stack.sh b/stack.sh index 0f9d57bc9a..7119e5f648 100755 --- a/stack.sh +++ b/stack.sh @@ -415,11 +415,8 @@ fi # Ensure python is installed # -------------------------- -install_python3 +install_python -if ! 
python3_enabled; then - is_package_installed python || install_package python -fi # Configure Logging # ----------------- @@ -497,14 +494,14 @@ if [[ -n "$LOGFILE" ]]; then _of_args="$_of_args --no-timestamp" fi # Set fd 1 and 2 to write the log file - exec 1> >( $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 # Set fd 6 to summary log file - exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) else # Set fd 1 and 2 to primary logfile - exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 # Set fd 6 to summary logfile and stdout - exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) fi echo_summary "stack.sh log $LOGFILE" @@ -521,7 +518,7 @@ else exec 1>/dev/null 2>&1 fi # Always send summary fd to original stdout - exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 ) + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 ) fi # Basic test for ``$DEST`` path permissions (fatal on error unless skipped) @@ -557,9 +554,9 @@ function exit_trap { generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} fi if [[ -z $LOGDIR ]]; then - $TOP_DIR/tools/worlddump.py + ${PYTHON} $TOP_DIR/tools/worlddump.py else - $TOP_DIR/tools/worlddump.py -d $LOGDIR + ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR fi else # If we error before we've installed os-testr, this will fail. diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh index f407d407c0..919652536d 100755 --- a/tests/test_worlddump.sh +++ b/tests/test_worlddump.sh @@ -8,7 +8,7 @@ source $TOP/tests/unittest.sh OUT_DIR=$(mktemp -d) -$TOP/tools/worlddump.py -d $OUT_DIR +${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR if [[ $? -ne 0 ]]; then fail "worlddump failed" diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 3ac9c4d9aa..1cacd06bf8 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index da59093581..a7c03d26cd 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -81,12 +81,6 @@ if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then fi fi -if python3_enabled; then - install_python3 - export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) -else - export PYTHON=$(which python 2>/dev/null) -fi # Mark end of run # --------------- diff --git a/tools/outfilter.py b/tools/outfilter.py old mode 100755 new mode 100644 index cf091247d0..e910f79ff2 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python -# +#!/usr/bin/env python3 + # Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 9187c664d0..7be995e8f3 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain diff --git a/tools/worlddump.py b/tools/worlddump.py old mode 100755 new mode 100644 index d5ff5d1ab5..1e6e176210 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright 2014 Hewlett-Packard Development Company, L.P. # @@ -23,8 +23,8 @@ import datetime from distutils import spawn import fnmatch +import io import os -import os.path import shutil import subprocess import sys @@ -109,9 +109,10 @@ def _bridge_list(): # This method gets max version searching 'OpenFlow versions 0x1:0x'. # And return a version value converted to an integer type. def _get_ofp_version(): - process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE) + process = subprocess.Popen(['ovs-ofctl', '--version'], + stdout=subprocess.PIPE) stdout, _ = process.communicate() - find_str = 'OpenFlow versions 0x1:0x' + find_str = b'OpenFlow versions 0x1:0x' offset = stdout.find(find_str) return int(stdout[offset + len(find_str):-1]) - 1 @@ -206,7 +207,7 @@ def process_list(): def compute_consoles(): _header("Compute consoles") - for root, dirnames, filenames in os.walk('/opt/stack'): + for root, _, filenames in os.walk('/opt/stack'): for filename in fnmatch.filter(filenames, 'console.log'): fullpath = os.path.join(root, filename) _dump_cmd("sudo cat %s" % fullpath) @@ -234,12 +235,22 @@ def var_core(): # tools out there that can do that sort of thing though. _dump_cmd("ls -ltrah /var/core") + +def disable_stdio_buffering(): + # re-open STDOUT as binary, then wrap it in a + # TextIOWrapper, and write through everything. + binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0) + sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True) + + def main(): opts = get_options() fname = filename(opts.dir, opts.name) print("World dumping... see %s for details" % fname) - sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) - with open(fname, 'w') as f: + + disable_stdio_buffering() + + with io.open(fname, 'w') as f: os.dup2(f.fileno(), sys.stdout.fileno()) disk_space() process_list() From 64ae08383a137d8b7fd18a8d8bc5c0fc191a186d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 12 Feb 2020 10:04:50 +0000 Subject: [PATCH 1212/1936] Amend python pkgs for debian-based distros Drop python2 packages that are no longer needed. Replace other py2 pkgs with their py3 variant. 
Change-Id: Id5fc11d1ae0a7daf439c7d72f831f298f5d0ce31 --- files/debs/general | 3 --- files/debs/keystone | 2 +- files/debs/ldap | 2 +- files/debs/n-cpu | 2 +- files/debs/neutron-common | 2 +- files/debs/nova | 2 +- 6 files changed, 5 insertions(+), 8 deletions(-) diff --git a/files/debs/general b/files/debs/general index df872a0a6c..9f9d57cefb 100644 --- a/files/debs/general +++ b/files/debs/general @@ -27,9 +27,6 @@ openssh-server openssl pkg-config psmisc -python2.7 -python-dev -python-gdbm # needed for testr tar tcpdump unzip diff --git a/files/debs/keystone b/files/debs/keystone index fd0317b9b6..1cfa6ffa38 100644 --- a/files/debs/keystone +++ b/files/debs/keystone @@ -2,5 +2,5 @@ libkrb5-dev libldap2-dev libsasl2-dev memcached -python-mysqldb +python3-mysqldb sqlite3 diff --git a/files/debs/ldap b/files/debs/ldap index aa3a934d95..54896bb845 100644 --- a/files/debs/ldap +++ b/files/debs/ldap @@ -1,3 +1,3 @@ ldap-utils -python-ldap +python3-ldap slapd diff --git a/files/debs/n-cpu b/files/debs/n-cpu index 636644f10d..54d6fa3fd1 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -5,7 +5,7 @@ gir1.2-libosinfo-1.0 lvm2 # NOPRIME netcat-openbsd open-iscsi -python-guestfs # NOPRIME +python3-guestfs # NOPRIME qemu-utils sg3-utils sysfsutils diff --git a/files/debs/neutron-common b/files/debs/neutron-common index b269f6330b..e548396cd7 100644 --- a/files/debs/neutron-common +++ b/files/debs/neutron-common @@ -9,7 +9,7 @@ iputils-ping libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all -python-mysqldb +python3-mysqldb rabbitmq-server # NOPRIME radvd # NOPRIME sqlite3 diff --git a/files/debs/nova b/files/debs/nova index e5110e9c75..dce8f6ac89 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -16,7 +16,7 @@ libvirt-dev # NOPRIME mysql-server # NOPRIME parted pm-utils -python-mysqldb +python3-mysqldb qemu # dist:wheezy,jessie NOPRIME qemu-kvm # NOPRIME rabbitmq-server # NOPRIME From 08d84bc47f7341e64d6dd33f6f6a515f92840ac4 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 12 Feb 2020 10:07:36 +0000 Subject: [PATCH 1213/1936] Make database code work with mysql8 The GRANT command in mysql8 can no longer create a user implicitly. Split that part into a dedicated CREATE USER command. Also drop disabling the query_cache, it is off by default for some time and the option got removed in mysql8. 
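With placeholder credentials ('stack'/'secret' below are examples only), the old
single statement

    sudo mysql -e "GRANT ALL PRIVILEGES ON *.* TO 'stack'@'%' IDENTIFIED BY 'secret';"

is therefore split into two statements that also work on mysql8:

    sudo mysql -e "CREATE USER IF NOT EXISTS 'stack'@'%' IDENTIFIED BY 'secret';"
    sudo mysql -e "GRANT ALL PRIVILEGES ON *.* TO 'stack'@'%';"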
Change-Id: I31bcc285ff8e373abbacb303c1269857c9cfa9ed --- lib/databases/mysql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 420a86e04e..e5865f2a69 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -109,8 +109,10 @@ function configure_database_mysql { sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" fi + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" # Now update ``my.cnf`` for some local needs and restart the mysql service @@ -120,8 +122,6 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 - iniset -sudo $my_conf mysqld query_cache_type OFF - iniset -sudo $my_conf mysqld query_cache_size 0 if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" From 79ffa1f04f00eaaa12fada6715738468a04ba8a8 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 5 Feb 2020 15:54:00 +0000 Subject: [PATCH 1214/1936] Fedora: Always install python3-devel With the recent switch to py3 we need to ensure python3-devel is installed on Fedora >= 29 before pip installing any dependencies. Change-Id: I21417687db5b3827c5272ed22f6dc95db13f2870 --- files/rpms/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/general b/files/rpms/general index 5bf1e9ac90..15d329dfd4 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -27,6 +27,7 @@ pkgconfig postgresql-devel # psycopg2 psmisc pyOpenSSL # version in pip uses too much memory +python3-devel # f29,f30 python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 systemd-devel # for systemd-python From f1966d77630e62cb415a1c3bcb221897d3e7a1c6 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 6 Feb 2020 16:20:37 +0000 Subject: [PATCH 1215/1936] Add Fedora 30 as a supported distro With Fedora 29 now officially EOL [1] we need to start the move to 30. A later change will remove f29 support from devstack entirely once fedora-latest moves to the based f30 image. 
[1] https://lists.fedoraproject.org/archives/list/devel-announce@lists.fedoraproject.org/thread/VUK3CJ5LO4ROUH3JTCDVHYAVVYAOCU62/ Change-Id: If87fe93757129f931a3417fc6275ffad280cdf46 --- files/rpms/cinder | 4 ++-- files/rpms/dstat | 4 ++-- files/rpms/general | 4 ++-- files/rpms/nova | 2 +- files/rpms/swift | 2 +- stack.sh | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index e6b33dcc5e..ef810cf93d 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29 NOPRIME -targetcli # dist:rhel7,f25,f26,f27,f28,f29 NOPRIME +scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29,f30 NOPRIME +targetcli # dist:rhel7,f25,f26,f27,f28,f29,f30 NOPRIME diff --git a/files/rpms/dstat b/files/rpms/dstat index d7b272a93b..ad5196617f 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,2 +1,2 @@ -dstat # not:f29 -pcp-system-tools # dist:f29 +dstat # not:f29,f30 +pcp-system-tools # dist:f29,f30 diff --git a/files/rpms/general b/files/rpms/general index 15d329dfd4..279546fa60 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f25,f26,f27,f28,f29 +iptables-services # NOPRIME f25,f26,f27,f28,f29,f30 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29 +java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29,f30 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/files/rpms/nova b/files/rpms/nova index 639d793756..6c3e47870c 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f25,f26,f27,f28,f29 +kernel-modules # dist:f25,f26,f27,f28,f29,f30 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index be524d1367..8aed8a4033 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f25,f26,f27,f28,f29 +rsync-daemon # dist:f25,f26,f27,f28,f29,f30 sqlite xfsprogs xinetd diff --git a/stack.sh b/stack.sh index 7119e5f648..ce4fc7560a 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|f30|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From c6f53dacd275d97cc32a5dd4258e2a86fc05d168 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 7 Feb 2020 10:37:08 +0000 Subject: [PATCH 1216/1936] nova: Use iscsi-initiator-utils 2.1.0 on Fedora 30 and 31 I2f16658c5a3e22cac70912a0f3ad65cdd7071a1e worked around an open-iscsi bug that remains unpatched in Fedora 30 and 31 by using a private copr repo with the patch applied. Rawhide has finally been rebased to 2.1.0 where this issue and many others have been resolved. 
We can now use a new repo that has been built for Fedora 30 and 31 that provides this rebased package that we can either use until the rebase is backported to 31 and 30 or 32 is supported. Change-Id: I5ba5885bd9c784949602aeb4ddff9d75fecc6b3d --- lib/nova | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 0893ed7084..3f108a4a97 100644 --- a/lib/nova +++ b/lib/nova @@ -297,7 +297,7 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f[0-9][0-9] ]]; then + if is_fedora && [[ $DISTRO =~ f29 ]]; then # There is an iscsi-initiator bug where it inserts # different whitespace that causes a bunch of output # matching to fail. We have not been able to get @@ -305,6 +305,12 @@ function configure_nova { # https://bugzilla.redhat.com/show_bug.cgi?id=1676365 sudo dnf copr enable -y iwienand/iscsi-initiator-utils sudo dnf update -y + elif is_fedora && [[ $DISTRO =~ f3[0-1] ]]; then + # For f30 and f31 use the rebased 2.1.0 version of the package. We + # can't use this above as f29 is EOL and as a result we can't + # rebuild packages in copr for it. + sudo dnf copr enable -y lyarwood/iscsi-initiator-utils + sudo dnf update -y fi if [[ ${ISCSID_DEBUG} == "True" ]]; then From 1d1f53ddc1f70fbf0a3605031c1ffbbcb6f18002 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 6 Feb 2020 14:37:26 +0000 Subject: [PATCH 1217/1936] Use fedora-30 as the fedora-latest image With the open-iscsi rebase we should now be able to move the fedora-latest job to fedora 30 ahead of the removal of support for 29. Change-Id: Ia80f03f275e595d3b26b52b3478f303036d59438 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 40978e6bc2..7a7d56fd4c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -52,7 +52,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-29 + label: fedora-30 groups: - name: tempest nodes: From 347abd41ce860e1f33dd3870ddc54431c6427d10 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 7 Feb 2020 14:34:29 +0000 Subject: [PATCH 1218/1936] Drop Fedora 29 support With fedora-latest now using Fedora 30 images and Fedora 29 itself EOL we can now remove support for it from devstack. This change also cleans up references to earlier Fedora releases under files/rpms/. 
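For reference, entries under files/rpms/ are scoped with dist:/not: tags (dist:
installs the package only on the listed releases, not: skips it on them, and
NOPRIME keeps it out of the default prereq install), so the cleanup below mostly
shrinks those tag lists. With made-up package names the syntax looks like:

    foo-utils # dist:f30 NOPRIME
    bar-tools # not:rhel7,f30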
Change-Id: I24332f7016ebb549ea678acf677c477b55ec4d4b --- files/rpms/cinder | 4 ++-- files/rpms/dstat | 4 ++-- files/rpms/general | 6 +++--- files/rpms/nova | 2 +- files/rpms/swift | 2 +- lib/nova | 14 ++------------ stack.sh | 2 +- 7 files changed, 12 insertions(+), 22 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index ef810cf93d..e1e1f6c5c0 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29,f30 NOPRIME -targetcli # dist:rhel7,f25,f26,f27,f28,f29,f30 NOPRIME +scsi-target-utils # not:rhel7,f30 NOPRIME +targetcli # dist:rhel7,f30 NOPRIME diff --git a/files/rpms/dstat b/files/rpms/dstat index ad5196617f..e63af317fd 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,2 +1,2 @@ -dstat # not:f29,f30 -pcp-system-tools # dist:f29,f30 +dstat # not:f30 +pcp-system-tools # dist:f30 diff --git a/files/rpms/general b/files/rpms/general index 279546fa60..e3d20b3360 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f25,f26,f27,f28,f29,f30 +iptables-services # NOPRIME f30 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29,f30 +java-1.8.0-openjdk-headless # NOPRIME f30 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml @@ -27,7 +27,7 @@ pkgconfig postgresql-devel # psycopg2 psmisc pyOpenSSL # version in pip uses too much memory -python3-devel # f29,f30 +python3-devel # f30 python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 systemd-devel # for systemd-python diff --git a/files/rpms/nova b/files/rpms/nova index 6c3e47870c..c590378677 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f25,f26,f27,f28,f29,f30 +kernel-modules # dist:f30 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index 8aed8a4033..eb94d14339 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f25,f26,f27,f28,f29,f30 +rsync-daemon # dist:f30 sqlite xfsprogs xinetd diff --git a/lib/nova b/lib/nova index 3f108a4a97..a842a61fd0 100644 --- a/lib/nova +++ b/lib/nova @@ -297,18 +297,8 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f29 ]]; then - # There is an iscsi-initiator bug where it inserts - # different whitespace that causes a bunch of output - # matching to fail. We have not been able to get - # fixed, yet :/ Exists in fedora 29 & 30 at least - # https://bugzilla.redhat.com/show_bug.cgi?id=1676365 - sudo dnf copr enable -y iwienand/iscsi-initiator-utils - sudo dnf update -y - elif is_fedora && [[ $DISTRO =~ f3[0-1] ]]; then - # For f30 and f31 use the rebased 2.1.0 version of the package. We - # can't use this above as f29 is EOL and as a result we can't - # rebuild packages in copr for it. + if is_fedora && [[ $DISTRO =~ f3[0-1] ]]; then + # For f30 and f31 use the rebased 2.1.0 version of the package. 
sudo dnf copr enable -y lyarwood/iscsi-initiator-utils sudo dnf update -y fi diff --git a/stack.sh b/stack.sh index ce4fc7560a..9879bd43d4 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|f30|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 8d4ae4ffc4a351a72a24bdf5fe6f7ac17445985a Mon Sep 17 00:00:00 2001 From: LuyaoZhong Date: Wed, 19 Feb 2020 08:16:03 +0000 Subject: [PATCH 1219/1936] bug-fix: 'bytes' type in python3 cause command fail 'str' type in python2 is 'bytes' type in python3, when use python3, we will get a prefix 'b': sudo ip netns exec b'qrouter-39cc2b45-a27c-49c5-94a6-26443a49ac63' ip neigh -------------------------------------------------------------------------- *** Failed to run 'sudo ip netns exec b'qrouter-39cc2b45-a27c-49c5-94a6-26443a49ac63' ip neigh': Command 'sudo ip netns exec b'qrouter-39cc2b45-a27c-49c5-94a6-26443a49ac63' ip neigh' returned non-zero exit status 1. The message above is raised by running tools/worlddump.py with python3. Change-Id: Ic254af86fa27729839f00c0ad4a5bbbc9e545a09 --- tools/worlddump.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 tools/worlddump.py diff --git a/tools/worlddump.py b/tools/worlddump.py old mode 100644 new mode 100755 index 1e6e176210..b21ed0c861 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -174,7 +174,7 @@ def network_dump(): _dump_cmd("ip %s" % cmd) for netns_ in _netns_list(): for cmd in ip_cmds: - args = {'netns': netns_, 'cmd': cmd} + args = {'netns': bytes.decode(netns_), 'cmd': cmd} _dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args) @@ -195,7 +195,7 @@ def ovs_dump(): _dump_cmd("sudo ovs-vsctl show") for ofctl_cmd in ofctl_cmds: for bridge in bridges: - args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge} + args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)} _dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args) From b4375af841c635209e4cd751d63123005745797a Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 6 Jan 2020 13:57:20 +0000 Subject: [PATCH 1220/1936] zuul: Add a devstack-platform-fedora-latest-virt-preview job Building on I5c3e1b7b632fd73310c462530990cdb0e0c0ceea we can now add a Fedora job using the virt-preview repo that will be used by Nova's experimental queue. Change-Id: Iad9d64912bb07f307e4897ece1621f275f1d5211 --- .zuul.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 7a7d56fd4c..65b9b0c75b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -512,6 +512,16 @@ nodeset: devstack-single-node-fedora-latest voting: false +- job: + name: devstack-platform-fedora-latest-virt-preview + parent: tempest-full-py3 + description: Fedora latest platform test using the virt-preview repo. 
+ nodeset: devstack-single-node-fedora-latest + voting: false + vars: + devstack_localrc: + ENABLE_FEDORA_VIRT_PREVIEW_REPO: true + - job: name: devstack-tox-base parent: devstack From 96ddc69a2ef5caea7621d3b94d4fa2b0ef10977b Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 28 Jan 2020 14:04:02 +0000 Subject: [PATCH 1221/1936] zuul: Add the fedora-latest-virt-preview job to the experimental queue Allowing the job to be tested outside of openstack/nova's own experimental queue. Change-Id: Ib07d9522d1cb6e288edb7f78ee1960ab48d89d76 --- .zuul.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 65b9b0c75b..62cda04ac0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -683,6 +683,10 @@ # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and currently non-voting in the # check queue for nova changes but relies on devstack configuration + # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood + # for Nova to allow early testing of the latest versions of Libvirt and + # QEMU. Should only graduate out of experimental if it ever moves into + # the check queue for Nova. experimental: jobs: @@ -719,3 +723,4 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - devstack-platform-fedora-latest-virt-preview From 1e6b06ede00eca1087520abae8266142573733e3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 21 Feb 2020 06:12:07 +0000 Subject: [PATCH 1222/1936] Updated from generate-devstack-plugins-list Change-Id: I543faced83a685d48706d004ae49800abfb89dc5 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 652bf0b1cc..1dc91ea33d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -102,7 +102,6 @@ openstack/panko `https://opendev.org/openstack/panko `__ openstack/qinling `https://opendev.org/openstack/qinling `__ openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard `__ -openstack/rally `https://opendev.org/openstack/rally `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ From 6f91da9a2d0639daad3a6e1bce328c4c730511f9 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Thu, 17 Oct 2019 09:02:41 +0000 Subject: [PATCH 1223/1936] Glance: Add support to configure multiple file stores From Train release Glance has added support [0][1] to configure multiple stores of same or different types. This patch enables developers to configure multiple file stores for glance. In order to configure multiple file stores user need to set below options in local.conf GLANCE_ENABLE_MULTIPLE_STORES=True/False To enable multiple stores of glance. GLANCE_MULTIPLE_FILE_STORES=veryfast,fast,cheap,verycheap,slow,veryslow Comma separated list of store identifiers. GLANCE_DEFAULT_BACKEND=fast Default glance store in which image should be stored if store identifier not specified explicilty. Should be one of the store identifier from GLANCE_MULTIPLE_FILE_STORES config option. NOTE: This support is added so that we can start adding tempest/CI tests for glance multiple stores. 
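A minimal local.conf sketch using these options (the store names are arbitrary
identifiers):

    [[local|localrc]]
    GLANCE_ENABLE_MULTIPLE_STORES=True
    GLANCE_MULTIPLE_FILE_STORES=fast,cheap
    GLANCE_DEFAULT_BACKEND=fast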
[0] 515412b59f5b3af07a1787b9f8e85a4d656d3e1c [1] https://docs.openstack.org/glance/train/admin/multistores.html Change-Id: I494f77555cfe9115356ce0ee75c7d7f192141447 --- lib/glance | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index 740bcabf6a..e8f846fbb9 100644 --- a/lib/glance +++ b/lib/glance @@ -41,9 +41,29 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +# Glance multi-store configuration +# Boolean flag to enable multiple store configuration for glance +GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) + +# Comma separated list for configuring multiple file stores of glance, +# for example; GLANCE_MULTIPLE_FILE_STORES = fast,cheap,slow +GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} + +# Default store/backend for glance, must be one of the store specified +# in GLANCE_MULTIPLE_FILE_STORES option. +GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} + GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} + +# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store +# identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES +# has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast +# and $DATA_DIR/glance/cheap. +GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} +GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} +GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -97,6 +117,18 @@ function is_glance_enabled { function cleanup_glance { # delete image files (glance) sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR + + # Cleanup multiple stores directories + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store file_dir + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + sudo rm -rf $file_dir + done + + # Cleanup reserved stores directories + sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR + fi } # configure_glance() - Set config files, create data dirs, etc @@ -117,6 +149,16 @@ function configure_glance { iniset_rpc_backend glance $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + # Configure multiple stores + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + fi + # Set non-default configuration options for the API server iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_API_CONF database connection $dburl @@ -141,8 +183,21 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT enable_v1_api False fi - # Store specific configs - iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + # Glance multiple store Store specific configs + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + iniset $GLANCE_API_CONF glance_store default_backend 
$GLANCE_DEFAULT_BACKEND + local store + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + done + + # Glance configure reserved stores + iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" + iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" + else + # Store specific configs + iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + fi iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST) # CORS feature support - to allow calls from Horizon by default @@ -152,6 +207,7 @@ function configure_glance { iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" fi + # No multiple stores for swift yet # Store the images in swift if enabled. if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF glance_store default_store swift From 2b6c2b41a03d1bb5de5df0ad7b84a2bbbbd76430 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 28 Feb 2020 06:11:43 +0000 Subject: [PATCH 1224/1936] Updated from generate-devstack-plugins-list Change-Id: I03cd12d5b2de5c2ff4ee92d66a631da34d4255c5 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 1dc91ea33d..6f79102aef 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -139,7 +139,6 @@ x/bilean `https://opendev.org/x/bilean `__ x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos `__ -x/devstack-plugin-bdd `https://opendev.org/x/devstack-plugin-bdd `__ x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin-glusterfs `__ x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ From 5837ce504bf546b9e4489c0046cb5b25c656e01c Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 28 Feb 2020 10:47:42 +0000 Subject: [PATCH 1225/1936] zuul: Remove devstack-plugin-ceph-tempest job This job is now failing 100% of the time on master since the EOL of py2: https://zuul.opendev.org/t/openstack/builds?job_name=devstack-plugin-ceph-tempest&branch=master Change-Id: I0bb03190f39b5ae5494b2d10a0e819013d60bddf --- .zuul.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 62cda04ac0..26b57a20d0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -695,10 +695,6 @@ - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi - neutron-tempest-with-uwsgi - - devstack-plugin-ceph-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ From 4361ef1eb871c85ef6f05fdd8cd26c25e76c4c8e Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 11 Dec 2019 17:53:44 +0000 Subject: [PATCH 1226/1936] tempest: Enable stable_rescue tests with Libvirt Enable the compute feature for stable device rescue on all but LXC and Xen virt_types. 
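With the flag enabled, the generated tempest.conf ends up carrying, for example:

    [compute-feature-enabled]
    stable_rescue = True
    swap_volume = True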
Depends-On: https://review.opendev.org/#/c/700812/ Depends-On: https://review.opendev.org/#/c/700813/ Change-Id: Icfc717b4932be050a169fc00dae720d29a6497f7 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 525abcb582..6e87e54801 100644 --- a/lib/tempest +++ b/lib/tempest @@ -570,6 +570,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False iniset $TEMPEST_CONFIG compute-feature-enabled suspend False else + iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi fi From d0886f1ee9cd342d9d3dac0e3c9ee407dee3e697 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 5 Mar 2020 06:13:02 +0000 Subject: [PATCH 1227/1936] Updated from generate-devstack-plugins-list Change-Id: I92025c92bae7a673aae16192cfe892f3bdee269f --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 6f79102aef..0d0a4208dd 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -50,6 +50,7 @@ openstack/heat `https://opendev.org/openstack/heat `__ openstack/ironic `https://opendev.org/openstack/ironic `__ openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ +openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ openstack/karbor `https://opendev.org/openstack/karbor `__ openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard `__ From e87428908ad2217e6bf4ac483c234891f535f4ec Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 11 Dec 2019 15:17:58 +0000 Subject: [PATCH 1228/1936] Add "OVN" debs and rpms package files The networking-ovn code is being moved into the neutron repository as part of the effort [0]. This change is needed so we are able to install OVN from packages when running the networking-ovn functional tests along with the Neutron ones (see [1]). In the old networking-ovn repository we did compile OVN from source instead of installing it from packages but that took time. We want to do better in the Neutron repository. [0] https://blueprints.launchpad.net/neutron/+spec/neutron-ovn-merge [1] https://review.opendev.org/#/c/697440/ Change-Id: I92ab727d9954eb729c41b9a67ecb60b56883097b Signed-off-by: Lucas Alvares Gomes --- files/debs/ovn | 3 +++ files/rpms/ovn | 3 +++ 2 files changed, 6 insertions(+) create mode 100644 files/debs/ovn create mode 100644 files/rpms/ovn diff --git a/files/debs/ovn b/files/debs/ovn new file mode 100644 index 0000000000..81eea5e633 --- /dev/null +++ b/files/debs/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-controller-vtep +ovn-host diff --git a/files/rpms/ovn b/files/rpms/ovn new file mode 100644 index 0000000000..698e57b0de --- /dev/null +++ b/files/rpms/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-host +ovn-vtep From d22a8bcb1fdcc8d6ddbd59c8ae0223a3eb33c2ed Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sun, 15 Mar 2020 18:22:45 -0500 Subject: [PATCH 1229/1936] Use py3 version of Tempest Multinode job tempest-multinode-full is py2 job and not needed to run on ussuri onwards. 
Chaning this to its py3 version tempest-multinode-full-py3 Change-Id: Iff271eabcf1a39d6bf6c1fcd55ff2749cab2373f --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 26b57a20d0..0efca8c6c8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -617,7 +617,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - tempest-multinode-full: + - tempest-multinode-full-py3: voting: false irrelevant-files: - ^.*\.rst$ From 5ae8a3504ce0619eda5fceaa638b01ba123e5e14 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Tue, 17 Mar 2020 20:54:07 -0400 Subject: [PATCH 1230/1936] Drop DEVSTACK_GATE_FEATURE_MATRIX DEVSTACK_GATE_FEATURE_MATRIX seems to be an old legacy thing that is no longer being used. It currently prevents using the jobs in openstack/devstack without adding openstack/devstack-gate for the role. Change-Id: Iab9b4862c01043d2c158398bac4b3b289a0adba0 --- doc/source/zuul_ci_jobs_migration.rst | 6 ------ playbooks/pre.yaml | 1 - 2 files changed, 7 deletions(-) diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst index 17e7e16fb7..c43603ea17 100644 --- a/doc/source/zuul_ci_jobs_migration.rst +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -195,12 +195,6 @@ value defined in the parent job, or the default from DevStack, if any. - A bridge called br-infra is set up for all jobs that inherit from multinode with a dedicated `bridge role `_. - * - DEVSTACK_GATE_FEATURE_MATRIX - - devstack-gate - - ``test_matrix_features`` variable of the test-matrix role in - devstack-gate. This is a temporary solution, feature matrix - will go away. In the future services will be defined in jobs - only. * - DEVSTACK_CINDER_VOLUME_CLEAR - devstack - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index 60f365aa7a..ff97a1f501 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -26,7 +26,6 @@ set_fact: external_bridge_mtu: "{{ local_mtu | int - 50 }}" roles: - - test-matrix - configure-swap - setup-stack-user - setup-tempest-user From 90f0baf3cb84118e51bc5990ba6885f07b679090 Mon Sep 17 00:00:00 2001 From: ushen Date: Wed, 18 Mar 2020 20:08:16 +0800 Subject: [PATCH 1231/1936] Bring back load balance v2 guide Previous commit forgets to add an entry for load balancer version 2. This ps brings them back. Change-Id: Idac09e8259d58ed130b79b40528f733708000da7 --- doc/source/guides.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/guides.rst b/doc/source/guides.rst index ca134c4520..e7ec629962 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -69,6 +69,11 @@ Nova and devstack Guide to working with nova features :doc:`Nova and devstack `. +Configure Load-Balancer Version 2 +----------------------------------- + +Guide on :doc:`Configure Load-Balancer Version 2 `. + Deploying DevStack with LDAP ---------------------------- From 7db34f6caeec6293a87661202484fcff6dc04325 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 18 Mar 2020 15:35:27 -0400 Subject: [PATCH 1232/1936] Use MEMCACHE_SERVERS in configure_keystone_authtoken_middleware The function was using a hard coded value of localhost:11211 when we have an option MEMCACHE_SERVERS that can be defined and used inside DevStack. 
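For example (the addresses below are placeholders), a multi-node deployment that
sets

    MEMCACHE_SERVERS=10.0.0.11:11211,10.0.0.12:11211

in its local.conf now gets that value in the auth token middleware section
(typically [keystone_authtoken]) of every service configured through
configure_keystone_authtoken_middleware, instead of the hard coded
localhost:11211.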
Change-Id: I4947928fe406a9844d5bdaa3c826d273952fa097 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 9ceb829264..366e6c7054 100644 --- a/lib/keystone +++ b/lib/keystone @@ -421,7 +421,7 @@ function configure_keystone_authtoken_middleware { iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section cafile $SSL_BUNDLE_FILE - iniset $conf_file $section memcached_servers localhost:11211 + iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS } # configure_auth_token_middleware conf_file admin_user IGNORED [section] From 4af912d88953f406f4c3275de8c6ad370aade8ab Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 20 Mar 2020 08:33:13 +0000 Subject: [PATCH 1233/1936] Updated from generate-devstack-plugins-list Change-Id: I86601bc3bf114583ccad7a301af4e5b71e3ba2bf --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0d0a4208dd..0c29bb2a91 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -38,6 +38,7 @@ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ openstack/dragonflow `https://opendev.org/openstack/dragonflow `__ @@ -99,6 +100,7 @@ openstack/openstacksdk `https://opendev.org/openstack/openstac openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ +openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/panko `https://opendev.org/openstack/panko `__ openstack/patrole `https://opendev.org/openstack/patrole `__ openstack/qinling `https://opendev.org/openstack/qinling `__ From 37659927923473c13f4bec88855205d0ee28bcfb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 21 Mar 2020 06:15:49 +0000 Subject: [PATCH 1234/1936] Updated from generate-devstack-plugins-list Change-Id: Ide7724ed3b4b5bb709b7dd79a367a5d2420bcda2 --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0c29bb2a91..e92929a809 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -38,6 +38,7 @@ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ openstack/devstack-plugin-pika 
`https://opendev.org/openstack/devstack-plugin-pika `__ openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ @@ -146,7 +147,6 @@ x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin- x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ -x/devstack-plugin-nfs `https://opendev.org/x/devstack-plugin-nfs `__ x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ x/fenix `https://opendev.org/x/fenix `__ From c26dfb0d6f165a567925390ac9c6f6ac32742fee Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 25 Mar 2020 08:32:26 -0500 Subject: [PATCH 1235/1936] Run pip check at end of devstack The new pip depsolver is coming this summer. Until it's ready, run pip check at the end of devstack to make sure we're not somehow installing conflicting package versions. We shouldn't be, because of constraints, but if we are, better to know and start figuring it out. Change-Id: Id98f0848ff5a252d93e5f8029df2d069924d603f --- inc/python | 24 ++++++++++++++++++++++++ stack.sh | 3 +++ 2 files changed, 27 insertions(+) diff --git a/inc/python b/inc/python index 52ad56520a..20f3c6040c 100644 --- a/inc/python +++ b/inc/python @@ -111,6 +111,30 @@ function disable_python3_package { $xtrace } + +function pip_check { + time_start "pip_check" + + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip + else + local cmd_pip + if python3_enabled; then + echo "Using python $PYTHON3_VERSION to check pip install because python3_enabled=True" + cmd_pip=$(get_pip_command $PYTHON3_VERSION) + else + echo "Using python $PYTHON2_VERSION to check pip install because python3_enabled=False" + cmd_pip=$(get_pip_command $PYTHON2_VERSION) + fi + fi + + $cmd_pip check + result=$? + + time_stop "pip_check" + return $result +} + # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``*_proxy``, diff --git a/stack.sh b/stack.sh index 9879bd43d4..352112a4af 100755 --- a/stack.sh +++ b/stack.sh @@ -1421,6 +1421,9 @@ fi # Check the status of running services service_check +# Run pip check to make sure we're forward compatible with the pip depsolver. +pip_check + # Configure nova cellsv2 # ---------------------- From c2830fb26759678b1bfe90a77aa5c149599d6705 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 27 Mar 2020 11:08:27 -0500 Subject: [PATCH 1236/1936] Revert "Run pip check at end of devstack" This reverts commit 79b8e79488a6268f37244188ab831e99a99648c8. This is breaking things in various jobs, most notably because we do not put constraints on linters - but we install test-requirements which then can conflict with each other. 
Change-Id: Ibc5603c61b38ce44db58fb27a27352f59123ad09 --- inc/python | 24 ------------------------ stack.sh | 3 --- 2 files changed, 27 deletions(-) diff --git a/inc/python b/inc/python index 20f3c6040c..52ad56520a 100644 --- a/inc/python +++ b/inc/python @@ -111,30 +111,6 @@ function disable_python3_package { $xtrace } - -function pip_check { - time_start "pip_check" - - if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then - local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip - else - local cmd_pip - if python3_enabled; then - echo "Using python $PYTHON3_VERSION to check pip install because python3_enabled=True" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - else - echo "Using python $PYTHON2_VERSION to check pip install because python3_enabled=False" - cmd_pip=$(get_pip_command $PYTHON2_VERSION) - fi - fi - - $cmd_pip check - result=$? - - time_stop "pip_check" - return $result -} - # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``*_proxy``, diff --git a/stack.sh b/stack.sh index 352112a4af..9879bd43d4 100755 --- a/stack.sh +++ b/stack.sh @@ -1421,9 +1421,6 @@ fi # Check the status of running services service_check -# Run pip check to make sure we're forward compatible with the pip depsolver. -pip_check - # Configure nova cellsv2 # ---------------------- From 09b5b05c471985ad53c4b321e993101f007ee915 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 27 Mar 2020 11:22:39 -0500 Subject: [PATCH 1237/1936] Stop installing test-requirements with projects This is a test of installing openstack and then seeing if it works. OpenStack components do not need test-requirements to operate, that's why they are test-requirements. Additionally, as we look forward to depsolver pip, this is going to screw us because we don't apply constraints to linters, which are expressed in - you guessed it, test-requirements. Change-Id: I8f24b839bf42e2fb9803dc7df3a30ae20cf264eb --- inc/python | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/inc/python b/inc/python index 52ad56520a..30b9a30eca 100644 --- a/inc/python +++ b/inc/python @@ -179,13 +179,6 @@ function pip_install { $xtrace - # Also install test requirements - local install_test_reqs="" - local test_req="${package_dir}/test-requirements.txt" - if [[ -e "$test_req" ]]; then - install_test_reqs="-r $test_req" - fi - # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep # the same behaviour of setuptools before version 25.0.0. # related issue: https://github.com/pypa/pip/issues/3874 @@ -195,7 +188,7 @@ function pip_install { no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ - $cmd_pip $upgrade $install_test_reqs \ + $cmd_pip $upgrade \ $@ result=$? From aa71ebf92c7bb7144ae64551ad649054f4797a0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 30 Mar 2020 12:06:30 +0200 Subject: [PATCH 1238/1936] Test also swift-dsvm-functional-py3 Swift keeps testing py2 but we should keep both in shape. 
To fix stestr on py2: Depends-on: https://review.opendev.org/715942 Change-Id: I616e39c64e22d467d7186dba98226cc5beef23ea --- .zuul.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0efca8c6c8..cc97b89980 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -602,9 +602,12 @@ voting: false - swift-dsvm-functional: voting: false - irrelevant-files: + irrelevant-files: &dsvm-irrelevant-files - ^.*\.rst$ - ^doc/.*$ + - swift-dsvm-functional-py3: + voting: false + irrelevant-files: *dsvm-irrelevant-files - grenade-py3: irrelevant-files: - ^.*\.rst$ From be26306b4ab470e65cc93305453d2945aa33bf78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 30 Mar 2020 09:56:53 +0200 Subject: [PATCH 1239/1936] Support extras in Glance Store install Recent change to devstack dropped installing test-requirements [1] However, this caused gate failures due to lack of glance-store deps for cinder and swift support. This patch makes devstack install relevant extras depending on enabled features. Additionally, relevant functions are added/fixed to make this possible. glance-store = glance_store (for gerrit search match) [1] https://review.opendev.org/715469 Change-Id: I0bf5792a6058b52936115b515ea8360f6264a7c9 --- inc/python | 32 +++++++++++++++++++++++++++----- lib/glance | 15 ++++++++++++++- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/inc/python b/inc/python index 30b9a30eca..dd77296049 100644 --- a/inc/python +++ b/inc/python @@ -21,6 +21,14 @@ set +o xtrace # project. A null value installs to the system Python directories. declare -A -g PROJECT_VENV +# Utility Functions +# ================= + +# Joins bash array of extras with commas as expected by other functions +function join_extras { + local IFS="," + echo "$*" +} # Python Functions # ================ @@ -80,9 +88,9 @@ function pip_install_gr { function pip_install_gr_extras { local name=$1 local extras=$2 - local clean_name - clean_name=$(get_from_global_requirements $name) - pip_install $clean_name[$extras] + local version_constraints + version_constraints=$(get_version_constraints_from_global_requirements $name) + pip_install $name[$extras]$version_constraints } # enable_python3_package() -- no-op for backwards compatibility @@ -230,6 +238,19 @@ function get_from_global_requirements { echo $required_pkg } +# get only version constraints of a package from global requirements file +# get_version_constraints_from_global_requirements +function get_version_constraints_from_global_requirements { + local package=$1 + local required_pkg_version_constraint + # drop the package name from output (\K) + required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + if [[ $required_pkg_version_constraint == "" ]]; then + die $LINENO "Can't find package $package in requirements" + fi + echo $required_pkg_version_constraint +} + # should we use this library from their git repo, or should we let it # get pulled in via pip dependencies. 
function use_library_from_git { @@ -278,7 +299,7 @@ function setup_lib { # # use this for non namespaced libraries # -# setup_dev_lib [-bindep] +# setup_dev_lib [-bindep] [] function setup_dev_lib { local bindep if [[ $1 == -bindep* ]]; then @@ -287,7 +308,8 @@ function setup_dev_lib { fi local name=$1 local dir=${GITDIR[$name]} - setup_develop $bindep $dir + local extras=$2 + setup_develop $bindep $dir $extras } # this should be used if you want to install globally, all libraries should diff --git a/lib/glance b/lib/glance index e8f846fbb9..9398bd2daf 100644 --- a/lib/glance +++ b/lib/glance @@ -355,11 +355,24 @@ function install_glanceclient { # install_glance() - Collect source and prepare function install_glance { + local glance_store_extras=() + + if is_service_enabled cinder; then + glance_store_extras=("cinder" "${glance_store_extras[@]}") + fi + + if is_service_enabled swift; then + glance_store_extras=("swift" "${glance_store_extras[@]}") + fi + # Install glance_store from git so we make sure we're testing # the latest code. if use_library_from_git "glance_store"; then git_clone_by_name "glance_store" - setup_dev_lib "glance_store" + setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}") + else + # we still need to pass extras + pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}") fi git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH From 1587ba1bd59fe0115c273ee2382bf627fce937a3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 12 Mar 2020 15:13:37 +1100 Subject: [PATCH 1240/1936] Remove fixup_virtualenv This first came in with Id749c37ab7fefa96b35f11816b56b9def5ef4b08. It talks about ancient versions of pip; can't see we need it any more. Change-Id: I9d4831955070990a81a809d988612d9d5b1aa672 --- tools/fixup_stuff.sh | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 15b3ab767d..6c7123374b 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -202,39 +202,9 @@ function fixup_suse { sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info } -# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has -# connection issues under proxy so re-install the latest version using -# pip. To avoid having pip's virtualenv overwritten by the distro's -# package (e.g. due to installing a distro package with a dependency -# on python-virtualenv), first install the distro python-virtualenv -# to satisfy any dependencies then use pip to overwrite it. - -# ... but, for infra builds, the pip-and-virtualenv [1] element has -# already done this to ensure the latest pip, virtualenv and -# setuptools on the base image for all platforms. It has also added -# the packages to the yum/dnf ignore list to prevent them being -# overwritten with old versions. F26 and dnf 2.0 has changed -# behaviour that means re-installing python-virtualenv fails [2]. -# Thus we do a quick check if we're in the infra environment by -# looking for the mirror config script before doing this, and just -# skip it if so. - -# [1] https://opendev.org/openstack/diskimage-builder/src/branch/master/ \ -# diskimage_builder/elements/pip-and-virtualenv/ \ -# install.d/pip-and-virtualenv-source-install/04-install-pip -# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823 - -function fixup_virtualenv { - if [[ ! 
-f /etc/ci/mirror_info.sh ]]; then - install_package python-virtualenv - pip_install -U --force-reinstall virtualenv - fi -} - function fixup_all { fixup_keystone fixup_ubuntu fixup_fedora fixup_suse - fixup_virtualenv } From 7a2c1c2b1b4eead11168a9e635be3e0d883a7c5f Mon Sep 17 00:00:00 2001 From: Witek Bedyk Date: Tue, 31 Mar 2020 16:43:38 +0200 Subject: [PATCH 1241/1936] Add python3-dev package for Debian based distros When stacking outside of Zuul CI the wheels have to be built locally and python3-dev package is required. Story: 2007491 Task: 39213 Change-Id: I0960269d5cf193c9ececc5490485522c74646382 --- files/debs/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/general b/files/debs/general index 9f9d57cefb..ac297435db 100644 --- a/files/debs/general +++ b/files/debs/general @@ -27,6 +27,7 @@ openssh-server openssl pkg-config psmisc +python3-dev tar tcpdump unzip From 497caf015729d451428d5b608853741689f153b3 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Fri, 3 Apr 2020 10:14:07 -0700 Subject: [PATCH 1242/1936] Fix opensuse trusted certificates There's a bug[1] with the combination of the p11-kit and ca-certificates-mozilla packages available on the latest built opensuse-15 node in nodepool (which has not been rebuilt for weeks due to a separate issue[2]) which causes the standard CA bundle to not be installed correctly and causes jobs that call to external HTTPS services to fail. Upgrading both packages in sync fixes the issue. [1] https://bugzilla.suse.com/show_bug.cgi?id=1154871 [2] http://bugzilla.suse.com/show_bug.cgi?id=1166139 Change-Id: Ia8fdfe12fd9089e178adcb2b5eec997eebada262 Needed-by: https://review.opendev.org/713566 --- tools/fixup_stuff.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 15b3ab767d..5d3f610173 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -200,6 +200,11 @@ function fixup_suse { # have been dragged in by some other system dependency sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info + + # Ensure trusted CA certificates are up to date + # See https://bugzilla.suse.com/show_bug.cgi?id=1154871 + # May be removed once a new opensuse-15 image is available in nodepool + sudo zypper up -y p11-kit ca-certificates-mozilla } # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has From e16e925e120100156a08a0436040f42b901b361a Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 9 Apr 2020 11:00:28 +0100 Subject: [PATCH 1243/1936] Fedora: Do not start the lvmetad service after Fedora 30 This deprecated service has been removed from lvm2 in Fedora 31: https://src.fedoraproject.org/rpms/lvm2/c/0469456b352530d9f507f2515e674bfb03671f48 Change-Id: I06d572a72969c5e5e8e038caef19e358e4f97b2b --- lib/lvm | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/lvm b/lib/lvm index d9e78a016f..92265f2af2 100644 --- a/lib/lvm +++ b/lib/lvm @@ -124,13 +124,15 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the lvmetad and tgtd services - if is_fedora || is_suse; then + # Start the lvmetad on f30 (dropped from f31) or SUSE + if [[ $DISTRO =~ f30 ]] || is_suse; then # services is not started by default start_service lvm2-lvmetad - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then - start_service tgtd - fi + fi + + # Start the tgtd service on Fedora and SUSE if tgtadm is used + if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then + 
start_service tgtd fi # Start with a clean volume group From e6852eadb192365093e8ef738cff87fe60e8434a Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 12 Mar 2020 15:18:11 +1100 Subject: [PATCH 1244/1936] Use venv for VIRTUALENV_CMD Using venv, which is part of python3, we avoid an extra dependency on the virtualenv package. For Debuntu, which splits this out into a separate package, add this to debs/general. This is part of the infra efforts to ship "plain" nodes without any dependencies installed. While devstack can re-install virtualenv, we don't need any features it provides and it means one less dependency. Change-Id: I3c323640f288e57581a4eb8adba2a08d0b0cbd8f --- files/debs/general | 1 + stackrc | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/files/debs/general b/files/debs/general index ac297435db..fe0061337a 100644 --- a/files/debs/general +++ b/files/debs/general @@ -28,6 +28,7 @@ openssl pkg-config psmisc python3-dev +python3-venv tar tcpdump unzip diff --git a/stackrc b/stackrc index 4e33b6838f..17641c343d 100644 --- a/stackrc +++ b/stackrc @@ -150,7 +150,8 @@ export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}} # Create a virtualenv with this if [[ ${USE_PYTHON3} == True ]]; then - export VIRTUALENV_CMD="virtualenv -p python3" + # Use the built-in venv to avoid more dependencies + export VIRTUALENV_CMD="python3 -m venv" else export VIRTUALENV_CMD="virtualenv " fi From e0da11cec5324cc4c100f6e22b83f1addde06ce7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 14 Apr 2020 06:10:51 +0000 Subject: [PATCH 1245/1936] Updated from generate-devstack-plugins-list Change-Id: I0c3950729380646e1a98a90ee82e8562cc8c33c1 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index e92929a809..70c18a88b1 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -97,6 +97,7 @@ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron- openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ +openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ From f9e81d34382083732b02b0cda87331e20e04d48d Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Sun, 12 Apr 2020 11:29:59 +0100 Subject: [PATCH 1246/1936] Fedora: Update required RPMs for Fedora 31 This includes the addition of the python3-virtualenv package required to provide the virtualenv binary that is no longer present in the image. 
Change-Id: Ie8e66d8b9f93063b97f88f41a626daddf235339b --- files/rpms/cinder | 4 ++-- files/rpms/dstat | 4 ++-- files/rpms/general | 7 ++++--- files/rpms/nova | 2 +- files/rpms/swift | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index e1e1f6c5c0..a8201ea38b 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f30 NOPRIME -targetcli # dist:rhel7,f30 NOPRIME +scsi-target-utils # not:rhel7,f30,f31 NOPRIME +targetcli # dist:rhel7,f30,f31 NOPRIME diff --git a/files/rpms/dstat b/files/rpms/dstat index e63af317fd..a091cceb3a 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,2 +1,2 @@ -dstat # not:f30 -pcp-system-tools # dist:f30 +dstat # not:f30,f31 +pcp-system-tools # dist:f30,f31 diff --git a/files/rpms/general b/files/rpms/general index e3d20b3360..361416a910 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,9 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f30 +iptables-services # NOPRIME f30,f31 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f30 +java-1.8.0-openjdk-headless # NOPRIME f30,f31 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml @@ -27,7 +27,8 @@ pkgconfig postgresql-devel # psycopg2 psmisc pyOpenSSL # version in pip uses too much memory -python3-devel # f30 +python3-devel # dist:f30,f31 +python3-virtualenv # dist:f31 python-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 systemd-devel # for systemd-python diff --git a/files/rpms/nova b/files/rpms/nova index c590378677..0f3d10f53a 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,7 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f30 +kernel-modules # dist:f30,f31 kpartx libxml2-python m2crypto diff --git a/files/rpms/swift b/files/rpms/swift index eb94d14339..745cc2e09a 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -2,7 +2,7 @@ curl liberasurecode-devel memcached pyxattr -rsync-daemon # dist:f30 +rsync-daemon # dist:f30,f31 sqlite xfsprogs xinetd From c36e51b0ae541a983eb917a8746d07d10c723e40 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 9 Apr 2020 11:17:03 +0100 Subject: [PATCH 1247/1936] Fedora: Mark Fedora 31 as supported and use within fedora-latest Change-Id: I06235eb5bf722e6f3ab05da577fa0f00638c87b1 --- .zuul.yaml | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cc97b89980..48c2d0d798 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -52,7 +52,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-30 + label: fedora-31 groups: - name: tempest nodes: diff --git a/stack.sh b/stack.sh index 9879bd43d4..4e1134b7e3 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! 
${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From c287e7ec3c22a9b4c2d89c1e36a6383b351f9b17 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 11 Dec 2019 16:55:41 +1100 Subject: [PATCH 1248/1936] Remove CentOS 7 as supported environment We do not support CentOS 7 on master branch due to no Python 3 or ongoing eco-system (i.e. RDO) support; see Id9ef507dd6f4226d65c6ed3043666b0aa6a3bd1c. Change-Id: If98581708568e7a8d15e6edc588a008df0cac0fb --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 4e1134b7e3..0f1ddb1b3f 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 9b8b7021d0a8c7647b47e74a2cd4163a663acd40 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 16 Apr 2020 13:03:56 +1000 Subject: [PATCH 1249/1936] Fedora: install python3-libguestfs All platforms are python3 now Change-Id: Ie9f4c7e52f23a45bb8cf70a5fddf1b21a40d7000 --- lib/nova_plugins/hypervisor-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index d1b3d784b7..b0ae29e39b 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -117,7 +117,7 @@ function install_nova_hypervisor { # Workaround for missing dependencies in python-libguestfs install_package python-libguestfs guestfs-data augeas augeas-lenses elif is_fedora; then - install_package python-libguestfs + install_package python3-libguestfs fi fi } From ad1635147c51f0b0e8264dd32d3be9461519e87d Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 8 Jul 2019 13:17:39 +0100 Subject: [PATCH 1250/1936] Bump noVNC to 1.1.0 v1.1.0 was released over a year ago now so move to it ahead of 1.2.0. Change-Id: I13c5638f320b18c4f6f945528352471d2bb31313 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 17641c343d..70007a9a80 100644 --- a/stackrc +++ b/stackrc @@ -600,7 +600,7 @@ IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} From 3cd41019b048349b42ec62d5602beb89bed9e975 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 16 Apr 2020 13:06:07 +1000 Subject: [PATCH 1251/1936] lib/tls: use python3 to run inline script We only need to run this fixup for the active python now we are 3 only. 
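The inline lookup that is now hard-coded to python3 can also be run standalone; it prints the CA bundle path shipped by the requests library, or nothing at all when requests is not importable:

    # Same one-liner as in fix_system_ca_bundle_path, runnable by hand.
    python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass'
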
Change-Id: I7616e5ee5693b2890fb7f6bd9052890a82904c22 --- lib/tls | 3 +-- stack.sh | 3 --- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/tls b/lib/tls index 65ffeb937d..d05536b45d 100644 --- a/lib/tls +++ b/lib/tls @@ -369,8 +369,7 @@ function deploy_int_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - local python_cmd=${1:-python} - capath=$($python_cmd -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then diff --git a/stack.sh b/stack.sh index 0f1ddb1b3f..b59af404a4 100755 --- a/stack.sh +++ b/stack.sh @@ -968,9 +968,6 @@ fi if is_service_enabled tls-proxy; then fix_system_ca_bundle_path - if python3_enabled ; then - fix_system_ca_bundle_path python3 - fi fi # Extras Install From 812e7846c99e6e4445fdd115a6440fe999730bc1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2020 09:25:22 +1000 Subject: [PATCH 1252/1936] Cleanup rpm-distro mariadb install We have lib/databases/mysql which is installing databases, remove it from the bulk package lists. Split is_fedora (fedora & centos8 -- soon) to install mariadb-server and mariadb-devel to retain status-quo. On suse this seems to be a meta-package 'mariadb-server' not found in package names. Trying capabilities. so split that out. It seems it has never been installing the -devel package, and things work (presumably clients are coming from wheels so don't need to build against it). Change-Id: I86433318e8f76c40c5c792b795411a5c9d8351d3 --- files/rpms-suse/neutron-common | 1 - files/rpms-suse/nova | 1 - files/rpms/general | 1 - lib/databases/mysql | 5 ++++- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common index d1cc73f115..e3799a9353 100644 --- a/files/rpms-suse/neutron-common +++ b/files/rpms-suse/neutron-common @@ -5,7 +5,6 @@ ebtables haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils -mariadb # NOPRIME rabbitmq-server # NOPRIME radvd # NOPRIME sqlite3 diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 1d5812146b..9923760750 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -11,7 +11,6 @@ kpartx kvm # NOPRIME libvirt # NOPRIME libvirt-python # NOPRIME -mariadb # NOPRIME # mkisofs is required for config_drive mkisofs # not:sle12 parted diff --git a/files/rpms/general b/files/rpms/general index 361416a910..b150a570b4 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -17,7 +17,6 @@ libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel -mariadb-devel # MySQL-python net-tools openssh-server openssl diff --git a/lib/databases/mysql b/lib/databases/mysql index e5865f2a69..d4969d713c 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -175,7 +175,10 @@ EOF # Install mysql-server if is_oraclelinux; then install_package mysql-community-server - elif is_fedora || is_suse; then + elif is_fedora; then + install_package mariadb-server mariadb-devel + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_suse; then install_package mariadb-server sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then From 39bc71cedd96b45caec6e92dc8228da1cc3b1643 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 16 Apr 2020 11:53:11 +1000 Subject: [PATCH 1253/1936] 
libs/rpms : update for CentOS 7 removal With the removal of CentOS 7, we can re-evaluate the rpm installations. We should not need virtualenv after https://review.opendev.org/712609 There should be no need for python-devel as we're python3 pyOpenSSL was added to workaround memory issues in 9e98f9435ec36f2fffed0ac368befd520f07e0e1 (2015) ... I think we've moved on. pyxattr is not a package; remove it. I don't see we need packaged m2crypto, which isn't a package on CentOS 8. nor libxml2-python; these days it has wheels which should work with the normal installation process. centos8 has: * targetcli * pcp-system-tools (and no dstat) * iptables-services * java-1.8.0-openjdk-headless * kernel-modules * rsync-daemon just as all supported fedora's do, so we can remove any matching here. Change-Id: I542c426a67a98f331d2a29bacd220af81fab8cc4 --- files/rpms/cinder | 3 +-- files/rpms/dstat | 3 +-- files/rpms/general | 10 +++------- files/rpms/neutron-common | 2 -- files/rpms/nova | 6 +----- files/rpms/swift | 3 +-- 6 files changed, 7 insertions(+), 20 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index a8201ea38b..c21ea08e89 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,4 @@ iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # not:rhel7,f30,f31 NOPRIME -targetcli # dist:rhel7,f30,f31 NOPRIME +targetcli diff --git a/files/rpms/dstat b/files/rpms/dstat index a091cceb3a..6524bed607 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1,2 +1 @@ -dstat # not:f30,f31 -pcp-system-tools # dist:f30,f31 +pcp-system-tools diff --git a/files/rpms/general b/files/rpms/general index b150a570b4..ff77ce05af 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,9 +9,8 @@ git-core graphviz # needed only for docs httpd httpd-devel -iptables-services # NOPRIME f30,f31 -java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f30,f31 +iptables-services +java-1.8.0-openjdk-headless libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml @@ -25,10 +24,7 @@ pcre-devel # for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc -pyOpenSSL # version in pip uses too much memory -python3-devel # dist:f30,f31 -python3-virtualenv # dist:f31 -python-devel +python3-devel redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 systemd-devel # for systemd-python tar diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common index 0cc8d11ceb..fe25f57ea6 100644 --- a/files/rpms/neutron-common +++ b/files/rpms/neutron-common @@ -5,8 +5,6 @@ ebtables haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils -mysql-devel -mysql-server # NOPRIME openvswitch # NOPRIME rabbitmq-server # NOPRIME radvd # NOPRIME diff --git a/files/rpms/nova b/files/rpms/nova index 0f3d10f53a..2218330230 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,12 +7,8 @@ gawk genisoimage # required for config_drive iptables iputils -kernel-modules # dist:f30,f31 +kernel-modules kpartx -libxml2-python -m2crypto -mysql-devel -mysql-server # NOPRIME parted polkit rabbitmq-server # NOPRIME diff --git a/files/rpms/swift b/files/rpms/swift index 745cc2e09a..376c6f3df7 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,8 +1,7 @@ curl liberasurecode-devel memcached -pyxattr -rsync-daemon # dist:f30,f31 +rsync-daemon sqlite xfsprogs xinetd From f70cb70acb8050c49dd4cf5f3e4f7c70ba2d978d Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Mon, 20 Apr 2020 09:53:25 +0000 Subject: [PATCH 1254/1936] Support upload of 
qcow2.xz image format Some images come in *.xz format [1] and this needs to be uncompressed before being uploaded. [1] https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200323.3.2/x86_64/fedora-coreos-31.20200323.3.2-openstack.x86_64.qcow2.xz Change-Id: I11f8484ba939c4b2c0a98aa08ef7db730baf71e8 --- functions | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/functions b/functions index 8ea634e753..0d27515c20 100644 --- a/functions +++ b/functions @@ -341,6 +341,12 @@ function upload_image { disk_format=qcow2 container_format=bare ;; + *.qcow2.xz) + image_name=$(basename "$image" ".qcow2.xz") + disk_format=qcow2 + container_format=bare + unpack=unxz + ;; *.raw) image_name=$(basename "$image" ".raw") disk_format=raw @@ -376,6 +382,16 @@ function upload_image { openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") elif [ "$unpack" = "bunzip2" ]; then openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}") + elif [ "$unpack" = "unxz" ]; then + # NOTE(brtknr): unxz the file first and cleanup afterwards to + # prevent timeout while Glance tries to upload image (e.g. to Swift). + local tmp_dir + local image_path + tmp_dir=$(mktemp -d) + image_path="$tmp_dir/$image_name" + unxz -cv "${image}" > "$image_path" + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format --file "$image_path" + rm -rf $tmp_dir else openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}" fi From 4578f98944efac5ef0c2c1476d26efd5c880e367 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sun, 19 Apr 2020 17:54:56 -0500 Subject: [PATCH 1255/1936] [ussuri][goal] Update contributor documentation This patch updates/adds the contributor documentation to follow the guidelines of the Ussuri cycle community goal[1]. [1] https://governance.openstack.org/tc/goals/selected/ussuri/project-ptl-and-contrib-docs.html Story: #2007236 Task: #38554 Change-Id: I870955fda7ee6e0d7809faa309bbc31c2ddd653a --- CONTRIBUTING.rst | 19 +++++++++ doc/source/contributor/contributing.rst | 56 +++++++++++++++++++++++++ doc/source/index.rst | 7 ++++ 3 files changed, 82 insertions(+) create mode 100644 CONTRIBUTING.rst create mode 100644 doc/source/contributor/contributing.rst diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..bb511656f1 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,19 @@ +The source repository for this project can be found at: + + https://opendev.org/openstack/devstack + +Pull requests submitted through GitHub are not monitored. 
+ +To start contributing to OpenStack, follow the steps in the contribution guide +to set up and use Gerrit: + + https://docs.openstack.org/contributors/code-and-documentation/quick-start.html + +Bugs should be filed on Launchpad: + + https://bugs.launchpad.net/devstack + +For more specific information about contributing to this repository, see the +Devstack contributor guide: + + https://docs.openstack.org/devstack/latest/contributor/contributing.html diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst new file mode 100644 index 0000000000..5e0df569f7 --- /dev/null +++ b/doc/source/contributor/contributing.rst @@ -0,0 +1,56 @@ +============================ +So You Want to Contribute... +============================ + +For general information on contributing to OpenStack, please check out the +`contributor guide `_ to get started. +It covers all the basics that are common to all OpenStack projects: the accounts +you need, the basics of interacting with our Gerrit review system, how we +communicate as a community, etc. + +Below will cover the more project specific information you need to get started +with Devstack. + +Communication +~~~~~~~~~~~~~ +* IRC channel ``#openstack-qa`` at FreeNode +* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses) + http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss + +Contacting the Core Team +~~~~~~~~~~~~~~~~~~~~~~~~ +Please refer to the `Devstack Core Team +`_ contacts. + +New Feature Planning +~~~~~~~~~~~~~~~~~~~~ +If you want to propose a new feature please read `Feature Proposal Process`_ +Devstack features are tracked on `Launchpad BP `_. + +Task Tracking +~~~~~~~~~~~~~ +We track our tasks in `Launchpad `_. + +Reporting a Bug +~~~~~~~~~~~~~~~ +You found an issue and want to make sure we are aware of it? You can do so on +`Launchpad `__. +More info about Launchpad usage can be found on `OpenStack docs page +`_ + +Getting Your Patch Merged +~~~~~~~~~~~~~~~~~~~~~~~~~ +All changes proposed to the Devstack require two ``Code-Review +2`` votes from +Devstack core reviewers before one of the core reviewers can approve the patch +by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate +which can be approved by single core reviewers. + +Project Team Lead Duties +~~~~~~~~~~~~~~~~~~~~~~~~ +All common PTL duties are enumerated in the `PTL guide +`_. + +The Release Process for QA is documented in `QA Release Process +`_. + +.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions diff --git a/doc/source/index.rst b/doc/source/index.rst index 6694022316..7923cb88c7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -151,6 +151,13 @@ Get :doc:`the big picture ` of what we are trying to do with devstack, and help us by :doc:`contributing to the project `. +If you are a new contributor to devstack please refer: :doc:`contributor/contributing` + +.. toctree:: + :hidden: + + contributor/contributing + Contents ++++++++ From 332992ace2bcb2a4d94c0ae3ccbc7c3a2a7bcb38 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Sun, 26 Apr 2020 12:39:05 -0400 Subject: [PATCH 1256/1936] setup-devstack-source-dirs: add var for paths The paths for the devstack source directories are currently hardcoded, this patch allows overriding that list. 
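As a sketch of the new flexibility (only the variable name comes from this patch; the playbook path and the extra directory are made-up examples), a job could pass its own list as an extra var:

    # Override the role default with an additional source tree.
    ansible-playbook playbooks/pre.yaml \
        -e '{"devstack_source_dirs": ["src/opendev.org/openstack", "src/example.org/custom"]}'
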
Change-Id: I2b673b0d110d84658b89bb14663584330deaf3aa --- roles/setup-devstack-source-dirs/defaults/main.yaml | 8 ++++++++ roles/setup-devstack-source-dirs/tasks/main.yaml | 9 +-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml index fea05c8146..77a74d7b89 100644 --- a/roles/setup-devstack-source-dirs/defaults/main.yaml +++ b/roles/setup-devstack-source-dirs/defaults/main.yaml @@ -1 +1,9 @@ devstack_base_dir: /opt/stack +devstack_source_dirs: + - src/opendev.org/opendev + - src/opendev.org/openstack + - src/opendev.org/openstack-dev + - src/opendev.org/openstack-infra + - src/opendev.org/starlingx + - src/opendev.org/x + - src/opendev.org/zuul diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index 160757ede9..294c29cd29 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -1,13 +1,6 @@ - name: Find all OpenStack source repos used by this job find: - paths: - - src/opendev.org/opendev - - src/opendev.org/openstack - - src/opendev.org/openstack-dev - - src/opendev.org/openstack-infra - - src/opendev.org/starlingx - - src/opendev.org/x - - src/opendev.org/zuul + paths: "{{ devstack_source_dirs }}" file_type: directory register: found_repos From ccc58267257f48501030ad7e3c4b18a7435f9a1c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 27 Apr 2020 06:13:34 +0000 Subject: [PATCH 1257/1936] Updated from generate-devstack-plugins-list Change-Id: I04b13cb0114233afca9aa7987144bd69d6d0f185 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 70c18a88b1..42c5fc1324 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -138,6 +138,7 @@ starlingx/integ `https://opendev.org/starlingx/integ `__ starlingx/nfv `https://opendev.org/starlingx/nfv `__ starlingx/update `https://opendev.org/starlingx/update `__ +vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ x/almanach `https://opendev.org/x/almanach `__ x/apmec `https://opendev.org/x/apmec `__ x/bilean `https://opendev.org/x/bilean `__ From bc86e82a61d9aa74e6f455bb47003a96c548b2fc Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 27 Apr 2020 12:03:38 -0500 Subject: [PATCH 1258/1936] Update DEVSTACK_SERIES to victoria stable/ussuri branch has been created now and current master is for victoria. Change-Id: I10bd20adf6ce43c6c73acaa646d425c6df6da199 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 17641c343d..c8c2738fef 100644 --- a/stackrc +++ b/stackrc @@ -255,7 +255,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="ussuri" +DEVSTACK_SERIES="victoria" ############## # From 1147300b9c572a19cf4c70ad3ba1fab7ccb965cc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 30 Apr 2020 10:09:47 +1000 Subject: [PATCH 1259/1936] Remove bridge-utils/brctl usage This package isn't available on some distributions hasn't been required in Neutron for several years If679e79fa3242ee1cd8610b5525deca35b41c87e. Remove it. 
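The same information brctl provided is available from iproute2; tools/worlddump.py already collects it with the two commands shown here, and the linuxbridge agent no longer needs any extra package installed:

    # Modern equivalents of 'brctl show'.
    bridge link
    ip link show type bridge
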
Change-Id: I7308a885c1d084efe2f0b9f542443d35966140ed --- files/debs/general | 1 - files/rpms-suse/general | 1 - files/rpms/general | 1 - lib/neutron_plugins/linuxbridge_agent | 2 +- tools/worlddump.py | 2 -- 5 files changed, 1 insertion(+), 6 deletions(-) diff --git a/files/debs/general b/files/debs/general index fe0061337a..2d8cd80473 100644 --- a/files/debs/general +++ b/files/debs/general @@ -1,7 +1,6 @@ apache2 apache2-dev bc -bridge-utils bsdmainutils curl default-jre-headless # NOPRIME diff --git a/files/rpms-suse/general b/files/rpms-suse/general index b870d72149..0af2b5b169 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,7 +1,6 @@ apache2 apache2-devel bc -bridge-utils ca-certificates-mozilla curl gcc diff --git a/files/rpms/general b/files/rpms/general index ff77ce05af..303510cbd9 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,5 +1,4 @@ bc -bridge-utils curl dbus gcc diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 1f1b0e8e52..bdeaf0f3c6 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -38,7 +38,7 @@ function neutron_plugin_create_nova_conf { } function neutron_plugin_install_agent_packages { - install_package bridge-utils + : } function neutron_plugin_configure_dhcp_agent { diff --git a/tools/worlddump.py b/tools/worlddump.py index b21ed0c861..0a4df52337 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -166,8 +166,6 @@ def network_dump(): _header("Network Dump") _dump_cmd("bridge link") - if _find_cmd("brctl"): - _dump_cmd("brctl show") _dump_cmd("ip link show type bridge") ip_cmds = ["neigh", "addr", "link", "route"] for cmd in ip_cmds + ['netns']: From 36705b52336a442dc05d9874638d269834ff93b7 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 9 Apr 2020 11:00:28 +0100 Subject: [PATCH 1260/1936] CentOS 8 support This adds support for CentOS 8 Change-Id: If7ddbd6655086657db03074a27a2d47053fd88e0 --- .zuul.yaml | 19 ++++++++++++ stack.sh | 85 +++++++++++++++--------------------------------------- 2 files changed, 43 insertions(+), 61 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 48c2d0d798..4a54a3a070 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -38,6 +38,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-8 + nodes: + - name: controller + label: centos-8 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-opensuse-15 nodes: @@ -498,6 +508,14 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. +- job: + name: devstack-platform-centos-8 + parent: tempest-full-py3 + description: Centos 8 platform test + nodeset: devstack-single-node-centos-8 + voting: false + timeout: 9000 + - job: name: devstack-platform-opensuse-15 parent: tempest-full-py3 @@ -595,6 +613,7 @@ - devstack-ipv6 - devstack-platform-opensuse-15 - devstack-platform-fedora-latest + - devstack-platform-centos-8 - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/stack.sh b/stack.sh index b59af404a4..aa898de9fb 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! 
${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed) ]]; then +if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -290,67 +290,20 @@ function _install_epel { uninstall_package epel-release || true fi - # This trick installs the latest epel-release from a bootstrap - # repo, then removes itself (as epel-release installed the - # "real" repo). - # - # You would think that rather than this, you could use - # $releasever directly in .repo file we create below. However - # RHEL gives a $releasever of "6Server" which breaks the path; - # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759 - cat < Date: Thu, 30 Apr 2020 09:24:04 +1000 Subject: [PATCH 1261/1936] Cleanup yum things We no longer support platforms with Yum on master. Cleanup old references and convert to dnf. We don't need any of the failure wrapper stuff as dnf runs in strict mode by default. There seem to be a few callers out there, so we'll leave it called yum_install for now. Change-Id: Ie71a48fd85b00a97a14bf260cd013b18af4cce06 --- functions-common | 50 +++++++----------------------------------------- stack.sh | 9 +-------- stackrc | 9 --------- 3 files changed, 8 insertions(+), 60 deletions(-) diff --git a/functions-common b/functions-common index 6be07b42bb..f50ff56c7d 100644 --- a/functions-common +++ b/functions-common @@ -329,9 +329,6 @@ function _ensure_lsb_release { sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then sudo dnf install -y redhat-lsb-core - elif [[ -x $(command -v yum 2>/dev/null) ]]; then - # all rh patforms (fedora, centos, rhel) have this pkg - sudo yum install -y redhat-lsb-core else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -1361,7 +1358,7 @@ function uninstall_package { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then - sudo ${YUM:-yum} remove -y "$@" ||: + sudo dnf remove -y "$@" ||: elif is_suse; then sudo zypper remove -y "$@" ||: else @@ -1369,8 +1366,11 @@ function uninstall_package { fi } -# Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM`` +# Wrapper for ``dnf`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# The name is kept for backwards compatability with external +# callers, despite none of our supported platforms using yum +# any more. # yum_install package [package ...] function yum_install { local result parse_yum_result @@ -1378,44 +1378,8 @@ function yum_install { [[ "$OFFLINE" = "True" ]] && return time_start "yum_install" - - # This is a bit tricky, because yum -y assumes missing or failed - # packages are OK (see [1]). We want devstack to stop if we are - # installing missing packages. - # - # Thus we manually match on the output (stack.sh runs in a fixed - # locale, so lang shouldn't change). - # - # If yum returns !0, we echo the result as "YUM_FAILED" and return - # that from the awk (we're subverting -e with this trick). - # Otherwise we use awk to look for failure strings and return "2" - # to indicate a terminal failure. 
- # - # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567 - parse_yum_result=' \ - BEGIN { result=0 } \ - /^YUM_FAILED/ { result=$2 } \ - /^No package/ { result=2 } \ - /^Failed:/ { result=2 } \ - //{ print } \ - END { exit result }' - (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \ - | awk "$parse_yum_result" && result=$? || result=$? - + sudo_with_proxies dnf install -y "$@" time_stop "yum_install" - - # if we return 1, then the wrapper functions will run an update - # and try installing the package again as a defense against bad - # mirrors. This can hide failures, especially when we have - # packages that are in the "Failed:" section because their rpm - # install scripts failed to run correctly (in this case, the - # package looks installed, so when the retry happens we just think - # the package is OK, and incorrectly continue on). - if [ "$result" == 2 ]; then - die "Detected fatal package install failure" - fi - - return "$result" } # zypper wrapper to set arguments correctly diff --git a/stack.sh b/stack.sh index aa898de9fb..eac8079029 100755 --- a/stack.sh +++ b/stack.sh @@ -283,19 +283,12 @@ fi # to pick up required packages. function _install_epel { - # NOTE: We always remove and install latest -- some environments - # use snapshot images, and if EPEL version updates they break - # unless we update them to latest version. - if sudo yum repolist enabled epel | grep -q 'epel'; then - uninstall_package epel-release || true - fi - # epel-release is in extras repo which is enabled by default install_package epel-release # RDO repos are not tested with epel and may have incompatibilities so # let's limit the packages fetched from epel to the ones not in RDO repos. - sudo yum-config-manager --save --setopt=includepkgs=debootstrap,dpkg epel + sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel } function _install_rdo { diff --git a/stackrc b/stackrc index cf501bb374..b189e206e1 100644 --- a/stackrc +++ b/stackrc @@ -813,15 +813,6 @@ SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} -# Choose DNF on RedHat/Fedora platforms with it, or otherwise default -# to YUM. Can remove this when only dnf is supported (i.e. centos7 -# disappears) -if [[ -e /usr/bin/dnf ]]; then - YUM=${YUM:-dnf} -else - YUM=${YUM:-yum} -fi - # Common Configuration # -------------------- From c104afec7dd72edfd909847bee9c14eaf077a28b Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 4 May 2020 08:21:02 +0200 Subject: [PATCH 1262/1936] Switch to new grenade job name The integrated gate template (integrated-gate-py3) has been switched to the new grenade name (grenade-py3 -> grenade). This repo uses the template but also has for irrelevant files an extra entry. Rename the job following the template change to avoid duplicate grenade runs. 
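A quick way to confirm no stale references to the old job name remain after a rename like this (the command is illustrative):

    # Exits quietly when .zuul.yaml is clean.
    git grep -n 'grenade-py3' .zuul.yaml || echo "no stale references"
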
Details: - http://lists.openstack.org/pipermail/openstack-discuss/2020-May/014602.html Depends-On: https://review.opendev.org/725148 Change-Id: I73e67c0e01ca231678903b2460dab672f17251e3 --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 4a54a3a070..602975a165 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -627,7 +627,7 @@ - swift-dsvm-functional-py3: voting: false irrelevant-files: *dsvm-irrelevant-files - - grenade-py3: + - grenade: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -676,7 +676,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - grenade-py3: + - grenade: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 53c2f6fe23318d16f311fde58901ad12e37049a0 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Sat, 9 May 2020 13:30:10 +0200 Subject: [PATCH 1263/1936] Cleanup py27 support This repo is now testing only with Python 3, so let's make a few cleanups: - Remove obsolete section from setup.cfg - Use newer openstackdocstheme and Sphinx version for python 3 - Remove install_command from tox.ini, the default is fine - Move basepython into testenv - Update bashate version Change-Id: I3d78b3787af2efce831d223dbcab6cf84c358028 --- doc/requirements.txt | 4 ++-- setup.cfg | 3 --- tox.ini | 8 ++------ 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/doc/requirements.txt b/doc/requirements.txt index fffb83d96b..6ca2d60eaa 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,8 +2,8 @@ pbr>=2.0.0,!=2.1.0 Pygments docutils -sphinx>=1.6.2 -openstackdocstheme>=1.20.0 +sphinx>=2.0.0,!=2.1.0 +openstackdocstheme>=2.0.0 nwdiag blockdiag sphinxcontrib-blockdiag diff --git a/setup.cfg b/setup.cfg index 4e27ad80d8..146f010243 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,6 +10,3 @@ classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux - -[wheel] -universal = 1 diff --git a/tox.ini b/tox.ini index 26baa2a1c5..ed28636d3a 100644 --- a/tox.ini +++ b/tox.ini @@ -5,15 +5,14 @@ envlist = bashate [testenv] usedevelop = False -install_command = pip install {opts} {packages} +basepython = python3 [testenv:bashate] -basepython = python3 # if you want to test out some changes you have made to bashate # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your # modified bashate tree deps = - {env:BASHATE_INSTALL_PATH:bashate==0.5.1} + {env:BASHATE_INSTALL_PATH:bashate==2.0.0} whitelist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ @@ -35,7 +34,6 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] -basepython = python3 deps = -r{toxinidir}/doc/requirements.txt whitelist_externals = bash setenv = @@ -44,7 +42,6 @@ commands = sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html [testenv:pdf-docs] -basepython = python3 deps = {[testenv:docs]deps} whitelist_externals = make @@ -53,6 +50,5 @@ commands = make -C doc/build/pdf [testenv:venv] -basepython = python3 deps = -r{toxinidir}/doc/requirements.txt commands = {posargs} From 551848dd696b32a48946b6f0a22079d57907a27c Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Mon, 27 Apr 2020 08:39:37 +0800 Subject: [PATCH 1264/1936] Fix devstack default settings in aarch64 1. Lack qemu-efi in ubuntu 2. Lack edk2.git-aarch64 in Fedora/Centos 3. Remove NOVNC disable. 
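The guard added for the Ubuntu case in the hunk below looks like this; the Fedora/CentOS branch is identical except that it installs edk2.git-aarch64:

    # Only pull in the UEFI firmware on arm64 hosts.
    if is_arch "aarch64"; then
        install_package qemu-efi
    fi
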
Change-Id: Ifbd0c386df6b28bc64cef20cab8e08f99a85c782 Signed-off-by: Kevin Zhao --- lib/nova_plugins/functions-libvirt | 7 +++++++ lib/nova_plugins/hypervisor-libvirt | 2 -- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 03df258e03..d3827c30dd 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -59,6 +59,9 @@ function install_libvirt { if is_ubuntu; then install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev + if is_arch "aarch64"; then + install_package qemu-efi + fi # uninstall in case the libvirt version changed pip_uninstall libvirt-python pip_install_gr libvirt-python @@ -78,6 +81,10 @@ function install_libvirt { install_package qemu-kvm install_package libvirt libvirt-devel + if is_arch "aarch64"; then + install_package edk2.git-aarch64 + fi + pip_uninstall libvirt-python pip_install_gr libvirt-python fi diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index b0ae29e39b..b25bc0c367 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -52,8 +52,6 @@ function configure_nova_hypervisor { # arm64-specific configuration if is_arch "aarch64"; then - # arm64 architecture currently does not support graphical consoles. - iniset $NOVA_CONF vnc enabled "false" iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" fi From 782efb0f8a0b6c64e5b6a317ff5bf905e51da850 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Mon, 11 May 2020 18:28:32 -0700 Subject: [PATCH 1265/1936] Fix pip distro package removal for focal Ubuntu Focal doesn't have python-pip, only python3-pip. Trying to uninstall a package that apt doesn't know about (installed or uninstalled) results in a nonzero exit code so devstack fails. This patch makes the package removal safer for both python2 and python3 cases by checking first if the package exists. Change-Id: I3b1118888cb0617ffb99b72c7e9a32308033783e --- tools/install_pip.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index dcd546629f..5eb538ce84 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -133,8 +133,12 @@ get_versions # results in a nonfunctional system. pip on fedora installs to /usr so pip # can safely override the system pip for all versions of fedora if ! is_fedora && ! is_suse; then - uninstall_package python-pip - uninstall_package python3-pip + if is_package_installed python-pip ; then + uninstall_package python-pip + fi + if is_package_installed python3-pip ; then + uninstall_package python3-pip + fi fi install_get_pip From 8fd45dec50ca8872bf07563dcb41cffca3683965 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 12 May 2020 12:00:20 +0200 Subject: [PATCH 1266/1936] New TEMPEST_VOLUME_REVERT_TO_SNAPSHOT setting Follow the pattern of the other configuration keys. The new variables allows tests to enable/disable volume revert tests provided by cinder-tempest-plugin. Revert-to-snapshot was introduced in pike, and so the tests. 
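Because lib/tempest only supplies a default (the ${VAR:-True} assignment in the hunk below), an explicit value set in local.conf before stacking still wins, so the flag can be forced on or off for any backend:

    # localrc override; takes precedence over the backend-based default.
    TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=True
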
Change-Id: If137f201c2f646703f5a1ff96e71e48caed63b67 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6e87e54801..37b22065b4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -472,6 +472,11 @@ function configure_tempest { TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) + # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} # Reset microversions to None where v2 is running which does not support microversion. From 9cbd02d8c8a99612862ccd1c20e199797c6bcb47 Mon Sep 17 00:00:00 2001 From: Vladislav Kuzmin Date: Wed, 20 May 2020 12:14:04 +0400 Subject: [PATCH 1267/1936] Enable certificate check for glance_store+swift Disable insecure option for glance_store with swift backend when tls is enabled. Specify swift_store_cacert option. Change-Id: Ia1e8f596c95dd7b6e63cb21a94c8316dc71bf945 --- lib/glance | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/lib/glance b/lib/glance index 9398bd2daf..3f72c0b8bc 100644 --- a/lib/glance +++ b/lib/glance @@ -212,25 +212,19 @@ function configure_glance { if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True - if python3_enabled; then - iniset $GLANCE_API_CONF glance_store swift_store_auth_insecure True - fi iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + fi iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - if python3_enabled; then - # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag - # or ability to specify the CACERT. So fallback to http:// url - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address ${KEYSTONE_SERVICE_URI/https/http}/v3 - else - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 - fi + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi From 54edc7aeef97df768477b5fa14f8fc45266a9c2e Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 22 May 2020 13:03:15 +0100 Subject: [PATCH 1268/1936] tempest: Increase m1.nano and m1.micro RAM by 64MB to avoid tmpfs exhaustion tmpfs exhaustion has long been suspected as the root issue behind failures to load ssh keys and other metadata from local config drives as documented in bug #1808010. This can also lead to failures fetching metadata from n-metadata-api leaving Tempest unable to SSH into instances. 
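tmpfs mounts inside a guest are sized from its RAM (typically half of it by default), so a 64MB instance leaves almost no headroom; something like the following, run inside an affected guest, makes the pressure visible (commands are illustrative):

    # Compare total memory with the RAM-derived tmpfs sizes and usage.
    free -m
    df -h | grep tmpfs
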
This change increases the RAM of the m1.nano and m1.micro flavors by 64MB to hopefully avoid these errors going forward. This is also ahead of our eventual upgrade to Cirros 0.5.0 where 128MB becomes a requirement. Related-Bug: #1808010 Change-Id: I4b597579cf89939955d3c110c0bd58ca05de61f0 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 37b22065b4..05fcb1f1f3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -203,13 +203,13 @@ function configure_tempest { if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. disk=$(image_size_in_gib $image_uuid_alt) - openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else From 7f1a8c1c3edb532c1cc69fba74055ba58117b21c Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Tue, 19 May 2020 08:40:52 +0200 Subject: [PATCH 1269/1936] Switch to newer openstackdocstheme version Switch to openstackdocstheme 2.2.1 version. Using this version will allow especially: * Linking from HTML to PDF document * Allow parallel building of documents * Fix some rendering problems Update Sphinx version as well. Set openstackdocs_pdf_link to link to PDF file. Note that the link to the published document only works on docs.openstack.org where the PDF file is placed in the top-level html directory. The site-preview places the PDF in a pdf directory. Set openstackdocs_auto_version to not auto-version the documents. Set openstackdocs_auto_name to use 'project' as name. openstackdocstheme renames some variables, so follow the renames before the next release removes them. A couple of variables are also not needed anymore, remove them. Change pygments_style to 'native' since old theme version always used 'native' and the theme now respects the setting and using 'sphinx' can lead to some strange rendering. 
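The rendering changes are easiest to verify locally with the doc environments already defined in tox.ini earlier in this series:

    # HTML build with the new theme and Sphinx pins.
    tox -e docs
    # Optional PDF build, which the new openstackdocs_pdf_link points readers at.
    tox -e pdf-docs
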
See also http://lists.openstack.org/pipermail/openstack-discuss/2020-May/014971.html Change-Id: I26887b175b9c1ced7347289b7d5d4f57a20ec36a --- doc/requirements.txt | 4 ++-- doc/source/conf.py | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/doc/requirements.txt b/doc/requirements.txt index 6ca2d60eaa..ffce3ff74c 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,8 +2,8 @@ pbr>=2.0.0,!=2.1.0 Pygments docutils -sphinx>=2.0.0,!=2.1.0 -openstackdocstheme>=2.0.0 +sphinx>=2.0.0,!=2.1.0 # BSD +openstackdocstheme>=2.2.1 # Apache-2.0 nwdiag blockdiag sphinxcontrib-blockdiag diff --git a/doc/source/conf.py b/doc/source/conf.py index 56043ba6f7..2e17da17f8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -30,9 +30,15 @@ 'sphinxcontrib.nwdiag' ] # openstackdocstheme options -repository_name = 'openstack-dev/devstack' -bug_project = 'devstack' -bug_tag = '' +openstackdocs_repo_name = 'openstack-dev/devstack' +openstackdocs_pdf_link = True +openstackdocs_bug_project = 'devstack' +openstackdocs_bug_tag = '' +openstackdocs_auto_name = False +# This repo is not tagged, so don't set versions +openstackdocs_auto_version = False +version = '' +release = '' todo_include_todos = True @@ -81,7 +87,7 @@ show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['DevStack-doc.'] From e726ecb537ee09187169902f19840ab432eeb5ae Mon Sep 17 00:00:00 2001 From: Riccardo Pittau Date: Thu, 14 May 2020 11:35:03 +0200 Subject: [PATCH 1270/1936] Remove sgabios.bin workaround The bug has been fixed since a while, also in recent distributions, for example Ubuntu 20.04, the sgabios.bin ROM is provided directly by qemu-system-data as an actual file under /usr/share/qemu and it conflicts with the one provided by sgabios, so removing the workaround is actually needed to prevent failures. Change-Id: Ib5f23dbd8839a0927418692054f4ed4abd76babc --- lib/nova_plugins/hypervisor-ironic | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 113e2a75ea..b147c4327a 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -68,13 +68,6 @@ function install_nova_hypervisor { return fi install_libvirt - if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] && is_ubuntu; then - # Ubuntu packaging+apparmor issue prevents libvirt from loading - # the ROM from /usr/share/misc. Workaround by installing it directly - # to a directory that it can read from. (LP: #1393548) - sudo rm -rf /usr/share/qemu/sgabios.bin - sudo cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin - fi } # start_nova_hypervisor - Start any required external services From 7a0fa4fd9e5db7253fee0820fc002703d43bca3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20J=C3=B3zefczyk?= Date: Thu, 5 Mar 2020 16:55:50 +0100 Subject: [PATCH 1271/1936] Update cirros image to cirros-0.5.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New cirros with recent fixes for metadata service has been released. [1] Let's update the image version in gate. This also includes a fix to pass image name to SDK tests. 
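Anyone who needs to stay on the previous image for a while can still pin it before stacking; this assumes the usual CIRROS_VERSION knob in stackrc, whose default is what this change bumps (0.4.0 being the previously shipped release):

    # local.conf override to keep the older image.
    CIRROS_VERSION=0.4.0
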
[1] https://github.com/cirros-dev/cirros/pull/11/commits/e40bcd2964aa496a9d03e1aaf95cf7a86938f129 Change-Id: I6ccd646f1c22a99bed0bebf6e363d2190241b667 Co-authored-by: Radosław Piliszek --- functions-common | 5 +++++ stackrc | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index f50ff56c7d..dea5aa93a8 100644 --- a/functions-common +++ b/functions-common @@ -129,6 +129,11 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all + cat >> $CLOUDS_YAML < Date: Tue, 26 May 2020 16:21:45 -0700 Subject: [PATCH 1272/1936] swift: Fix s3api/keystone interaction For a long time, swift3 recommended a pipeline like ... swift3 s3token authtoken keystoneauth ... This led to inefficiencies where the proxy would first contact Keystone to validate the S3 signature and issue a token, then contact Keystone *again* to validate the token ID that was just issued. After s3token moved into the swift3 repo, it was improved to be able to put all of the headers into the WSGI environment that Swift's keystoneauth middleware expected and the recommended pipeline was changed to something like ... authtoken s3api s3token keystoneauth ... At the time, the old order would still work, it would just be less efficient. When support was added for Keystone v3, however, the new order became mandatory. All of that happened before swift3 moved back into Swift as s3api, but the pipeline placement problems are the same: Keystone users won't be able to use the S3 api with the current order. Change-Id: Id0659f109cc2fc12ddb371df0b26812ba8c442d9 Related-Change: I21e38884a2aefbb94b76c76deccd815f01db7362 Related-Change: Ic9af387b9192f285f0f486e7171eefb23968007e --- lib/swift | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 5be9e3575e..b6c06c57bd 100644 --- a/lib/swift +++ b/lib/swift @@ -428,10 +428,11 @@ function configure_swift { swift_pipeline+=" s3api" fi if is_service_enabled keystone; then + swift_pipeline+=" authtoken" if is_service_enabled s3api;then swift_pipeline+=" s3token" fi - swift_pipeline+=" authtoken keystoneauth" + swift_pipeline+=" keystoneauth" fi swift_pipeline+=" tempauth " From 6681a1f12651d7ee3930051e905d305042c93363 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 26 May 2020 20:11:24 -0700 Subject: [PATCH 1273/1936] swift: Configure s3token appropriately We need an appropriate auth_uri for s3token to be able to contact Keystone. Since tempauth is always enabled, we want to delay the auth decision until after tempauth has had a chance to try. Change-Id: Ie4ff33a617b9dc74d51d037ec8ebd0d9787dd76d --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index b6c06c57bd..3c121ca6ec 100644 --- a/lib/swift +++ b/lib/swift @@ -431,6 +431,8 @@ function configure_swift { swift_pipeline+=" authtoken" if is_service_enabled s3api;then swift_pipeline+=" s3token" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true fi swift_pipeline+=" keystoneauth" fi From 65102e772e0fdbd5d3b0ac5eebb3179b461d6db0 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 27 May 2020 14:24:09 +0100 Subject: [PATCH 1274/1936] nova: Remove configuration of '[scheduler] driver' This option was deprecated in Ussuri [1] as nova no longer provided any schedulers aside from the filter scheduler and the existence of third-party schedulers was very unlikely. 
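On a stacked host the effect is visible in the rendered proxy pipeline, which should now list authtoken and s3token ahead of keystoneauth (the path below is the usual SWIFT_CONF_DIR default and may differ):

    # Print the generated middleware ordering.
    grep '^pipeline' /etc/swift/proxy-server.conf
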
Stop configuring this and simply use the default. [1] https://review.opendev.org/#/c/707225/ Change-Id: Iabdd1d00e00ee269334f0fe0db265a97207e2dc6 Signed-off-by: Stephen Finucane --- lib/nova | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/lib/nova b/lib/nova index a842a61fd0..4da0462dae 100644 --- a/lib/nova +++ b/lib/nova @@ -96,10 +96,6 @@ NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} -# Nova supports pluggable schedulers. The default ``FilterScheduler`` -# should work in most cases. -SCHEDULER=${SCHEDULER:-filter_scheduler} - # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" @@ -398,11 +394,8 @@ function create_nova_conf { fi iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" - iniset $NOVA_CONF scheduler driver "$SCHEDULER" iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" - if [[ $SCHEDULER == "filter_scheduler" ]]; then - iniset $NOVA_CONF scheduler workers "$API_WORKERS" - fi + iniset $NOVA_CONF scheduler workers "$API_WORKERS" iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" if [[ $SERVICE_IP_VERSION == 6 ]]; then iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" From 90c6c65987341c4f21595439be0afbc314067a3c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 29 May 2020 06:06:08 +0000 Subject: [PATCH 1275/1936] Updated from generate-devstack-plugins-list Change-Id: I77d03c088a6b5019ac69048becfff3f24573a0b3 --- doc/source/plugin-registry.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 42c5fc1324..05a19ac4fc 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -31,7 +31,6 @@ openstack/ceilometer `https://opendev.org/openstack/ceilomet openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ -openstack/congress `https://opendev.org/openstack/congress `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ @@ -84,7 +83,6 @@ openstack/networking-l2gw `https://opendev.org/openstack/networki openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ openstack/networking-onos `https://opendev.org/openstack/networking-onos `__ -openstack/networking-ovn `https://opendev.org/openstack/networking-ovn `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ From de41a18909a98b695642d0c4abc24d3d755cd8ff Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Fri, 29 May 2020 07:23:48 +0000 Subject: [PATCH 1276/1936] Update distros on the docs start page Centos 7 is no longer supported, replace with Centos 8. Also Debian hasn't been working for some time and progress in fixing it has stalled, so drop it for now. Change-Id: Ic1513b20f296978bca095c7aa79f022d7d9ab7ac Closes-Bug: 1881183 --- doc/source/index.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 7923cb88c7..8b8acde38c 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,8 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 7, as well as Debian and -OpenSUSE. +latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the most tested, and will probably go the smoothest. From 312517d5101206b33d3c395d27ec93d385b7fd24 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 22 Jun 2018 22:23:29 +1000 Subject: [PATCH 1277/1936] Use uwsgi binary from path All these uwsgi invocations assume that the uwsgi binary is in the same directory as their project binaries are installed into (probably /usr/bin). That may not be correct -- for example if using a packaged uwsgi on Fedora the binary will live in /usr/sbin/uwsgi (not /usr/bin where the project files from pip are). Switch invocations to just find it in the path. Change-Id: I298e3374e9c84e209ffcabbaaacda17f8df19f4f --- lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/neutron-legacy | 2 +- lib/nova | 4 ++-- lib/placement | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index fd960535d9..c2e55f9173 100644 --- a/lib/cinder +++ b/lib/cinder @@ -492,7 +492,7 @@ function start_cinder { start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT fi else - run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi fi diff --git a/lib/glance b/lib/glance index 9398bd2daf..8ee422df43 100644 --- a/lib/glance +++ b/lib/glance @@ -392,7 +392,7 @@ function start_glance { run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" else run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" fi diff --git a/lib/keystone b/lib/keystone index 366e6c7054..d2f72a37fe 100644 --- a/lib/keystone +++ b/lib/keystone @@ -523,7 +523,7 @@ function start_keystone { enable_apache_site keystone restart_apache_server else # uwsgi - run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" fi echo "Waiting for keystone to start..." 
diff --git a/lib/neutron b/lib/neutron index 9e6a80cf08..885df97f7c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -463,7 +463,7 @@ function start_neutron_api { done if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3d39d41b7e..4ddc05f2fe 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -477,7 +477,7 @@ function start_neutron_service_and_check { # Start the Neutron service if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then enable_service neutron-api - run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" diff --git a/lib/nova b/lib/nova index a842a61fd0..c7ffe7f4b3 100644 --- a/lib/nova +++ b/lib/nova @@ -864,7 +864,7 @@ function start_nova_api { start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT fi else - run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ fi @@ -955,7 +955,7 @@ function start_nova_rest { if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" else - run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" fi export PATH=$old_path diff --git a/lib/placement b/lib/placement index 785b0ddfca..2a449bfa90 100644 --- a/lib/placement +++ b/lib/placement @@ -144,7 +144,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" else enable_apache_site placement-api restart_apache_server From 076c9f1b4bf405994f311aaf4aba3ab179f178e3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 3 May 2018 16:13:38 +1000 Subject: [PATCH 1278/1936] Don't install uwsgi for keystone This is already unconditionally installed via install_apache_uwsgi in stack.sh; we don't need to install it again in keystone. Since we need workarounds on some platforms (see I3bc5260e77cebe852cc8d70d9eddf84ef71d74bb) we only want to do this in one place. 
Change-Id: I40d84cbdf68cf6bb5cba143b6c0c126cdb8a84d4 --- lib/keystone | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index d2f72a37fe..1910f348b7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -504,8 +504,6 @@ function install_keystone { if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then install_apache_wsgi - elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then - pip_install uwsgi fi } From 4f2a6171056f9dc3adaaf6752f559bd95a586428 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Tue, 2 Jun 2020 20:10:56 +0200 Subject: [PATCH 1279/1936] Stop to use the __future__ module. The __future__ module [1] was used in this context to ensure compatibility between python 2 and python 3. We previously dropped the support of python 2.7 [2] and now we only support python 3 so we don't need to continue to use this module and the imports listed below. Imports commonly used and their related PEPs: - `division` is related to PEP 238 [3] - `print_function` is related to PEP 3105 [4] - `unicode_literals` is related to PEP 3112 [5] - `with_statement` is related to PEP 343 [6] - `absolute_import` is related to PEP 328 [7] [1] https://docs.python.org/3/library/__future__.html [2] https://governance.openstack.org/tc/goals/selected/ussuri/drop-py27.html [3] https://www.python.org/dev/peps/pep-0238 [4] https://www.python.org/dev/peps/pep-3105 [5] https://www.python.org/dev/peps/pep-3112 [6] https://www.python.org/dev/peps/pep-0343 [7] https://www.python.org/dev/peps/pep-0328 Change-Id: Icf8bd00b271f44b6bb0c932d6d49fe6de8a22537 --- tools/worlddump.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 0a4df52337..6a618f5ee6 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -17,8 +17,6 @@ """Dump the state of the world for post mortem.""" -from __future__ import print_function - import argparse import datetime from distutils import spawn From c3db92b9d74704f5bf171a50103f5735e411222b Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 4 Jun 2020 23:57:18 +0900 Subject: [PATCH 1280/1936] Doc: g-reg is no longer required The glance-registry service was deprecated in Queeens release, and no longer required. Change-Id: I0d2b4604cd39d5131410b8038f92057128ca7b75 --- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 669a70d0bb..7fde6f14f8 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -62,7 +62,7 @@ Edit your ``/opt/stack/devstack/local.conf`` to look like ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy ENABLED_SERVICES+=,placement-api,placement-client # Glance - ENABLED_SERVICES+=,g-api,g-reg + ENABLED_SERVICES+=,g-api # Neutron ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api From c3b58f5335fb3545bf49b5d542ef9ed702de43c8 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 8 Jun 2020 11:58:29 +0200 Subject: [PATCH 1281/1936] Fix typos in networking document The FLOATING_RANGE variable should specify the network address of the used prefix for clarity. 
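For clarity, the corrected localrc fragment from the guide then reads
(addresses are illustrative):

    [[local|localrc]]
    PUBLIC_INTERFACE=eth0
    HOST_IP=10.42.0.52
    # the network address of the prefix, not the host's own address
    FLOATING_RANGE=10.42.0.0/24
    PUBLIC_NETWORK_GATEWAY=10.42.0.1
    Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254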
Change-Id: I547bd42d8bdc5f0f2001d47f2d5b43729773b1bc Closes-Bug: 1870204 --- doc/source/networking.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 74010cd01a..e65c7ef195 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -40,7 +40,7 @@ updates. Tempest tests will work in this environment. Locally Accessible Guests ========================= -If you want to make you guests accessible from other machines on your +If you want to make your guests accessible from other machines on your network, we have to connect ``br-ex`` to a physical interface. Dedicated Guest Interface @@ -81,7 +81,7 @@ of addresses, and have them all exactly correct. [[local|localrc]] PUBLIC_INTERFACE=eth0 HOST_IP=10.42.0.52 - FLOATING_RANGE=10.42.0.52/24 + FLOATING_RANGE=10.42.0.0/24 PUBLIC_NETWORK_GATEWAY=10.42.0.1 Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254 From 729546a1adfa10c8591e834cc85004d977e1c3dd Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Thu, 12 Mar 2020 10:03:38 +0000 Subject: [PATCH 1282/1936] Remove tempest deprecated img_dir option Tempest option scenario.img_dir has been deprecated more than 4 years, it's time to remove it from devstack. img_file option should contain the full path to the image. This patch removes setting of img_dir option and makes img_file one contain the full path of an image. Change-Id: I71102095f3603915f0bc7d21f2e18c4eac4e95ec Depends-On: https://review.opendev.org/#/c/710996/ Related-Bug: #1393881 --- lib/tempest | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 05fcb1f1f3..0d6bdc9dbc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -433,8 +433,7 @@ function configure_tempest { SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME fi - iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR - iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE + iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME From 84737ebd96327310ec5f8f7c312aeae12cbeb234 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 15 Jun 2020 09:48:46 +0200 Subject: [PATCH 1283/1936] Work around uwsgi breakage uwsgi broke installation from source with their latest release [0]. Since we want to move away from source installation anyway, make grenade based jobs non-voting for the moment so that we can backport [1] properly. 
[0] https://bugs.launchpad.net/bugs/1883468 [1] https://review.opendev.org/577955 Related-Bug: 1883468 Change-Id: I8e47bb7c70031a4df7f1af6b811df4c6cc784b2a --- .zuul.yaml | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 602975a165..0dc5f3e09d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -628,10 +628,12 @@ voting: false irrelevant-files: *dsvm-irrelevant-files - grenade: + voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - neutron-grenade-multinode: + voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -668,18 +670,18 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - neutron-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + # - neutron-grenade-multinode: + # irrelevant-files: + # - ^.*\.rst$ + # - ^doc/.*$ - neutron-tempest-linuxbridge: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + # - grenade: + # irrelevant-files: + # - ^.*\.rst$ + # - ^doc/.*$ - openstacksdk-functional-devstack: irrelevant-files: - ^.*\.rst$ From 2d903568ed4158aa668bbda6986307a8780c71a4 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 3 May 2018 10:51:30 +1000 Subject: [PATCH 1284/1936] Use packaged uwsgi on Fedora and Ubuntu Building uwsgi from source was a workaround that was introduced a long time ago, it doesn't seem like it is needed anymore and will actually fail for Ubuntu 20.04. Also it doesn't match what will happen for most real-world installations, so let's try to get back to using distro packages. We'll still use the source install for RHEL/Centos, it remains to be tested whether we can get back to using distro versions there, too. Change-Id: I82f539bfa533349293dd5a8ce309c9cc0ffb0393 --- lib/apache | 68 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/lib/apache b/lib/apache index 84cec73234..a31188bebb 100644 --- a/lib/apache +++ b/lib/apache @@ -82,26 +82,52 @@ function install_apache_uwsgi { apxs="apxs" fi - # Ubuntu xenial is back level on uwsgi so the proxy doesn't - # actually work. Hence we have to build from source for now. + # This varies based on packaged/installed. If we've + # pip_installed, then the pip setup will only build a "python" + # module that will be either python2 or python3 depending on what + # it was built with. # - # Centos 7 actually has the module in epel, but there was a big - # push to disable epel by default. As such, compile from source - # there as well. - - local dir - dir=$(mktemp -d) - pushd $dir - pip_install uwsgi - pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt - local uwsgi - uwsgi=$(ls uwsgi*) - tar xvf $uwsgi - cd uwsgi*/apache2 - sudo $apxs -i -c mod_proxy_uwsgi.c - popd - # delete the temp directory - sudo rm -rf $dir + # For package installs, the distro ships both plugins and you need + # to select the right one ... it will not be autodetected. + if python3_enabled; then + UWSGI_PYTHON_PLUGIN=python3 + else + UWSGI_PYTHON_PLUGIN=python + fi + + if is_ubuntu; then + install_package uwsgi \ + uwsgi-plugin-python \ + uwsgi-plugin-python3 \ + libapache2-mod-proxy-uwsgi + elif [[ $os_VENDOR == "Fedora" ]]; then + # Note httpd comes with mod_proxy_uwsgi and it is loaded by + # default; the mod_proxy_uwsgi package actually conflicts now. 
+ # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1574335 + # + # Thus there is nothing else to do after this install + install_package uwsgi \ + uwsgi-plugin-python3 + else + # Centos actually has the module in epel, but there was a big + # push to disable epel by default. As such, compile from source + # there. + local dir + dir=$(mktemp -d) + pushd $dir + pip_install uwsgi + pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt + local uwsgi + uwsgi=$(ls uwsgi*) + tar xvf $uwsgi + cd uwsgi*/apache2 + sudo $apxs -i -c mod_proxy_uwsgi.c + popd + # delete the temp directory + sudo rm -rf $dir + UWSGI_PYTHON_PLUGIN=python + fi if is_ubuntu || is_suse ; then # we've got to enable proxy and proxy_uwsgi for this to work @@ -265,7 +291,7 @@ function write_uwsgi_config { # configured after graceful shutdown iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins python + iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM @@ -318,7 +344,7 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi die-on-term true iniset "$file" uwsgi exit-on-reload false iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins python + iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM From a267c5f477d4b037a9f466305db8c8b93e105204 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 15 Jun 2020 09:53:27 +0200 Subject: [PATCH 1285/1936] Revert "Work around uwsgi breakage" This reverts commit 84737ebd96327310ec5f8f7c312aeae12cbeb234. Change-Id: I1544c1ad9cfe3ff199153736acadba0761b51fc4 --- .zuul.yaml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0dc5f3e09d..602975a165 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -628,12 +628,10 @@ voting: false irrelevant-files: *dsvm-irrelevant-files - grenade: - voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - neutron-grenade-multinode: - voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -670,18 +668,18 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - # - neutron-grenade-multinode: - # irrelevant-files: - # - ^.*\.rst$ - # - ^doc/.*$ + - neutron-grenade-multinode: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - neutron-tempest-linuxbridge: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - # - grenade: - # irrelevant-files: - # - ^.*\.rst$ - # - ^doc/.*$ + - grenade: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - openstacksdk-functional-devstack: irrelevant-files: - ^.*\.rst$ From 10c3ffd26557f1921841e456b003a77f6fb11948 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 15 Jun 2020 10:03:42 +0200 Subject: [PATCH 1286/1936] Handle uwsgi install for openSUSE Add packages for openSUSE distribution for installation of uwsgi. 
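For anyone reproducing this outside of DevStack, the equivalent manual step
should be roughly (install_package resolves to zypper on SUSE; package names
as added below):

    sudo zypper install uwsgi uwsgi-python3 apache2-mod_uwsgi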
Change-Id: I2ce8959460a79b6472bd9dd59edd7d94eccbacf5 --- lib/apache | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/apache b/lib/apache index a31188bebb..41c2e3d8dd 100644 --- a/lib/apache +++ b/lib/apache @@ -109,6 +109,10 @@ function install_apache_uwsgi { # Thus there is nothing else to do after this install install_package uwsgi \ uwsgi-plugin-python3 + elif [[ $os_VENDOR =~ openSUSE ]]; then + install_package uwsgi \ + uwsgi-python3 \ + apache2-mod_uwsgi else # Centos actually has the module in epel, but there was a big # push to disable epel by default. As such, compile from source From 66c812d392a922a5b76780a515b0a065b6fa7cdf Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 16 Jun 2020 12:13:45 +0200 Subject: [PATCH 1287/1936] Remove neutron-fwaas from the jobs' required project Neutron-fwaas is going to be deprecated in master branch with [1]. [1] https://review.opendev.org/#/c/735828/ Change-Id: I513ef36e681fc3f9e5aa9f81c9aedba716366729 --- doc/source/plugin-registry.rst | 2 -- stackrc | 4 ---- 2 files changed, 6 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 05a19ac4fc..3ab4db43ea 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -87,8 +87,6 @@ openstack/networking-powervm `https://opendev.org/openstack/networki openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ -openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ -openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ diff --git a/stackrc b/stackrc index 07c4c4b472..e323cee843 100644 --- a/stackrc +++ b/stackrc @@ -281,10 +281,6 @@ KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH} NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH} -# neutron fwaas service -NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git} -NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH} - # compute service NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} From 0ae5787611897a81d894d0f53b1ed63c452fe5ee Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 17 Feb 2020 06:11:15 +0000 Subject: [PATCH 1288/1936] Remove glance registry configuration Glance has deprecated registry serivce for long and now efforts are placed to remove the registry code from the glance repo. To avoid regression on other projects, gate jobs etc. removing configuring registry service from the devstack. 
Change-Id: I6a7be6bdc97acc43c8e985060aeea05d92642e80 --- lib/glance | 40 +--------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/lib/glance b/lib/glance index 8ee422df43..fee2cfd80f 100644 --- a/lib/glance +++ b/lib/glance @@ -67,9 +67,7 @@ GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_s GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs -GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf -GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json @@ -88,8 +86,6 @@ GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} -GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet @@ -135,31 +131,10 @@ function cleanup_glance { function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR - # Set non-default configuration options for registry - iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_REGISTRY_CONF DEFAULT workers $API_WORKERS + # Set non-default configuration options for the API server local dburl dburl=`database_connection_url glance` - iniset $GLANCE_REGISTRY_CONF database connection $dburl - iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - configure_keystone_authtoken_middleware $GLANCE_REGISTRY_CONF glance - iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2 - iniset_rpc_backend glance $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - # Configure multiple stores - if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then - local store enabled_backends - enabled_backends="" - for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do - enabled_backends+="${store}:file," - done - iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} - fi - # Set non-default configuration options for the API server iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -198,7 +173,6 @@ function configure_glance { # Store specific configs iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ fi - iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST) # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then @@ -240,21 +214,13 @@ function configure_glance { if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT iniset $GLANCE_API_CONF keystone_authtoken 
identity_uri $KEYSTONE_AUTH_URI - iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - fi - - if is_service_enabled tls-proxy; then - iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https fi # Format logging setup_logging $GLANCE_API_CONF - setup_logging $GLANCE_REGISTRY_CONF - cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI # Set non-default configuration options for the glance-cache @@ -265,7 +231,6 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD - iniset $GLANCE_CACHE_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST) # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ @@ -387,10 +352,8 @@ function start_glance { if [[ "$WSGI_MODE" != "uwsgi" ]]; then start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT fi - start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT fi - run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" if [[ "$WSGI_MODE" == "uwsgi" ]]; then run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" else @@ -406,7 +369,6 @@ function start_glance { # stop_glance() - Stop running processes function stop_glance { stop_process g-api - stop_process g-reg } # Restore xtrace From 744f50b970bf10f06e9d52de0b57d74ddcb18467 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 16 Jun 2020 22:28:28 +0200 Subject: [PATCH 1289/1936] Add neutron-ovn-tempest-ovs-release job to the check queue During the Victoria cycle we plan to switch default Neutron backend in Devstack from OVS to OVN. As first step before we will start discussion about this change with whole community, we want to add tempest ovn based neutron job to the devstack check queue so that we can keep verifying that with devstack changes. Change-Id: I8484baa7398b28ed5ef62f86b55022c7d8703f56 --- .zuul.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8c18e7155d..e30bf22ff5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -632,6 +632,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - neutron-ovn-tempest-ovs-release: + voting: false + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - tempest-multinode-full-py3: voting: false irrelevant-files: From c2c2b6b415a4289ab740e7506f68e820ecbc5818 Mon Sep 17 00:00:00 2001 From: Federico Ressi Date: Mon, 15 Jun 2020 12:48:38 +0200 Subject: [PATCH 1290/1936] Handle uwsgi install for CentOS/RHEL Change-Id: Ifa876b3e5f89258f40055fa7ce03f5e9c601771c --- lib/apache | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/apache b/lib/apache index 41c2e3d8dd..a3e9f95d4e 100644 --- a/lib/apache +++ b/lib/apache @@ -100,7 +100,7 @@ function install_apache_uwsgi { uwsgi-plugin-python \ uwsgi-plugin-python3 \ libapache2-mod-proxy-uwsgi - elif [[ $os_VENDOR == "Fedora" ]]; then + elif is_fedora; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. 
# See: @@ -114,9 +114,7 @@ function install_apache_uwsgi { uwsgi-python3 \ apache2-mod_uwsgi else - # Centos actually has the module in epel, but there was a big - # push to disable epel by default. As such, compile from source - # there. + # Compile uwsgi from source. local dir dir=$(mktemp -d) pushd $dir From 264d2a2e5fecd7ae94a4a71087ca93ea9cc25109 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 19 Jun 2020 10:31:38 -0500 Subject: [PATCH 1291/1936] Move process-stackviz role from Tempest to Devstack process-stackviz role currently defined in Tempest repo[1] and used in tempest jobs. Issue with having it in branchless Tempest is that any fix in that role cannot be backported to older Tempest. Also stackviz is not Tempest things it is only used by Tempest jobs. Stackviz can be considered as a service similar to Tempest so inatlling it in Devstack side make sense. Also that will give us advantage to handle the branch specific fixes or backpoting the fixes. This can solve the current issues we are facing on rocky branch - https://zuul.opendev.org/t/openstack/build/c1e2da80537448bfb24331fb4198a931/console#4/0/29/controller [1] https://opendev.org/openstack/tempest/src/branch/master/roles/process-stackviz Change-Id: I0ba1fd05c6391cd8bb978119ecfbb891def2d497 --- roles/process-stackviz/README.rst | 22 +++++++ roles/process-stackviz/defaults/main.yaml | 3 + roles/process-stackviz/tasks/main.yaml | 70 +++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 roles/process-stackviz/README.rst create mode 100644 roles/process-stackviz/defaults/main.yaml create mode 100644 roles/process-stackviz/tasks/main.yaml diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst new file mode 100644 index 0000000000..a8447d2355 --- /dev/null +++ b/roles/process-stackviz/README.rst @@ -0,0 +1,22 @@ +Generate stackviz report. + +Generate stackviz report using subunit and dstat data, using +the stackviz archive embedded in test images. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: stage_dir + :default: "{{ ansible_user_dir }}" + + The stage directory where the input data can be found and + the output will be produced. + +.. zuul:rolevar:: zuul_work_dir + :default: {{ devstack_base_dir }}/tempest + + Directory to work in. It has to be a fully qualified path. 
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml new file mode 100644 index 0000000000..f3bc32b149 --- /dev/null +++ b/roles/process-stackviz/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" +zuul_work_dir: "{{ devstack_base_dir }}/tempest" diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml new file mode 100644 index 0000000000..c51c66cdb3 --- /dev/null +++ b/roles/process-stackviz/tasks/main.yaml @@ -0,0 +1,70 @@ +- name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive + +- debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists + +- name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input + +- debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + +- name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U + +- name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + +- name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + +- name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists + failed_when: False + +- name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists + failed_when: False From f1ed7c77c50ac28cb58c9f7ed885c6a3e0a75403 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 11 Jun 2020 05:51:26 +0000 Subject: [PATCH 1292/1936] Use python3 pip module instead of pip binary Focal only provides a pip3 binary, no pip3.8. Instead of working around that with a symlink, use the module instead. Add version information output for this variant. 
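A short before/after illustration ("3.8" stands in for whatever
$PYTHON3_VERSION resolves to, and the package name is a placeholder):

    # Before: a versioned wrapper name that Focal does not ship
    pip3.8 install <some-package>        # fails: command not found

    # After: invoke pip as a module of the selected interpreter
    python3.8 -m pip --version
    sudo -H LC_ALL=en_US.UTF-8 python3.8 -m pip install <some-package>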
Change-Id: I7af194ecc40e4d43c10ce067a661bb6ab4ca37d4 --- files/debs/general | 1 + files/rpms/general | 1 + inc/python | 4 ++-- tools/install_pip.sh | 17 +++++++++++------ 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/files/debs/general b/files/debs/general index 2d8cd80473..4bf1ff4039 100644 --- a/files/debs/general +++ b/files/debs/general @@ -27,6 +27,7 @@ openssl pkg-config psmisc python3-dev +python3-pip python3-venv tar tcpdump diff --git a/files/rpms/general b/files/rpms/general index 303510cbd9..c42ce529e7 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -24,6 +24,7 @@ pkgconfig postgresql-devel # psycopg2 psmisc python3-devel +python3-pip redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 systemd-devel # for systemd-python tar diff --git a/inc/python b/inc/python index dd77296049..08f9959d5c 100644 --- a/inc/python +++ b/inc/python @@ -174,7 +174,7 @@ function pip_install { if python3_enabled; then echo "Using python $PYTHON3_VERSION to install $package_dir because python3_enabled=True" sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) + cmd_pip="python$PYTHON3_VERSION -m pip" else echo "Using python $PYTHON2_VERSION to install $package_dir because python3_enabled=False" cmd_pip=$(get_pip_command $PYTHON2_VERSION) @@ -217,7 +217,7 @@ function pip_uninstall { local sudo_pip="sudo -H" if python3_enabled; then sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip=$(get_pip_command $PYTHON3_VERSION) + cmd_pip="python$PYTHON3_VERSION -m pip" else cmd_pip=$(get_pip_command $PYTHON2_VERSION) fi diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 5eb538ce84..517669e282 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -5,7 +5,7 @@ # Update pip and friends to a known common version # Assumptions: -# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed +# - PYTHON3_VERSION refers to a version already installed set -o errexit @@ -53,6 +53,8 @@ function get_versions { else echo "pip: Not Installed" fi + # Show python3 module version + python${PYTHON3_VERSION} -m pip --version } @@ -125,7 +127,14 @@ function configure_pypi_alternative_url { # Show starting versions get_versions -# Do pip +if [[ -n $PYPI_ALTERNATIVE_URL ]]; then + configure_pypi_alternative_url +fi + +# Just use system pkgs on Focal +if [[ "$DISTRO" == focal ]]; then + exit 0 +fi # Eradicate any and all system packages @@ -143,10 +152,6 @@ fi install_get_pip -if [[ -n $PYPI_ALTERNATIVE_URL ]]; then - configure_pypi_alternative_url -fi - set -x # Note setuptools is part of requirements.txt and we want to make sure From 3480093b937b45e05f12d2af9df26d076f05067e Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Thu, 13 Feb 2020 09:38:35 +0000 Subject: [PATCH 1293/1936] Make devstack run on focal (Ubuntu LTS 20.04) - Add a nodeset and a platform job - Drop uwsgi-py2 pkg that no longer exists - Blacklist tests that are currently failing Change-Id: Ib4416dc2f5e003fd770f5240a8f78213c56af8e6 --- .zuul.yaml | 19 +++++++++++++++++++ lib/apache | 9 +++++---- stack.sh | 2 +- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index f3610afba0..5cb99ab6b6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,6 +8,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-focal + nodes: + - name: controller + label: ubuntu-focal + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-single-node-bionic nodes: @@ -512,6 +522,14 @@ parent: tempest-full-py3 description: openSUSE 15.x platform test nodeset: devstack-single-node-opensuse-15 + +- job: + name: devstack-platform-focal + parent: tempest-full-py3 + description: Ubuntu Focal Fossa platform test + nodeset: openstack-single-node-focal + vars: + tempest_black_regex: "(tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume|tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON|tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)" voting: false - job: @@ -605,6 +623,7 @@ - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-centos-8 + - devstack-platform-focal - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/lib/apache b/lib/apache index a3e9f95d4e..a5fbf75374 100644 --- a/lib/apache +++ b/lib/apache @@ -96,10 +96,11 @@ function install_apache_uwsgi { fi if is_ubuntu; then - install_package uwsgi \ - uwsgi-plugin-python \ - uwsgi-plugin-python3 \ - libapache2-mod-proxy-uwsgi + local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" + if "$DISTRO" == 'bionic'; then + pkg_list="${pkg_list} uwsgi-plugin-python" + fi + install_package ${pkg_list} elif is_fedora; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. diff --git a/stack.sh b/stack.sh index 80c6d4dd39..fa27743bb3 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then +if [[ ! ${DISTRO} =~ (bionic|focal|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From cd57449c9f5d8f0ecac59da359108fdba80a31f7 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 23 Jun 2020 08:08:13 +0200 Subject: [PATCH 1294/1936] Stop claiming to support Debian distros We haven't had a working job for stretch or jessie in years, attempts to get things fixed have been dropped, set let's be honest and drop those from our support list. 
Change-Id: Ia6152be79f8044f7ff039ec0911ad4938d6271f4 --- files/debs/nova | 1 - stack.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/files/debs/nova b/files/debs/nova index dce8f6ac89..a7aebbf946 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -17,7 +17,6 @@ mysql-server # NOPRIME parted pm-utils python3-mysqldb -qemu # dist:wheezy,jessie NOPRIME qemu-kvm # NOPRIME rabbitmq-server # NOPRIME socat # used by ajaxterm diff --git a/stack.sh b/stack.sh index fa27743bb3..709b97b0ef 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|focal|stretch|jessie|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then +if [[ ! ${DISTRO} =~ (bionic|focal|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 8b099c408b995ed65323a8cb58437b25cf374d5e Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 23 Jun 2020 10:30:20 -0500 Subject: [PATCH 1295/1936] Remove dragonflow reference Dragonflow was retired in 2018 and is now being retired. This removes a documentation reference to it. Change-Id: I24ab79482306a7c816b5242a981f1b508ff8f6ec Signed-off-by: Sean McGinnis --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 3ab4db43ea..eda5773a25 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -41,7 +41,6 @@ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ -openstack/dragonflow `https://opendev.org/openstack/dragonflow `__ openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ From ca486c5259277db72c73c661ef3e9620a3b65d49 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 25 Jun 2020 18:22:28 +0000 Subject: [PATCH 1296/1936] Provide integer number to arping "-w" parameter Some arping versions only accept an integer number for the "deadline" (-w) parameter. 
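For illustration (interface and address are placeholders; the flags match
the call in _move_neutron_addresses_route):

    # Rejected by arping builds that parse -w as an integer:
    sudo arping -A -c 3 -w 4.5 -I br-ex 203.0.113.10

    # Portable form used after this change:
    sudo arping -A -c 3 -w 5 -I br-ex 203.0.113.10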
Change-Id: Ie21c9b5820262d049c0fcd8147d85cc110d88272 Closes-Bug: #1885169 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index bb1536aa9c..275dbf17e5 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -622,7 +622,7 @@ function _move_neutron_addresses_route { IP_UP="sudo ip link set $to_intf up" if [[ "$af" == "inet" ]]; then IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP " + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " fi fi From 02da0c40f9511d0f52964e35145819bf9fc1eeff Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 25 Jun 2020 20:03:22 -0500 Subject: [PATCH 1297/1936] Add focal nodeset This commit adds the focal nodeset for multinode so that those can be used for projects side jobs or testing. We need to define these as first step to avoid any conflict on nodeset name if project started defining these. Example: three node focal nodeset is already defined in x/tobiko, fixing the same in depends on. -I30a6bb63269f031a74f9bff6c765d59d91088797 Depends-On: https://review.opendev.org/#/c/738128/ Change-Id: I5ce49f7a7d52d00555c14b08864bc8975956b20c Story: #2007865 Task: #40212 --- .zuul.yaml | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 5cb99ab6b6..f78f3f54c7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -108,6 +108,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-bionic nodes: @@ -168,6 +198,41 @@ nodes: - compute1 +- nodeset: + name: openstack-three-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + - name: compute2 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + - nodeset: name: openstack-three-node-bionic nodes: From 32c00890ed5f296ccb829196accfb437dbed8f6f Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 10 Apr 2019 10:33:39 +0000 Subject: [PATCH 1298/1936] Prepare for dropping keystone admin endpoint Keystone no longer has any special functionality hidden behind the admin endpoint. Stop referencing it in consumers, so it can later be dropped completely. 
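As a rough sketch, the [keystone_authtoken] section a service ends up with
now points only at the public identity endpoint (values are illustrative;
DevStack fills them in from its own variables, along with the usual
project/domain settings):

    [keystone_authtoken]
    auth_type = password
    interface = public
    auth_url = http://203.0.113.10/identity
    username = glance
    password = <service password>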
Change-Id: I04a5d77908005268cc7c59e7e9ddeea70f6732e2 --- functions-common | 2 +- lib/glance | 5 ++--- lib/keystone | 4 ++-- lib/neutron-legacy | 2 +- lib/nova_plugins/hypervisor-ironic | 2 +- lib/swift | 2 +- openrc | 4 ++-- stack.sh | 2 +- 8 files changed, 11 insertions(+), 12 deletions(-) diff --git a/functions-common b/functions-common index dea5aa93a8..6595c3de53 100644 --- a/functions-common +++ b/functions-common @@ -47,7 +47,7 @@ KILL_PATH="$(which kill)" # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ - KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ + KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ HOST_IPV6 SERVICE_IP_VERSION" diff --git a/lib/glance b/lib/glance index 4fa1b6a4e3..6d252e317c 100644 --- a/lib/glance +++ b/lib/glance @@ -208,8 +208,7 @@ function configure_glance { if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - - iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI + iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI fi # Format logging @@ -221,7 +220,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD diff --git a/lib/keystone b/lib/keystone index 1910f348b7..d4c7b063bb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -115,7 +115,7 @@ KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/ide KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI # V3 URIs -KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3 +KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3 KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 # Security compliance @@ -413,6 +413,7 @@ function configure_keystone_authtoken_middleware { local section=${3:-keystone_authtoken} iniset $conf_file $section auth_type password + iniset $conf_file $section interface public iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI iniset $conf_file $section username $admin_user iniset $conf_file $section password $SERVICE_PASSWORD @@ -561,7 +562,6 @@ function stop_keystone { # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` # - ``IDENTITY_API_VERSION`` -# - ``KEYSTONE_AUTH_URI`` # - ``REGION_NAME`` # - ``KEYSTONE_SERVICE_PROTOCOL`` # - ``KEYSTONE_SERVICE_HOST`` diff --git a/lib/neutron-legacy b/lib/neutron-legacy index bb1536aa9c..5e4251f9f6 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -372,7 +372,7 @@ function configure_mutnauq { function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI" + iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" iniset $conf neutron username "$Q_ADMIN_USERNAME" iniset $conf neutron password "$SERVICE_PASSWORD" iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index b147c4327a..bda6ef6998 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -46,7 +46,7 @@ function 
configure_nova_hypervisor { iniset $NOVA_CONF ironic auth_type password iniset $NOVA_CONF ironic username admin iniset $NOVA_CONF ironic password $ADMIN_PASSWORD - iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI + iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic project_name demo diff --git a/lib/swift b/lib/swift index 3c121ca6ec..a981dfc10a 100644 --- a/lib/swift +++ b/lib/swift @@ -527,7 +527,7 @@ function configure_swift { else iniset ${testfile} func_test auth_port 80 fi - iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI} + iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI} if [[ "$auth_vers" == "3" ]]; then iniset ${testfile} func_test auth_prefix /identity/v3/ else diff --git a/openrc b/openrc index 99d3351d53..beeaebea42 100644 --- a/openrc +++ b/openrc @@ -87,9 +87,9 @@ export OS_AUTH_TYPE=password # If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 -KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP} +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} -export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_AUTH_URI} +export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} # Currently, in order to use openstackclient with Identity API v3, # we need to set the domain which the user and project belong to. diff --git a/stack.sh b/stack.sh index 709b97b0ef..37e75185f1 100755 --- a/stack.sh +++ b/stack.sh @@ -1053,7 +1053,7 @@ cat > $TOP_DIR/userrc_early < Date: Sat, 13 Jun 2020 11:40:09 +0200 Subject: [PATCH 1299/1936] Drop keystone dedicated ports Those historic references to port 5000 and 35357 aren't being used anymore for some time, so let us drop them. Clean up some python2/3 wording along the way. No longer mention Identity API v2, which is also a thing of the past. Change-Id: Iafff097eee082f24ea2ae27ad038ad115aa36c61 --- doc/source/configuration.rst | 22 ++++--------- files/apache-keystone.template | 30 ----------------- lib/keystone | 59 +++------------------------------- openrc | 2 +- tools/create_userrc.sh | 2 +- tools/fixup_stuff.sh | 34 -------------------- 6 files changed, 12 insertions(+), 137 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 45f4ffe6e9..67c3b8a7d1 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -137,7 +137,7 @@ OS\_AUTH\_URL :: - OS_AUTH_URL=http://$SERVICE_HOST:5000/v3.0 + OS_AUTH_URL=http://$SERVICE_HOST/identity/v3.0 KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG Set command-line client log level to ``DEBUG``. These are commented @@ -430,16 +430,16 @@ Python bindings added when they are enabled. ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" -Use python3 +Use python2 ------------ -By default ``stack.sh`` uses python2 (the exact version set by the -``PYTHON2_VERSION``). This can be overriden so devstack will run -python3 (the exact version set by ``PYTHON3_VERSION``). +By default ``stack.sh`` uses python3 (the exact version set by the +``PYTHON3_VERSION``). This can be overriden so devstack will run +python2 (the exact version set by ``PYTHON2_VERSION``). :: - USE_PYTHON3=True + USE_PYTHON3=False A clean install every time -------------------------- @@ -696,16 +696,6 @@ KEYSTONE_REGION_NAME to specify the region of Keystone service. KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit it in the configuration of RegionOne. 
-Disabling Identity API v2 -+++++++++++++++++++++++++ - -The Identity API v2 is deprecated as of Mitaka and it is recommended to only -use the v3 API. It is possible to setup keystone without v2 API, by doing: - -:: - - ENABLE_IDENTITY_V2=False - .. _arch-configuration: Architectures diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..cf26c216f5 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,39 +1,9 @@ -Listen %PUBLICPORT% -Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined Require all granted - - WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-public - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - - WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - %SSLLISTEN% %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% diff --git a/lib/keystone b/lib/keystone index d4c7b063bb..29407a0e69 100644 --- a/lib/keystone +++ b/lib/keystone @@ -83,14 +83,10 @@ KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower: # Set Keystone interface configuration KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} -KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} -KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Bind hosts @@ -170,22 +166,14 @@ function _config_keystone_apache_wsgi { local keystone_ssl="" local keystone_certfile="" local keystone_keyfile="" - local keystone_service_port=$KEYSTONE_SERVICE_PORT - local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" - if is_service_enabled tls-proxy; then - keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - keystone_auth_port=$KEYSTONE_AUTH_PORT_INT - fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" fi sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " - s|%PUBLICPORT%|$keystone_service_port|g; - s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; @@ -222,21 +210,8 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications - local service_port=$KEYSTONE_SERVICE_PORT - local auth_port=$KEYSTONE_AUTH_PORT - - if is_service_enabled tls-proxy; then - # Set the service ports for a proxy to take the originals - service_port=$KEYSTONE_SERVICE_PORT_INT - auth_port=$KEYSTONE_AUTH_PORT_INT - fi - # 
Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. By default, the - # keystone server uses the public_port and admin_port which isn't going to - # work when you want to use a different port (in the case of proxy), or you - # don't want the port (in the case of putting keystone on a path in - # apache). + # admin_endpoint) so that clients use the correct endpoint. iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI @@ -270,12 +245,6 @@ function configure_keystone { iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/" - # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. - # The users from this project are globally admin as before, but it also - # allows policy changes in order to clarify the adminess scope. - #iniset $KEYSTONE_CONF resource admin_project_domain_name Default - #iniset $KEYSTONE_CONF resource admin_project_name admin - if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -510,14 +479,6 @@ function install_keystone { # start_keystone() - Start running processes function start_keystone { - # Get right service port for testing - local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_AUTH_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$KEYSTONE_SERVICE_PORT_INT - auth_protocol="http" - fi - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then enable_apache_site keystone restart_apache_server @@ -526,23 +487,13 @@ function start_keystone { fi echo "Waiting for keystone to start..." - # Check that the keystone service is running. Even if the tls tunnel - # should be enabled, make sure the internal port is checked using - # unencryted traffic at this point. - # If running in Apache, use the path rather than port. - + # Check that the keystone service is running. local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" fi - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT - start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT - fi - # (re)start memcached to make sure we have a clean memcache. 
restart_service memcached } @@ -561,11 +512,9 @@ function stop_keystone { # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``IDENTITY_API_VERSION`` +# - ``KEYSTONE_AUTH_URI`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_PROTOCOL`` -# - ``KEYSTONE_SERVICE_HOST`` -# - ``KEYSTONE_SERVICE_PORT`` +# - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ diff --git a/openrc b/openrc index beeaebea42..28f388be4c 100644 --- a/openrc +++ b/openrc @@ -86,7 +86,7 @@ export OS_AUTH_TYPE=password # # If you don't have a working .stackenv, this is the backup position -KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST/identity KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index f4a4edcbe2..c7bea4ac08 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -152,7 +152,7 @@ if [ -z "$OS_USERNAME" ]; then fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v3/ + export OS_AUTH_URL=http://localhost/identity/v3/ fi if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index e1409291b9..2ac8a47ca7 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -26,39 +26,6 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi -# Keystone Port Reservation -# ------------------------- -# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from -# being used as ephemeral ports by the system. The default(s) are 35357 and -# 35358 which are in the Linux defined ephemeral port range (in disagreement -# with the IANA ephemeral port range). This is a workaround for bug #1253482 -# where Keystone will try and bind to the port and the port will already be -# in use as an ephemeral port by another process. This places an explicit -# exception into the Kernel for the Keystone AUTH ports. -function fixup_keystone { - keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - - # Only do the reserved ports when available, on some system (like containers) - # where it's not exposed we are almost pretty sure these ports would be - # exclusive for our DevStack. - if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} - else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} - fi - else - echo_summary "WARNING: unable to reserve keystone ports" - fi -} - # Ubuntu Repositories #-------------------- # Enable universe for bionic since it is missing when installing from ISO. 
@@ -208,7 +175,6 @@ function fixup_suse { } function fixup_all { - fixup_keystone fixup_ubuntu fixup_fedora fixup_suse From d7a82f41e469fc51fb021184c1fa6c98da428411 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 23 Jun 2020 10:21:09 +0200 Subject: [PATCH 1300/1936] Drop support for python2 python2 is EOL, let's move on and only support python3. Change-Id: Ieffda4edea9cc19484c04420ed703f7141ef9f15 --- .zuul.yaml | 3 -- doc/source/configuration.rst | 11 ---- doc/source/guides/devstack-with-lbaas-v2.rst | 3 -- inc/python | 55 ++++---------------- lib/apache | 16 ++---- lib/tls | 8 +-- stackrc | 20 +++---- tools/install_pip.sh | 6 --- 8 files changed, 21 insertions(+), 101 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index f78f3f54c7..5ae14708e1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -558,9 +558,6 @@ devstack_localrc: SERVICE_IP_VERSION: 6 SERVICE_HOST: "" - # IPv6 and certificates known issue with python2 - # https://bugs.launchpad.net/devstack/+bug/1794929 - USE_PYTHON3: true - job: name: devstack-multinode diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 67c3b8a7d1..ec4a9c84e4 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -430,17 +430,6 @@ Python bindings added when they are enabled. ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" -Use python2 ------------- - -By default ``stack.sh`` uses python3 (the exact version set by the -``PYTHON3_VERSION``). This can be overriden so devstack will run -python2 (the exact version set by ``PYTHON2_VERSION``). - -:: - - USE_PYTHON3=False - A clean install every time -------------------------- diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index 7fde6f14f8..5d96ca7d74 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -41,9 +41,6 @@ Edit your ``/opt/stack/devstack/local.conf`` to look like # If you are enabling barbican for TLS offload in Octavia, include it here. # enable_plugin barbican https://opendev.org/openstack/barbican - # If you have python3 available: - # USE_PYTHON3=True - # ===== BEGIN localrc ===== DATABASE_PASSWORD=password ADMIN_PASSWORD=password diff --git a/inc/python b/inc/python index 08f9959d5c..727d52cd5b 100644 --- a/inc/python +++ b/inc/python @@ -62,7 +62,7 @@ function get_python_exec_prefix { $xtrace local PYTHON_PATH=/usr/local/bin - ( is_fedora && ! 
python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin + is_suse && PYTHON_PATH=/usr/bin echo $PYTHON_PATH } @@ -169,16 +169,9 @@ function pip_install { local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" else - local cmd_pip - local sudo_pip="sudo -H" - if python3_enabled; then - echo "Using python $PYTHON3_VERSION to install $package_dir because python3_enabled=True" - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip="python$PYTHON3_VERSION -m pip" - else - echo "Using python $PYTHON2_VERSION to install $package_dir because python3_enabled=False" - cmd_pip=$(get_pip_command $PYTHON2_VERSION) - fi + local cmd_pip="python$PYTHON3_VERSION -m pip" + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" + echo "Using python $PYTHON3_VERSION to install $package_dir" fi cmd_pip="$cmd_pip install" @@ -213,14 +206,8 @@ function pip_uninstall { local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" else - local cmd_pip - local sudo_pip="sudo -H" - if python3_enabled; then - sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8" - cmd_pip="python$PYTHON3_VERSION -m pip" - else - cmd_pip=$(get_pip_command $PYTHON2_VERSION) - fi + local cmd_pip="python$PYTHON3_VERSION -m pip" + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" fi # don't error if we can't uninstall, it might not be there $sudo_pip $cmd_pip uninstall -y $name || /bin/true @@ -457,37 +444,15 @@ function setup_package { } # Report whether python 3 should be used +# TODO(frickler): drop this once all legacy uses are removed function python3_enabled { - if [[ $USE_PYTHON3 == "True" ]]; then - return 0 - else - return 1 - fi + return 1 } # Provide requested python version and sets PYTHON variable function install_python { - # NOTE: install_python function should finally just do what install_python3 - # does as soon Python 2 support has been dropped - if python3_enabled; then - install_python3 - export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || - which python3 2>/dev/null) - if [[ "${DISTRO}" =~ (rhel8) ]]; then - # Use Python 3 as default python command so that we have only one - # python alternative to use on the system for either python and - # python3 - sudo alternatives --set python "${PYTHON}" - else - # Install anyway Python 2 for legacy scripts that still requires - # python instead of python3 command - install_package python - fi - else - echo "WARNING - Python 2 support has been deprecated in favor of Python 3" - install_package python - export PYTHON=$(which python 2>/dev/null) - fi + install_python3 + export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null) } # Install python3 packages diff --git a/lib/apache b/lib/apache index a5fbf75374..cc282006df 100644 --- a/lib/apache +++ b/lib/apache @@ -89,11 +89,7 @@ function install_apache_uwsgi { # # For package installs, the distro ships both plugins and you need # to select the right one ... it will not be autodetected. 
- if python3_enabled; then - UWSGI_PYTHON_PLUGIN=python3 - else - UWSGI_PYTHON_PLUGIN=python - fi + UWSGI_PYTHON_PLUGIN=python3 if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" @@ -150,14 +146,10 @@ function install_apache_wsgi { if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 - if python3_enabled; then - if is_package_installed libapache2-mod-wsgi; then - uninstall_package libapache2-mod-wsgi - fi - install_package libapache2-mod-wsgi-py3 - else - install_package libapache2-mod-wsgi + if is_package_installed libapache2-mod-wsgi; then + uninstall_package libapache2-mod-wsgi fi + install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi diff --git a/lib/tls b/lib/tls index d05536b45d..baafb59c8b 100644 --- a/lib/tls +++ b/lib/tls @@ -227,13 +227,7 @@ function init_CA { function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then - if python3_enabled; then - TLS_IP="IP:$TLS_IP" - else - # Lie to let incomplete match routines work with python2 - # see https://bugs.python.org/issue23239 - TLS_IP="DNS:$TLS_IP,IP:$TLS_IP" - fi + TLS_IP="IP:$TLS_IP" if [[ -n "$HOST_IPV6" ]]; then TLS_IP="$TLS_IP,IP:$HOST_IPV6" fi diff --git a/stackrc b/stackrc index e323cee843..4ffd537ef4 100644 --- a/stackrc +++ b/stackrc @@ -134,25 +134,17 @@ if [[ -r $RC_DIR/.localrc.password ]]; then fi # Control whether Python 3 should be used at all. -export USE_PYTHON3=$(trueorfalse True USE_PYTHON3) +# TODO(frickler): Drop this when all consumers are fixed +export USE_PYTHON3=True -# When Python 3 is supported by an application, adding the specific -# version of Python 3 to this variable will install the app using that -# version of the interpreter instead of 2.7. +# Adding the specific version of Python 3 to this variable will install +# the app using that version of the interpreter instead of just 3. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}} -# Just to be more explicit on the Python 2 version to use. -_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)" -export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}} - # Create a virtualenv with this -if [[ ${USE_PYTHON3} == True ]]; then - # Use the built-in venv to avoid more dependencies - export VIRTUALENV_CMD="python3 -m venv" -else - export VIRTUALENV_CMD="virtualenv " -fi +# Use the built-in venv to avoid more dependencies +export VIRTUALENV_CMD="python3 -m venv" # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 517669e282..f3fd1e2498 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -92,9 +92,6 @@ function install_get_pip { touch $LOCAL_PIP.downloaded fi sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP - if ! python3_enabled; then - sudo -H -E python $LOCAL_PIP - fi } @@ -142,9 +139,6 @@ fi # results in a nonfunctional system. pip on fedora installs to /usr so pip # can safely override the system pip for all versions of fedora if ! is_fedora && ! 
is_suse; then - if is_package_installed python-pip ; then - uninstall_package python-pip - fi if is_package_installed python3-pip ; then uninstall_package python3-pip fi From ee5cf747d8826b9f9dbf565a7a9ba2c1e25f068a Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Thu, 12 Dec 2019 15:52:22 -0500 Subject: [PATCH 1301/1936] Remove GLANCE_V1_ENABLED option This option sets enable_v1_api in glance-api.conf, a setting that was removed by change Ia086230cc8c92f7b7dfd5b001923110d5bc55d4d in July 2018, so remove the devstack option from lib/glance. It occurs in two other places: This option is used in lib/cinder to set an option that was removed from Cinder by change Ice379db9ae83420bacf9e96e242c7515930eae86 in Queens, so remove the related code. When this option is False, it is used in lib/tempest to set [image-feature-enabled]/api_v1 to False in the tempest config file. However, the default value of ths setting has been False since change Iab3a209c744375bf2618afc00a3f7731b62f557e in Sept 2018, so remove the related code. Change-Id: I4b18a0a388ed7e7a392fabeac613778e0d23dee7 --- lib/cinder | 4 ---- lib/glance | 7 ------- lib/tempest | 5 ----- 3 files changed, 16 deletions(-) diff --git a/lib/cinder b/lib/cinder index c2e55f9173..b1ed593080 100644 --- a/lib/cinder +++ b/lib/cinder @@ -311,10 +311,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $CINDER_CONF DEFAULT glance_api_version 2 - fi - # Set nova credentials (used for os-assisted-snapshots) configure_keystone_authtoken_middleware $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" diff --git a/lib/glance b/lib/glance index 4fa1b6a4e3..97af6e691d 100644 --- a/lib/glance +++ b/lib/glance @@ -73,7 +73,6 @@ GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf -GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False} if is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" @@ -152,12 +151,6 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi - # NOTE(flaper87): To uncomment as soon as all services consuming Glance are - # able to consume V2 entirely. - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $GLANCE_API_CONF DEFAULT enable_v1_api False - fi - # Glance multiple store Store specific configs if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND diff --git a/lib/tempest b/lib/tempest index 05fcb1f1f3..1ce2350620 100644 --- a/lib/tempest +++ b/lib/tempest @@ -346,11 +346,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" fi - # Image Features - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False - fi - # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From bde1804ee98d2bf1e3bd9e04c11dde53cbff119f Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 25 Jun 2020 20:19:03 -0500 Subject: [PATCH 1302/1936] Switch devstack base job to Ubuntu Focal As per Victoria cycle testing runtime[1], we need to test upstream CI/CD on Ubuntu 20.04(Focal). 
TC is in the process of defining this as a community goal[2] By moving the devstack base job to Ubuntu focal, all project side job or devstack-tempest job will automatically migrate to the new distro until they override the nodeset. As devstack is branched, all the stable jobs derived from devstack jobs will keep working on Bionic. [1] https://governance.openstack.org/tc/reference/runtimes/victoria.html [2] https://governance.openstack.org/tc/goals/proposed/migrate-ci-cd-jobs-to-ubuntu-focal.html Depends-On: https://review.opendev.org/#/c/738328/ Story: #2007865 Task: #40212 Change-Id: I63e69e898376e728eaf6d857426276fc93a1a0e3 --- .zuul.yaml | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index f78f3f54c7..cf56eae0e4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -391,7 +391,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-bionic + nodeset: openstack-single-node-focal required-projects: - opendev.org/openstack/requirements vars: @@ -565,7 +565,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-bionic + nodeset: openstack-two-node-focal description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -589,12 +589,10 @@ nodeset: devstack-single-node-opensuse-15 - job: - name: devstack-platform-focal + name: devstack-platform-bionic parent: tempest-full-py3 - description: Ubuntu Focal Fossa platform test - nodeset: openstack-single-node-focal - vars: - tempest_black_regex: "(tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume|tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON|tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)" + description: Ubuntu Bionic platform test + nodeset: openstack-single-node-bionic voting: false - job: @@ -688,7 +686,7 @@ - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-centos-8 - - devstack-platform-focal + - devstack-platform-bionic - devstack-multinode - devstack-unit-tests - openstack-tox-bashate From b066707d641696487fbd78773f191e2cfb4345f0 Mon Sep 17 00:00:00 2001 From: Xinliang Liu Date: Sun, 28 Jun 2020 08:55:28 +0000 Subject: [PATCH 1303/1936] Fix targetcli install error on ubuntu focal No targetcli package on Ubuntu Focal, it should use targetcli-fb also when "$CINDER_ISCSI_HELPER" == "lioadm". Although Xenial only has targetcli, but Xenial is dropped from CI. And starting from bionic, Ubuntu uses targetcli-fb to replace targetcli. So here we can use is_ubuntu to make ubuntu use targetcli-fb. 
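As a quick manual spot check (assuming an Ubuntu host is available; this is only an illustration, not part of the change), the packaged variant can be confirmed with:

    # Show which LIO management package each Ubuntu release ships
    apt-cache policy targetcli targetcli-fb

On Focal only targetcli-fb has an installable candidate, which is why the lioadm path needs to install targetcli-fb rather than targetcli.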
Change-Id: I6d35b6651d486e716980dcd9f4d693bed560463a --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index c2e55f9173..b1e3d0d408 100644 --- a/lib/cinder +++ b/lib/cinder @@ -419,7 +419,7 @@ function install_cinder { if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then install_package tgt elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then - if [[ ${DISTRO} == "bionic" ]]; then + if is_ubuntu; then # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 sudo mkdir -p /etc/target From 59519ca5cc92d2d34363f2644e54fc4d7934a1b4 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Mon, 29 Jun 2020 10:24:21 +0200 Subject: [PATCH 1304/1936] Make opensuse platform job non-voting again The non-voting flag was accidentally dropped in [0]. [0] Ib4416dc2f5e003fd770f5240a8f78213c56af8e6 Change-Id: If9519f1ac9afd66553e1c1410fdc16369f166b98 --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index f78f3f54c7..4142d465bb 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -587,6 +587,7 @@ parent: tempest-full-py3 description: openSUSE 15.x platform test nodeset: devstack-single-node-opensuse-15 + voting: false - job: name: devstack-platform-focal From 3cbb33e5c6068d80c50f64a543550611eca8309a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Tue, 30 Jun 2020 17:52:10 +0200 Subject: [PATCH 1305/1936] Fix python3_enable to return true Recent regression spotted by Dmitry Tantsur. DevStack dropped Py2 support but the now-unused-in-devstack python3_enable got its result nastily inversed. Change-Id: I4b37cc847a24705c4955cec2e6e45f0514705f1b --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 727d52cd5b..f98d28d995 100644 --- a/inc/python +++ b/inc/python @@ -446,7 +446,7 @@ function setup_package { # Report whether python 3 should be used # TODO(frickler): drop this once all legacy uses are removed function python3_enabled { - return 1 + return 0 } # Provide requested python version and sets PYTHON variable From 1e26508983a1ebf2c23560395ffca8b1f6f79462 Mon Sep 17 00:00:00 2001 From: Hirotaka Wakabayashi Date: Thu, 2 Jul 2020 06:19:21 +0000 Subject: [PATCH 1306/1936] Use python3-mod-wsgi instead of mod_wsgi on CentOS8 This fixes a stack.sh execution error on CentOS8. We should use python3-mod_wsgi instead of mod_wsgi since mod_wsgi is replaced by python3-mod_wsgi. The following change may affect this issue. https://git.centos.org/rpms/mod_wsgi/c/4a746b53e9e3fef74b227e016e785449160871b8?branch=c8s Change-Id: I5344ecf519e1a79091b6158c2d711d09b21fae0c Closes-Bug: #1885645 --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index cc282006df..a121fb0e6f 100644 --- a/lib/apache +++ b/lib/apache @@ -152,7 +152,7 @@ function install_apache_wsgi { install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi + install_package httpd python3-mod_wsgi # For consistency with Ubuntu, switch to the worker mpm, as # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf From 8956006e519022bc9640476674e638c7dd080478 Mon Sep 17 00:00:00 2001 From: Xinliang Liu Date: Thu, 2 Jul 2020 08:31:10 +0000 Subject: [PATCH 1307/1936] Fix string comparison String comparison should be included with square brackets. 
Change-Id: I9a4fab7848e5dba8a5b3b09a75e7aac213368706 --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index cc282006df..6670219a51 100644 --- a/lib/apache +++ b/lib/apache @@ -93,7 +93,7 @@ function install_apache_uwsgi { if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" - if "$DISTRO" == 'bionic'; then + if [[ "$DISTRO" == 'bionic' ]]; then pkg_list="${pkg_list} uwsgi-plugin-python" fi install_package ${pkg_list} From f8dbfd394a826779f3af403fd5316b9d86492802 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 6 Jul 2020 18:42:30 +0000 Subject: [PATCH 1308/1936] Revert "Drop keystone dedicated ports" This reverts commit f6286cb586eb1f861866bfdf85c4f873c79fd592. This patch is blocking glance as it needs mod_wsgi to perform new import workflow. Change-Id: I4475247dfe986114d37678b3d3d552c0c7d02ddc --- doc/source/configuration.rst | 12 ++++++- files/apache-keystone.template | 30 +++++++++++++++++ lib/keystone | 59 +++++++++++++++++++++++++++++++--- openrc | 2 +- tools/create_userrc.sh | 2 +- tools/fixup_stuff.sh | 34 ++++++++++++++++++++ 6 files changed, 132 insertions(+), 7 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index ec4a9c84e4..22f5999174 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -137,7 +137,7 @@ OS\_AUTH\_URL :: - OS_AUTH_URL=http://$SERVICE_HOST/identity/v3.0 + OS_AUTH_URL=http://$SERVICE_HOST:5000/v3.0 KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG Set command-line client log level to ``DEBUG``. These are commented @@ -685,6 +685,16 @@ KEYSTONE_REGION_NAME to specify the region of Keystone service. KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit it in the configuration of RegionOne. +Disabling Identity API v2 ++++++++++++++++++++++++++ + +The Identity API v2 is deprecated as of Mitaka and it is recommended to only +use the v3 API. It is possible to setup keystone without v2 API, by doing: + +:: + + ENABLE_IDENTITY_V2=False + .. 
_arch-configuration: Architectures diff --git a/files/apache-keystone.template b/files/apache-keystone.template index cf26c216f5..128436027d 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,9 +1,39 @@ +Listen %PUBLICPORT% +Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined Require all granted + + WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup keystone-public + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/keystone.log + CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + + WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup keystone-admin + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/keystone.log + CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + %SSLLISTEN% %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% diff --git a/lib/keystone b/lib/keystone index 29407a0e69..d4c7b063bb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -83,10 +83,14 @@ KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower: # Set Keystone interface configuration KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} +KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} +KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} +KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Bind hosts @@ -166,14 +170,22 @@ function _config_keystone_apache_wsgi { local keystone_ssl="" local keystone_certfile="" local keystone_keyfile="" + local keystone_service_port=$KEYSTONE_SERVICE_PORT + local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" + if is_service_enabled tls-proxy; then + keystone_service_port=$KEYSTONE_SERVICE_PORT_INT + keystone_auth_port=$KEYSTONE_AUTH_PORT_INT + fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" fi sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " + s|%PUBLICPORT%|$keystone_service_port|g; + s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; @@ -210,8 +222,21 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications + local service_port=$KEYSTONE_SERVICE_PORT + local auth_port=$KEYSTONE_AUTH_PORT + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + service_port=$KEYSTONE_SERVICE_PORT_INT + auth_port=$KEYSTONE_AUTH_PORT_INT + fi + # Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. + # admin_endpoint) so that clients use the correct endpoint. 
By default, the + # keystone server uses the public_port and admin_port which isn't going to + # work when you want to use a different port (in the case of proxy), or you + # don't want the port (in the case of putting keystone on a path in + # apache). iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI @@ -245,6 +270,12 @@ function configure_keystone { iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/" + # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. + # The users from this project are globally admin as before, but it also + # allows policy changes in order to clarify the adminess scope. + #iniset $KEYSTONE_CONF resource admin_project_domain_name Default + #iniset $KEYSTONE_CONF resource admin_project_name admin + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -479,6 +510,14 @@ function install_keystone { # start_keystone() - Start running processes function start_keystone { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + local auth_protocol=$KEYSTONE_AUTH_PROTOCOL + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + auth_protocol="http" + fi + if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then enable_apache_site keystone restart_apache_server @@ -487,13 +526,23 @@ function start_keystone { fi echo "Waiting for keystone to start..." - # Check that the keystone service is running. + # Check that the keystone service is running. Even if the tls tunnel + # should be enabled, make sure the internal port is checked using + # unencryted traffic at this point. + # If running in Apache, use the path rather than port. + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" fi + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT + start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT + fi + # (re)start memcached to make sure we have a clean memcache. 
restart_service memcached } @@ -512,9 +561,11 @@ function stop_keystone { # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``KEYSTONE_AUTH_URI`` +# - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_URI`` +# - ``KEYSTONE_SERVICE_PROTOCOL`` +# - ``KEYSTONE_SERVICE_HOST`` +# - ``KEYSTONE_SERVICE_PORT`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ diff --git a/openrc b/openrc index 28f388be4c..beeaebea42 100644 --- a/openrc +++ b/openrc @@ -86,7 +86,7 @@ export OS_AUTH_TYPE=password # # If you don't have a working .stackenv, this is the backup position -KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST/identity +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index c7bea4ac08..f4a4edcbe2 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -152,7 +152,7 @@ if [ -z "$OS_USERNAME" ]; then fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost/identity/v3/ + export OS_AUTH_URL=http://localhost:5000/v3/ fi if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 2ac8a47ca7..e1409291b9 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -26,6 +26,39 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi +# Keystone Port Reservation +# ------------------------- +# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from +# being used as ephemeral ports by the system. The default(s) are 35357 and +# 35358 which are in the Linux defined ephemeral port range (in disagreement +# with the IANA ephemeral port range). This is a workaround for bug #1253482 +# where Keystone will try and bind to the port and the port will already be +# in use as an ephemeral port by another process. This places an explicit +# exception into the Kernel for the Keystone AUTH ports. +function fixup_keystone { + keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} + + # Only do the reserved ports when available, on some system (like containers) + # where it's not exposed we are almost pretty sure these ports would be + # exclusive for our DevStack. + if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then + # Get any currently reserved ports, strip off leading whitespace + reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') + + if [[ -z "${reserved_ports}" ]]; then + # If there are no currently reserved ports, reserve the keystone ports + sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} + else + # If there are currently reserved ports, keep those and also reserve the + # Keystone specific ports. Duplicate reservations are merged into a single + # reservation (or range) automatically by the kernel. + sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} + fi + else + echo_summary "WARNING: unable to reserve keystone ports" + fi +} + # Ubuntu Repositories #-------------------- # Enable universe for bionic since it is missing when installing from ISO. 
@@ -175,6 +208,7 @@ function fixup_suse { } function fixup_all { + fixup_keystone fixup_ubuntu fixup_fedora fixup_suse From 6184dea966d56f13f15d926dbb09e527ae3ba39e Mon Sep 17 00:00:00 2001 From: Rui Zang Date: Mon, 6 Jul 2020 23:47:56 -0700 Subject: [PATCH 1309/1936] Remove n-api-metadata service from compute nodes Starting up the n-api-metadata service on every compute node does not solve the problem of isolated networks (no route to metadata service). It all depends on how 'enable_isolated_metadata' and related options (e.g. force_metadata) are set in the dhcp agent and what is configured for the 'nova_metadata_host' option of the q-meta service. Having a global n-api-metadata service in the control node is sufficient for a multi-node lab setup. Besides, the n-api-metadata services on compute nodes are not really working due to https://bugs.launchpad.net/nova/+bug/1815082 Change-Id: Ib8691c3eeee59758fbd98989d9460f1458ea422f Related-Bug: 1815082 --- doc/source/guides/multinode-lab.rst | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 15f02a0e5e..c0b3f58157 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -169,17 +169,12 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client + ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN -**Note:** the ``n-api-meta`` service is a version of the api server -that only serves the metadata service. It's needed because the -computes created won't have a routing path to the metadata service on -the controller. - Fire up OpenStack: :: From 95634d990572fdaf94e199e06a2b3b04f7c27b3a Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 21 Feb 2019 12:24:17 +0000 Subject: [PATCH 1310/1936] Re-enable memory_tracker The old peakmem_tracker service has been disabled in [0]; now enable the replacement memory_tracker. Also fail when the old service is still configured, otherwise consumers might never notice. Depends-On: https://review.opendev.org/739995 Change-Id: I583caf3f36a8ff41d7d4106dabc6c5f24243085e --- .zuul.yaml | 4 ++++ lib/dstat | 6 +++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 43e5d4cf8a..256ede98a6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -404,6 +404,7 @@ # Shared services dstat: true etcd3: true + memory_tracker: true mysql: true rabbit: true group-vars: @@ -411,6 +412,7 @@ devstack_services: # Shared services dstat: true + memory_tracker: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -476,6 +478,7 @@ # Shared services dstat: true etcd3: true + memory_tracker: true mysql: true rabbit: true tls-proxy: true @@ -524,6 +527,7 @@ # This list replaces the test-matrix. 
# Shared services dstat: true + memory_tracker: true tls-proxy: true # Nova services n-cpu: true diff --git a/lib/dstat b/lib/dstat index f5bd2bb948..eb03ae0fb2 100644 --- a/lib/dstat +++ b/lib/dstat @@ -35,10 +35,10 @@ function start_dstat { # to your localrc run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" - # remove support for the old name when it's no longer used (sometime in Queens) + # TODO(jh): Fail when using the old service name otherwise consumers might + # never notice that is has been removed. if is_service_enabled peakmem_tracker; then - deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead" - run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" + die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" fi } From 0cbbefc0cb6f58b09a722d1230e7ae86113b2989 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 10 Jul 2020 13:21:45 +0200 Subject: [PATCH 1311/1936] zuul: clean up some legacy jobs Replace legacy-tempest-dsvm-lvm-multibackend with its native version, cinder-tempest-lvm-multibackend. Remove legacy-tempest-dsvm-neutron-dvr-multinode-full, which was defined as an experimental job in neutron and removed during the ussuri lifecycle. See https://review.opendev.org/#/c/693630/ Change-Id: I76d1efaa3a6c1fe9825e8457438f514114b2ecad --- .zuul.yaml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 43e5d4cf8a..fc3c4704d9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -803,15 +803,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - legacy-tempest-dsvm-neutron-dvr-multinode-full: irrelevant-files: - ^.*\.rst$ - - ^doc/.*$ - neutron-tempest-dvr-ha-multinode-full: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - legacy-tempest-dsvm-lvm-multibackend: + - cinder-tempest-lvm-multibackend: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 73ad9760a384e3f872de07a174440fe8e3b174ec Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Tue, 16 Jun 2020 15:20:48 +0000 Subject: [PATCH 1312/1936] Use glance import workflow for creating image Added a new boolean option 'GLANCE_USE_IMPORT_WORKFLOW', defaulting to False. If this parameter is set to True in local.conf then devstack will use the new import workflow to create the image. In order to use the new glance import workflow, users need to set the following option in local.conf: GLANCE_USE_IMPORT_WORKFLOW=True Note that the import workflow does not work in uwsgi because of some fundamental restrictions it has. Thus, devstack must be configured with WSGI_MODE=mod_wsgi, otherwise glance will not be able to process the imports. The new helper function will abort in that case, to avoid the image never being moved to "active" state by an import task that will never be executed. 
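A minimal local.conf sketch for opting in (only the options named above are assumed; the [[local|localrc]] section is the standard devstack local.conf layout):

    [[local|localrc]]
    # Create images through glance's import workflow
    GLANCE_USE_IMPORT_WORKFLOW=True
    # The import workflow does not work under uwsgi, so run services via mod_wsgi
    WSGI_MODE=mod_wsgi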
Co-Authored-By: Abhishek Kekane Co-Authored-By: Dan Smith Needed-By: https://review.opendev.org/#/c/734184 Change-Id: I1306fe816b7a3eca1e2312c0c454be3d81118eca --- functions | 79 ++++++++++++++++++++++++++++++++++-------------------- lib/glance | 2 ++ 2 files changed, 52 insertions(+), 29 deletions(-) diff --git a/functions b/functions index 24700153a9..2d60a0821f 100644 --- a/functions +++ b/functions @@ -77,6 +77,41 @@ function get_extra_file { fi } +# Upload an image to glance using the configured mechanism +# +# Arguments: +# image name +# container format +# disk format +# path to image file +# optional properties (format of propname=value) +# +function _upload_image { + local image_name="$1" + shift + local container="$1" + shift + local disk="$1" + shift + local image="$1" + shift + local properties + local useimport + + for prop in $*; do + properties+=" --property $prop" + done + + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + if [[ "$WSGI_MODE" != "uwsgi" ]]; then + useimport="--import" + else + echo "*** Unable to use glance import workflow because WSGI_MODE=uwsgi! ***" + fi + fi + + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" +} # Retrieve an image from a URL and upload into Glance. # Uses the following variables: @@ -118,7 +153,7 @@ function upload_image { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then image_name="${image_fname%.tar.gz}" - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format ami --disk-format ami < "${image}" + _upload_image "$image_name" ami ami "$image" return fi @@ -232,7 +267,8 @@ function upload_image { vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}" + _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter" + return fi @@ -246,14 +282,9 @@ function upload_image { # Nova defaults to PV for all VHD images, but # the glance setting is needed for booting # directly from volume. - force_vm_mode="--property vm_mode=xen" + force_vm_mode="vm_mode=xen" fi - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=ovf --disk-format=vhd \ - $force_vm_mode < "${image}" + _upload_image "$image_name" ovf vhd "$image" $force_vm_mode return fi @@ -262,12 +293,7 @@ function upload_image { # Setting metadata, so PV mode is used. 
if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then image_name="${image_fname%.xen-raw.tgz}" - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=tgz --disk-format=raw \ - --property vm_mode=xen < "${image}" + _upload_image "$image_name" tgz raw "$image" vm_mode=xen return fi @@ -278,12 +304,7 @@ function upload_image { die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image" fi - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=bare --disk-format=ploop \ - --property vm_mode=$vm_mode < "${image}" + _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode return fi @@ -292,7 +313,7 @@ function upload_image { local disk_format="" local container_format="" local unpack="" - local img_property="--property hw_rng_model=virtio" + local img_property="hw_rng_model=virtio" case "$image_fname" in *.tar.gz|*.tgz) # Extract ami and aki files @@ -370,18 +391,18 @@ function upload_image { esac if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then - img_property="$img_property --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" + img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0" fi if is_arch "aarch64"; then - img_property="$img_property --property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'" + img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'" fi if [ "$container_format" = "bare" ]; then if [ "$unpack" = "zcat" ]; then - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") + _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property elif [ "$unpack" = "bunzip2" ]; then - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}") + _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property elif [ "$unpack" = "unxz" ]; then # NOTE(brtknr): unxz the file first and cleanup afterwards to # prevent timeout while Glance tries to upload image (e.g. to Swift). @@ -390,10 +411,10 @@ function upload_image { tmp_dir=$(mktemp -d) image_path="$tmp_dir/$image_name" unxz -cv "${image}" > "$image_path" - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format --file "$image_path" + _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property rm -rf $tmp_dir else - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}" + _upload_image "$image_name" $container_format $disk_format "$image" $img_property fi else # Use glance client to add the kernel the root filesystem. 
@@ -406,7 +427,7 @@ function upload_image { if [ -n "$ramdisk" ]; then ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) fi - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}" + _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi } diff --git a/lib/glance b/lib/glance index b9c23aad74..bbabb96f0f 100644 --- a/lib/glance +++ b/lib/glance @@ -65,6 +65,8 @@ GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} +GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) + GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf From 09eea0b20bf750baa10b4edb5cd52e31b19a4a3a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 9 Jul 2020 08:31:51 -0700 Subject: [PATCH 1313/1936] Make Glance standalone-ness a separate flag Full Glance functionality requires Glance being run in a configuration where it can spawn long-running task threads. The default uwsgi mode does not allow this, and the current workaround is to set WSGI_MODE to something other than uwsgi to get the devstack code to deploy Glance as a standalone service. Since this affects the entire rest of the deployment, this patch separates out a flag to control this behavior specifically for Glance. When WSGI_MODE=uwsgi, control of the Glance deployment mechanism is allowed via GLANCE_STANDALONE=True|False. If WSGI_MODE!= uwsgi then we deploy standalone Glance anyway. Change-Id: I79068ce0bd7414bc48ff534ee22f0de5d7b091cb --- functions | 4 ++-- lib/apache | 19 +++++++++++++++++++ lib/glance | 12 ++++++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 2d60a0821f..9b0ea671f5 100644 --- a/functions +++ b/functions @@ -103,10 +103,10 @@ function _upload_image { done if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then + if [[ "$GLANCE_STANDALONE" == "True" ]]; then useimport="--import" else - echo "*** Unable to use glance import workflow because WSGI_MODE=uwsgi! ***" + echo "*** Unable to use glance import workflow because glance is not standalone! 
***" fi fi diff --git a/lib/apache b/lib/apache index 6670219a51..2e0a2495f5 100644 --- a/lib/apache +++ b/lib/apache @@ -372,6 +372,25 @@ function write_local_uwsgi_http_config { restart_apache_server } +# Write a straight-through proxy for a service that runs locally and just needs +# to be reachable via the main http proxy at $loc +function write_local_proxy_http_config { + local name=$1 + local url=$2 + local loc=$3 + local apache_conf + apache_conf=$(apache_site_config_for $name) + + enable_apache_mod proxy + enable_apache_mod proxy_http + + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + function remove_uwsgi_config { local file=$1 local wsgi=$2 diff --git a/lib/glance b/lib/glance index bbabb96f0f..eebaec2a33 100644 --- a/lib/glance +++ b/lib/glance @@ -55,6 +55,13 @@ GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +# Full Glance functionality requires running in standalone mode. If we are +# not in uwsgi mode, then we are standalone, otherwise allow separate control. +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + GLANCE_STANDALONE=True +fi +GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} + # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES # has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast @@ -240,9 +247,10 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - if [[ "$WSGI_MODE" == "uwsgi" ]]; then + if [[ "$GLANCE_STANDALONE" == False ]]; then write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" else + write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi @@ -342,7 +350,7 @@ function start_glance { fi fi - if [[ "$WSGI_MODE" == "uwsgi" ]]; then + if [[ "$GLANCE_STANDALONE" == False ]]; then run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" else run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" From 2614c1bba103cb88c9a88a1dfe9c6af7ccc1cc55 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 15 Jul 2020 14:41:38 -0700 Subject: [PATCH 1314/1936] Fix ami/aki image create use of $img_property This abstracts out the conversion of key=value,... property lists to a function and makes both _upload_image() and the two ami/aki image create calls use it. The move to bare key=value properties introduced a regression for ami/aki where the --property flag stopped being passed to osc in that case. 
Change-Id: Idf7fdfe3f5800f79f6c48f9d9606a7b53436a730 --- functions | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 9b0ea671f5..e687a9848d 100644 --- a/functions +++ b/functions @@ -77,6 +77,19 @@ function get_extra_file { fi } +# Generate image property arguments for OSC +# +# Arguments: properties, one per, like propname=value +# +# Result is --property propname1=value1 --property propname2=value2 +function _image_properties_to_arg { + local result="" + for property in $*; do + result+=" --property $property" + done + echo $result +} + # Upload an image to glance using the configured mechanism # # Arguments: @@ -98,9 +111,7 @@ function _upload_image { local properties local useimport - for prop in $*; do - properties+=" --property $prop" - done + properties=$(_image_properties_to_arg $*) if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then if [[ "$GLANCE_STANDALONE" == "True" ]]; then @@ -422,10 +433,10 @@ function upload_image { # kernel for use when uploading the root filesystem. local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi From 49ad4850c8d5be9c90b10f118853e0288997405d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 15 Jul 2020 14:54:22 -0700 Subject: [PATCH 1315/1936] Only set hw_rng_model by default if we're on libvirt This makes no sense to set on _every_ devstack deployment, only if we are using libvirt qemu or kvm. Make it conditional. Change-Id: I860e899274646ff73b8f084a0583325239aee9cc --- functions | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e687a9848d..c34e4874b3 100644 --- a/functions +++ b/functions @@ -324,7 +324,16 @@ function upload_image { local disk_format="" local container_format="" local unpack="" - local img_property="hw_rng_model=virtio" + local img_property="" + + # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model + # to libvirt in the image properties. 
+ if [[ "$VIRT_DRIVER" == "libvirt" ]]; then + if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then + img_property="hw_rng_model=virtio" + fi + fi + case "$image_fname" in *.tar.gz|*.tgz) # Extract ami and aki files From cc0821a5867358b34d7a9c156d02bd19b2f46dec Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 15 Jul 2020 08:44:00 -0700 Subject: [PATCH 1316/1936] Default Glance to standalone mode A whole set of Glance functionality is not usable under uwsgi, including any of the more powerful async import, customization, and copying functions. In order to facilitate writing and running tempest tests for these features in all environments covered by the various jobs across all the projects that include Glance, we should default to this deployment method. It is still possible to deploy glance in uwsgi mode by setting the flag to False, and we can do that for some jobs to make sure that it continues to work. However, the default should be what we expect deployers will use, which is standalone mode. Depends-On: https://review.opendev.org/741479 Change-Id: I141acab2a07a4eebd8d850f900058bc8cbf9c7bf --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index eebaec2a33..e941168d32 100644 --- a/lib/glance +++ b/lib/glance @@ -60,7 +60,7 @@ GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} if [[ "$WSGI_MODE" != "uwsgi" ]]; then GLANCE_STANDALONE=True fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} +GLANCE_STANDALONE=${GLANCE_STANDALONE:-True} # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES From 442c57e16855863346806bd6b21e82515158b9e8 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 16 Jul 2020 10:41:13 -0700 Subject: [PATCH 1317/1936] Disable all import methods if glance is not standalone Glance should not be exposing import methods that cannot work via its API, but it does today. In order for tempest (et al) to be able to properly detect whether import is possible, we must configure the import methods in standalone mode, or disable them in wsgi mode. The referenced Glance patch will make this a requirement. Change-Id: I3bf3498d83607c5e98b70877c061dc54fc3c0a6e Needed-By: https://review.opendev.org/#/c/741497/ --- lib/glance | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/glance b/lib/glance index e941168d32..0f7cd61d72 100644 --- a/lib/glance +++ b/lib/glance @@ -183,6 +183,11 @@ function configure_glance { iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" fi + if [[ "$GLANCE_STANDALONE" == "False" ]]; then + # NOTE(danms): Do not advertise import methods if we are running in WSGI mode + iniset $GLANCE_API_CONF enabled_import_methods [] + fi + # No multiple stores for swift yet # Store the images in swift if enabled. if is_service_enabled s-proxy; then From f2577fc574d2d8299b3cfff458702321aca57e44 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Fri, 17 Jul 2020 08:19:40 +0000 Subject: [PATCH 1318/1936] Add missing glance multi-store configurations While removing registry [1] we by mistake removed some code related to multiple store configuration for glance. This must be happened during resolving merged conflicts. Adding it back. 
[1] https://review.opendev.org/708062 Change-Id: I2b84f7b7c51b7b20765a06b48c75006fd2e8ab71 --- lib/glance | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/glance b/lib/glance index eebaec2a33..6c9c0dff71 100644 --- a/lib/glance +++ b/lib/glance @@ -143,6 +143,16 @@ function configure_glance { local dburl dburl=`database_connection_url glance` + # Configure multiple stores + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + fi + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG From fcbf3e976c8fa80099698693d37afa81058803c7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 17 Jul 2020 10:14:14 -0700 Subject: [PATCH 1319/1936] Fix glance standalone when tls-proxy is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We always want to start glance on the internal port now, regardless of whether or not tls-proxy is in use, because we write_local_proxy_http_config() for the standalone case. Change-Id: I47dea645d4a852e02e25af0e1df9c28fec92c42a Co-Authored-By: Radosław Piliszek --- lib/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/glance b/lib/glance index e941168d32..f08e2ed7ab 100644 --- a/lib/glance +++ b/lib/glance @@ -252,6 +252,7 @@ function configure_glance { else write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS + iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi } From 7700d5a825e48ea5f536e580df5c769a7224afcc Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 20 Jul 2020 17:01:41 -0700 Subject: [PATCH 1320/1936] Fix setting enabled_import_methods if glance is standalone I forgot to update devstack with the section name when I did Grenade. Change-Id: I928072e935e7a7ec24609cdc4522c96f65d3ba3b --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 5fc7a7f0c3..3e5ebde935 100644 --- a/lib/glance +++ b/lib/glance @@ -195,7 +195,7 @@ function configure_glance { if [[ "$GLANCE_STANDALONE" == "False" ]]; then # NOTE(danms): Do not advertise import methods if we are running in WSGI mode - iniset $GLANCE_API_CONF enabled_import_methods [] + iniset $GLANCE_API_CONF DEFAULT enabled_import_methods [] fi # No multiple stores for swift yet From 33f8f6e68419702df3cb4b8d8af2f6587ebae527 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 21 Jul 2020 19:41:48 -0700 Subject: [PATCH 1321/1936] Remove standalone requirement for glance import As of the referenced patch in glance, we can do import in wsgi mode. Also remove the enforcement that import methods are disabled. 
Change-Id: I8da4b4ad6105bb64c4045ca80db9742591d01564 Depends-On: https://review.opendev.org/#/c/742065 --- functions | 6 +----- lib/glance | 5 ----- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/functions b/functions index c34e4874b3..cc1ca6cb25 100644 --- a/functions +++ b/functions @@ -114,11 +114,7 @@ function _upload_image { properties=$(_image_properties_to_arg $*) if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then - if [[ "$GLANCE_STANDALONE" == "True" ]]; then - useimport="--import" - else - echo "*** Unable to use glance import workflow because glance is not standalone! ***" - fi + useimport="--import" fi openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" diff --git a/lib/glance b/lib/glance index 3e5ebde935..2118636a46 100644 --- a/lib/glance +++ b/lib/glance @@ -193,11 +193,6 @@ function configure_glance { iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" fi - if [[ "$GLANCE_STANDALONE" == "False" ]]; then - # NOTE(danms): Do not advertise import methods if we are running in WSGI mode - iniset $GLANCE_API_CONF DEFAULT enabled_import_methods [] - fi - # No multiple stores for swift yet # Store the images in swift if enabled. if is_service_enabled s-proxy; then From 32d5b1ea2f7908b3558dd38d32553abeaf5db40b Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 22 Jul 2020 20:52:48 -0500 Subject: [PATCH 1322/1936] Enable image import test If glance is in standalone mode, image import works fine so enable the tempest tests. Once we will have image import or other async tasks working with glance under uwsgi then we can remove this flag and run import tests by default. Depends-On: https://review.opendev.org/#/c/741425/ Change-Id: I853e8a3815187f0aa8f05c70488ec948a97e55a6 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 47bdc22311..125749b314 100644 --- a/lib/tempest +++ b/lib/tempest @@ -345,7 +345,7 @@ function configure_tempest { if [ "$VIRT_DRIVER" = "xenserver" ]; then iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" fi - + iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From dd3731c86a13334125715e44880491042526227c Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Tue, 28 Jul 2020 08:51:41 -0500 Subject: [PATCH 1323/1936] Install bindep packages when installing lib from src Most libs maintain their own system packages in a local bindep.txt file. We don't currently use those when installing packages from source, which can result in broken package installs. This adds a flag to always attempt to install bindep packages if the bindep.txt file exists. If a file cannot be found, it will just emit a warning and carry on. Change-Id: Ia0570f837b8af1c3fee0a314b026a4a7ed27e6a9 Signed-off-by: Sean McGinnis --- functions-common | 3 ++- lib/libraries | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 6595c3de53..d99ad9212f 100644 --- a/functions-common +++ b/functions-common @@ -1412,7 +1412,8 @@ function install_bindep { local pkgs if [[ ! 
-f $file ]]; then - die $LINENO "Can not find bindep file: $file" + warn $LINENO "Can not find bindep file: $file" + return fi # converting here makes it much easier to work with passing diff --git a/lib/libraries b/lib/libraries index b4f3c31d5e..c7aa8151ae 100644 --- a/lib/libraries +++ b/lib/libraries @@ -72,7 +72,7 @@ function _install_lib_from_source { local name=$1 if use_library_from_git "$name"; then git_clone_by_name "$name" - setup_dev_lib "$name" + setup_dev_lib -bindep "$name" fi } From 587e0a3510ea3b40abbdffcd1fe3204e416c4bc9 Mon Sep 17 00:00:00 2001 From: Carlos Goncalves Date: Sat, 1 Aug 2020 21:47:55 +0200 Subject: [PATCH 1324/1936] Fix is_fedora RHEL 8 detection While RHEL 7 is detected as RedHatEnterpriseServer, RHEL 8 is RedHatEnterprise. $ lsb_release -i -s RedHatEnterprise Change-Id: I3d750d808c6ebea9c230f0508cdbc016415b9922 --- functions-common | 1 + 1 file changed, 1 insertion(+) diff --git a/functions-common b/functions-common index d99ad9212f..acb3a6c920 100644 --- a/functions-common +++ b/functions-common @@ -451,6 +451,7 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ + [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \ [ "$os_VENDOR" = "Virtuozzo" ] } From 04fdd8c9eb37a34bb3155bee1f9d21c5dfb0d794 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sun, 2 Aug 2020 13:13:00 +0200 Subject: [PATCH 1325/1936] Fix propose-updates job We need to use python3, our deps are no longer installed in python. Includes the current set of updates to the plugin registry. Change-Id: I4753ddf60ed066cc11fa74dbbd63210dbad733a8 --- doc/source/plugin-registry.rst | 4 +--- tools/generate-devstack-plugins-list.sh | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index eda5773a25..4e7c2d7b2f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -81,7 +81,6 @@ openstack/networking-hyperv `https://opendev.org/openstack/networki openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw `__ openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ -openstack/networking-onos `https://opendev.org/openstack/networking-onos `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ @@ -113,7 +112,6 @@ openstack/solum `https://opendev.org/openstack/solum `__ openstack/tacker `https://opendev.org/openstack/tacker `__ openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ -openstack/tricircle `https://opendev.org/openstack/tricircle `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ openstack/vitrage `https://opendev.org/openstack/vitrage `__ @@ -121,6 +119,7 @@ openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage- openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ openstack/watcher `https://opendev.org/openstack/watcher `__ openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard `__ +openstack/whitebox-tempest-plugin 
`https://opendev.org/openstack/whitebox-tempest-plugin `__ openstack/zaqar `https://opendev.org/openstack/zaqar `__ openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui `__ openstack/zun `https://opendev.org/openstack/zun `__ @@ -199,7 +198,6 @@ x/trio2o `https://opendev.org/x/trio2o `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ -x/whitebox-tempest-plugin `https://opendev.org/x/whitebox-tempest-plugin `__ ======================================== === diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh index a3aa7ba63d..3307943df9 100755 --- a/tools/generate-devstack-plugins-list.sh +++ b/tools/generate-devstack-plugins-list.sh @@ -54,7 +54,7 @@ if [[ -r data/devstack-plugins-registry.header ]]; then cat data/devstack-plugins-registry.header fi -sorted_plugins=$(python tools/generate-devstack-plugins-list.py) +sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py) # find the length of the name column & pad name_col_len=$(echo "${sorted_plugins}" | wc -L) From 1d468d45dbb330c31c28e54d9c9abe5f8ec394ba Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 9 Jun 2020 14:35:52 +0100 Subject: [PATCH 1326/1936] [OVN] Move OVN module from Neutron to DevStack As part of the Victoria PTG the Neutron team entertained the idea of having the OVN driver as the default backend in DevStack (this hasn't yet being decided by the community, this will be discussed within this cycle). For this to happen, we also would need to move the module that configures OVN to the DevStack repository. This is what this patch is doing. Note that we are updating the lib/neutron-legacy module instead of lib/neutron in this patch, this is because as part of the PTG the Neutron team has decided to un-deprecate the neutron-legacy module since the "new" lib/neutron module is broken and nobody is current working on it (also all services uses neutron-legacy). Also, the ovsdbapp has been added to the ALL_LIBS list because a gate job in the ovsdbapp project repository relies on installing the library from source instead of pip to run. 
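As a rough sketch of the intended usage (service and variable names are those
defined by the ovn_agent module added below, and some of the values shown are
already its defaults), an OVN-based DevStack would be enabled with a localrc
along these lines:

    Q_AGENT=ovn
    Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger
    Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve
    Q_ML2_TENANT_NETWORK_TYPE=geneve
    # ovn_sanity_check refuses to run alongside the OVS L2 agent or the L3 agent
    disable_service q-agt q-l3
    enable_service ovn-northd ovn-controller q-ovn-metadata-agent
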
Depends-On: https://review.opendev.org/#/c/740663/ Change-Id: Ib4194329474e8d68a90886d2a04f027eecd741df Signed-off-by: Lucas Alvares Gomes --- lib/neutron-legacy | 36 ++ lib/neutron_plugins/ml2 | 12 +- lib/neutron_plugins/ovn_agent | 724 ++++++++++++++++++++++++++++++++++ stack.sh | 5 + stackrc | 5 + tests/test_libs_from_pypi.sh | 2 +- 6 files changed, 777 insertions(+), 7 deletions(-) create mode 100644 lib/neutron_plugins/ovn_agent diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 59649ef84a..363c62e4c2 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -362,6 +362,14 @@ function configure_mutnauq { _configure_neutron_ceilometer_notifications fi + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + if is_service_enabled q-port-forwarding neutron-port-forwarding; then + configure_port_forwarding + fi + configure_ovn_plugin + fi + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" # devstack is not a tool for running uber scale OpenStack # clouds, therefore running without a dedicated RPC worker @@ -438,6 +446,10 @@ function install_mutnauq { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi } # install_neutron_agent_packages() - Collect source and prepare @@ -459,6 +471,22 @@ function configure_neutron_after_post_config { fi } +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + init_ovn + start_ovn + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi + fi +} + # Start running processes function start_neutron_service_and_check { local service_port=$Q_PORT @@ -575,6 +603,10 @@ function stop_mutnauq_other { function stop_mutnauq { stop_mutnauq_other stop_mutnauq_l2_agent + + if [[ $Q_AGENT == "ovn" ]]; then + stop_ovn + fi } # _move_neutron_addresses_route() - Move the primary IP to the OVS bridge @@ -668,6 +700,10 @@ function cleanup_mutnauq { for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn + fi } diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 497b6c6bdc..ae4b251d83 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -7,6 +7,12 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace +# Default openvswitch L2 agent +Q_AGENT=${Q_AGENT:-openvswitch} +if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then + source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +fi + # Enable this to simply and quickly enable tunneling with ML2. 
# Select either 'gre', 'vxlan', or 'gre,vxlan' Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} @@ -17,12 +23,6 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then Q_TUNNEL_TYPES=gre fi -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} -if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then - source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent -fi - # List of MechanismDrivers to load Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} # Default GRE TypeDriver options diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent new file mode 100644 index 0000000000..e29b84fa48 --- /dev/null +++ b/lib/neutron_plugins/ovn_agent @@ -0,0 +1,724 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +# Global Sources +# -------------- + +# There are some ovs functions OVN depends on that must be sourced from +# the ovs neutron plugins. +source ${TOP_DIR}/lib/neutron_plugins/ovs_base +source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent + +# Load devstack ovs base functions +source $NEUTRON_DIR/devstack/lib/ovs + + +# Defaults +# -------- + +# Set variables for building OVN from source +OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} +OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') +OVN_REPO_NAME=${OVN_REPO_NAME:-ovn} +OVN_BRANCH=${OVN_BRANCH:-v20.06.1} +# The commit removing OVN bits from the OVS tree, it is the commit that is not +# present in OVN tree and is used to distinguish if OVN is part of OVS or not. +# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d +OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d + +if is_service_enabled tls-proxy; then + OVN_PROTO=ssl +else + OVN_PROTO=tcp +fi + +# How to connect to ovsdb-server hosting the OVN SB database. +OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642} + +# How to connect to ovsdb-server hosting the OVN NB database +OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641} + +# ml2/config for neutron_sync_mode +OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log} + +# Configured DNS servers to be used with internal_dns extension, only +# if the subnet DNS is not configured. +OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8} + +# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the +# hypervisor/chassis where a routers gateway should be hosted in OVN. The +# default OVN L3 scheduler is leastloaded +OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded} + +# A UUID to uniquely identify this system. If one is not specified, a random +# one will be generated. A randomly generated UUID will be saved in a file +# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack. +OVN_UUID=${OVN_UUID:-} + +# Whether or not to build the openvswitch kernel module from ovs. This is required +# unless the distro kernel includes ovs+conntrack support. 
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES) + +# Whether or not to install the ovs python module from ovs source. This can be +# used to test and validate new ovs python features. This should only be used +# for development purposes since the ovs python version is controlled by OpenStack +# requirements. +OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE) + +# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version +# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined +# based on the ML2 overlay_ip_version option. The ML2 framework will use this to +# configure the MTU DHCP option. +OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} + +# The log level of the OVN databases (north and south) +OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} + +OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini +OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} + +OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST + +OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) + +OVS_PREFIX=/usr/local +OVS_SBINDIR=$OVS_PREFIX/sbin +OVS_BINDIR=$OVS_PREFIX/bin +OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch +OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch +OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts +OVS_DATADIR=$DATA_DIR/ovs + +OVN_DATADIR=$DATA_DIR/ovn +OVN_SHAREDIR=$OVS_PREFIX/share/ovn +OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts +OVN_RUNDIR=$OVS_PREFIX/var/run/ovn + +NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) +NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" + +STACK_GROUP="$( id --group --name "$STACK_USER" )" + +# Defaults Overwrite +# ------------------ + +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} +Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,dns,qos} +# this one allows empty: +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} + + +# Utility Functions +# ----------------- + +function use_new_ovn_repository { + if [ -z "$is_new_ovn" ]; then + local ovs_repo_dir=$DEST/$OVS_REPO_NAME + if [ ! -d $ovs_repo_dir ]; then + clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH + fi + # Check the split commit exists in the current branch + pushd $ovs_repo_dir + git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH + is_new_ovn=$? + popd + fi + return $is_new_ovn +} + +# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge +# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup +# removed. The call is not relevant for OVN, as it is specific to the use +# of Neutron's OVS agent and hangs when running stack.sh because +# neutron-ovs-cleanup uses the OVSDB native interface. 
+function ovn_base_setup_bridge { + local bridge=$1 + local addbr_cmd="ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd + ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _start_process { + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $1 + $SYSTEMCTL restart $1 +} + +function _run_process { + local service=$1 + local cmd="$2" + local stop_cmd="$3" + local group=$4 + local user=${5:-$STACK_USER} + + local systemd_service="devstack@$service.service" + local unit_file="$SYSTEMD_DIR/$systemd_service" + local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + + echo "Starting $service executed command": $cmd + + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + iniset -sudo $unit_file "Service" "Type" "forking" + iniset -sudo $unit_file "Service" "RemainAfterExit" "yes" + iniset -sudo $unit_file "Service" "KillMode" "mixed" + iniset -sudo $unit_file "Service" "LimitNOFILE" "65536" + iniset -sudo $unit_file "Service" "Environment" "$environment" + if [ -n "$stop_cmd" ]; then + iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd" + fi + + _start_process $systemd_service + + local testcmd="test -e $OVS_RUNDIR/$service.pid" + test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 + sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info +} + +function clone_repository { + local repo=$1 + local dir=$2 + local branch=$3 + # Set ERROR_ON_CLONE to false to avoid the need of having the + # repositories like OVN and OVS in the required_projects of the job + # definition. + ERROR_ON_CLONE=false git_clone $repo $dir $branch +} + +function get_ext_gw_interface { + # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH + # This function is copied directly from the devstack neutron-legacy script + if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then + echo $Q_PUBLIC_VETH_EX + else + # Disable in-band as we are going to use local port + # to communicate with VMs + sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ + other_config:disable-in-band=true + echo $PUBLIC_BRIDGE + fi +} + +function create_public_bridge { + # Create the public bridge that OVN will use + # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6 + local ext_gw_ifc + ext_gw_ifc=$(get_ext_gw_interface) + + ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 + ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc + if [ -n "$FLOATING_RANGE" ]; then + local cidr_len=${FLOATING_RANGE#*/} + sudo ip addr flush dev $ext_gw_ifc + sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc + fi + + # Ensure IPv6 RAs are accepted on the interface with the default route. + # This is needed for neutron-based devstack clouds to work in + # IPv6-only clouds in the gate. Please do not remove this without + # talking to folks in Infra. This fix is based on a devstack fix for + # neutron L3 agent: https://review.openstack.org/#/c/359490/. 
+ default_route_dev=$(ip route | grep ^default | awk '{print $5}') + sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 + + sudo sysctl -w net.ipv6.conf.all.forwarding=1 + if [ -n "$IPV6_PUBLIC_RANGE" ]; then + local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} + sudo ip -6 addr flush dev $ext_gw_ifc + sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc + fi + + sudo ip link set $ext_gw_ifc up +} + +function _disable_libvirt_apparmor { + if ! sudo aa-status --enabled ; then + return 0 + fi + # NOTE(arosen): This is used as a work around to allow newer versions + # of libvirt to work with ovs configured ports. See LP#1466631. + # requires the apparmor-utils + install_package apparmor-utils + # disables apparmor for libvirtd + sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd +} + + +# OVN compilation functions +# ------------------------- + + +# compile_ovn() - Compile OVN from source and load needed modules +# Accepts three parameters: +# - first optional is False by default and means that +# modules are built and installed. +# - second optional parameter defines prefix for +# ovn compilation +# - third optional parameter defines localstatedir for +# ovn single machine runtime +function compile_ovn { + local build_modules=${1:-False} + local prefix=$2 + local localstatedir=$3 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH + pushd $DEST/$OVN_REPO_NAME + + if [ ! -f configure ] ; then + ./boot.sh + fi + + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir + fi + make -j$(($(nproc) + 1)) + sudo make install + popd +} + + +# OVN Neutron driver functions +# ---------------------------- + +# OVN service sanity check +function ovn_sanity_check { + if is_service_enabled q-agt neutron-agt; then + die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." + elif is_service_enabled q-l3 neutron-l3; then + die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then + die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS" + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then + die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN" + fi +} + +# install_ovn() - Collect source and prepare +function install_ovn { + echo "Installing OVN and dependent packages" + + # Check the OVN configuration + ovn_sanity_check + + # If OVS is already installed, remove it, because we're about to re-install + # it from source. 
+ for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package ; then + uninstall_package $package + fi + done + + # Install tox, used to generate the config (see devstack/override-defaults) + pip_install tox + remove_ovs_packages + sudo rm -f $OVS_RUNDIR/* + + compile_ovs $OVN_BUILD_MODULES + if use_new_ovn_repository; then + compile_ovn $OVN_BUILD_MODULES + fi + + # Ensure that the OVS commands are accessible in the PATH + OVS_BINDIR=${OVS_BINDIR:-/usr/local/bin} + export PATH=$OVS_BINDIR:$PATH + + sudo mkdir -p $OVS_RUNDIR + sudo chown $(whoami) $OVS_RUNDIR + sudo mkdir -p $OVS_PREFIX/var/log/openvswitch + sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch + sudo mkdir -p $OVS_PREFIX/var/log/ovn + sudo chown $(whoami) $OVS_PREFIX/var/log/ovn + + # Archive log files and create new + local log_archive_dir=$LOGDIR/archive + mkdir -p $log_archive_dir + for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do + if [ -f "$LOGDIR/$logfile" ] ; then + mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}" + fi + done + + # Install ovsdbapp from source if requested + if use_library_from_git "ovsdbapp"; then + git_clone_by_name "ovsdbapp" + setup_dev_lib "ovsdbapp" + fi + + # Install ovs python module from ovs source. + if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then + sudo pip uninstall -y ovs + # Clone the OVS repository if it's not yet present + clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH + sudo pip install -e $DEST/$OVS_REPO_NAME/python + fi +} + +# filter_network_api_extensions() - Remove non-supported API extensions by +# the OVN driver from the list of enabled API extensions +function filter_network_api_extensions { + SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))') + SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))') + if is_service_enabled q-qos neutron-qos ; then + SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos" + fi + NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS} + extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext")) + disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext")) + + # Log a message in case some extensions had to be disabled because + # they are not supported by the OVN driver + if [ ! 
-z "$disabled_ext" ]; then + _disabled=$(echo $disabled_ext | tr ' ' ',') + echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled" + fi + + # Export the final list of extensions that have been enabled and are + # supported by OVN + export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',') +} + +function configure_ovn_plugin { + echo "Configuring Neutron for OVN" + + if is_service_enabled q-svc ; then + filter_network_api_extensions + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE" + if is_service_enabled tls-proxy; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER" + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" + inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + + if is_service_enabled q-ovn-metadata-agent; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + else + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False + fi + + if is_service_enabled q-dns neutron-dns ; then + iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS" + fi + + iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE + fi + + if is_service_enabled q-dhcp neutron-dhcp ; then + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True + else + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False + fi + + if is_service_enabled n-api-meta ; then + if is_service_enabled q-ovn-metadata-agent ; then + iniset $NOVA_CONF neutron service_metadata_proxy True + fi + fi +} + +function configure_ovn { + echo "Configuring OVN" + + if [ -z "$OVN_UUID" ] ; then + if [ -f ./ovn-uuid ] ; then + OVN_UUID=$(cat ovn-uuid) + else + OVN_UUID=$(uuidgen) + echo $OVN_UUID > ovn-uuid + fi + fi + + # Metadata + if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR + + mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF + configure_root_helper_options $OVN_META_CONF + + iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS + iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH + iniset $OVN_META_CONF ovs ovsdb_connection unix:$OVS_RUNDIR/db.sock + iniset $OVN_META_CONF ovn 
ovn_sb_connection $OVN_SB_REMOTE + if is_service_enabled tls-proxy; then + iniset $OVN_META_CONF ovn \ + ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem + iniset $OVN_META_CONF ovn \ + ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt + iniset $OVN_META_CONF ovn \ + ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key + fi + fi +} + +function init_ovn { + # clean up from previous (possibly aborted) runs + # create required data files + + # Assumption: this is a dedicated test system and there is nothing important + # in the ovn, ovn-nb, or ovs databases. We're going to trash them and + # create new ones on each devstack run. + + _disable_libvirt_apparmor + + mkdir -p $OVN_DATADIR + mkdir -p $OVS_DATADIR + + rm -f $OVS_DATADIR/*.db + rm -f $OVS_DATADIR/.*.db.~lock~ + rm -f $OVN_DATADIR/*.db + rm -f $OVN_DATADIR/.*.db.~lock~ +} + +function _start_ovs { + echo "Starting OVS" + if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then + # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names. + enable_service ovsdb-server + enable_service ovs-vswitchd + + if [ ! -f $OVS_DATADIR/conf.db ]; then + ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema + fi + + if is_service_enabled ovn-controller-vtep; then + if [ ! -f $OVS_DATADIR/vtep.db ]; then + ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + fi + fi + + local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" + dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" + if is_service_enabled ovn-controller-vtep; then + dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + fi + dbcmd+=" $OVS_DATADIR/conf.db" + _run_process ovsdb-server "$dbcmd" + + echo "Configuring OVSDB" + if is_service_enabled tls-proxy; then + ovs-vsctl --no-wait set-ssl \ + $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + $INT_CA_DIR/ca-chain.pem + fi + ovs-vsctl --no-wait set open_vswitch . system-type="devstack" + ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" + ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" + ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" + ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" + ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" + # Select this chassis to host gateway routers + if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then + ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" + fi + + # Note: ovn-controller will create and configure br-int once it is started. + # So, no need to create it now because nothing depends on that bridge here. + + local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + + if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then + ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE + ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} + fi + + if is_service_enabled ovn-controller-vtep ; then + ovn_base_setup_bridge br-v + vtep-ctl add-ps br-v + vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP + + enable_service ovs-vtep + local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" + + vtep-ctl set-manager tcp:$HOST_IP:6640 + fi + fi +} + +function _start_ovn_services { + _start_process "devstack@ovsdb-server.service" + _start_process "devstack@ovs-vswitchd.service" + + if is_service_enabled ovs-vtep ; then + _start_process "devstack@ovs-vtep.service" + fi + if is_service_enabled ovn-northd ; then + _start_process "devstack@ovn-northd.service" + fi + if is_service_enabled ovn-controller ; then + _start_process "devstack@ovn-controller.service" + fi + if is_service_enabled ovn-controller-vtep ; then + _start_process "devstack@ovn-controller-vtep.service" + fi + if is_service_enabled q-ovn-metadata-agent; then + _start_process "devstack@q-ovn-metadata-agent.service" + fi +} + +# start_ovn() - Start running processes, including screen +function start_ovn { + echo "Starting OVN" + + _start_ovs + + local SCRIPTDIR=$OVN_SCRIPTDIR + if ! use_new_ovn_repository; then + SCRIPTDIR=$OVS_SCRIPTDIR + fi + + if is_service_enabled ovn-northd ; then + if is_service_enabled tls-proxy; then + local tls_args="\ + --ovn-nb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ + --ovn-nb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + --ovn-nb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + --ovn-sb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ + --ovn-sb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + --ovn-sb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + " + else + local tls_args="" + fi + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor $tls_args start_northd" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" + + _run_process ovn-northd "$cmd" "$stop_cmd" + ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + fi + + if is_service_enabled ovn-controller ; then + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" + + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + fi + + if is_service_enabled ovn-controller-vtep ; then + local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" + + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + fi + + if is_service_enabled q-ovn-metadata-agent; then + run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" + # Format logging + setup_logging $OVN_META_CONF + fi + + # NOTE(lucasagomes): To keep things simpler, let's reuse the same + # RUNDIR for both OVS and OVN. 
This way we avoid having to specify the + # --db option in the ovn-{n,s}bctl commands while playing with DevStack + if use_new_ovn_repository; then + sudo ln -s $OVS_RUNDIR $OVN_RUNDIR + fi + + _start_ovn_services +} + +function _stop_ovs_dp { + sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp + modprobe -q -r vport_geneve vport_vxlan openvswitch || true +} + +function stop_ovn { + if is_service_enabled q-ovn-metadata-agent; then + sudo pkill -9 -f haproxy || : + stop_process neutron-ovn-metadata-agent + fi + if is_service_enabled ovn-controller-vtep ; then + stop_process ovn-controller-vtep + fi + if is_service_enabled ovn-controller ; then + stop_process ovn-controller + fi + if is_service_enabled ovn-northd ; then + stop_process ovn-northd + fi + if is_service_enabled ovs-vtep ; then + stop_process ovs-vtep + fi + + stop_process ovs-vswitchd + stop_process ovsdb-server + + _stop_ovs_dp +} + +function _cleanup { + local path=${1:-$DEST/$OVN_REPO_NAME} + pushd $path + cd $path + sudo make uninstall + sudo make distclean + popd +} + +# cleanup_ovn() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_ovn { + local ovn_path=$DEST/$OVN_REPO_NAME + local ovs_path=$DEST/$OVS_REPO_NAME + + if [ -d $ovn_path ]; then + _cleanup $ovn_path + fi + + if [ -d $ovs_path ]; then + _cleanup $ovs_path + fi + + sudo rm -f $OVN_RUNDIR +} diff --git a/stack.sh b/stack.sh index 85640d5390..ba9da638a8 100755 --- a/stack.sh +++ b/stack.sh @@ -1267,6 +1267,11 @@ if is_service_enabled n-api; then start_nova_api fi +if is_service_enabled ovn-controller ovn-controller-vtep; then + echo_summary "Starting OVN services" + start_ovn_services +fi + if is_service_enabled neutron-api; then echo_summary "Starting Neutron" start_neutron_api diff --git a/stackrc b/stackrc index 4ffd537ef4..bf1ad3d85d 100644 --- a/stackrc +++ b/stackrc @@ -549,6 +549,11 @@ GITDIR["neutron-lib"]=$DEST/neutron-lib GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} +# ovsdbapp used by neutron +GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} +GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} +GITDIR["ovsdbapp"]=$DEST/ovsdbapp + ################## # # TripleO / Heat Agent Components diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index c3b4457171..ab7583d042 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp" # Generate the above list with # echo ${!GITREPO[@]} From 52e52d8937e8e57b2bc16f67c3651a358fa6fb77 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Thu, 13 Aug 2020 09:55:08 +0200 Subject: [PATCH 1327/1936] Add a dependency file for os-brick (and add lsscsi) There are a few dependencies which are really os-brick-specific. They are listed in its bindep.txt file, but os-brick is usually installed from pip, so its bindep.txt file is not available. 
As those dependencies are needed by the various services which use os-brick, move them to their own dependency file (with the addition of the new lsscsi, required by the next os-brick stable release) and make sure that file is parsed when installing the services which require os-brick. Side note: there should be a way to avoid this duplication also for pip-installed libraries (normal services can use files/ or even bindep, but in this case the source is not always available), (temporarily?) duplicate them, as it has been the case for the other os-brick dependencies already listed here. Change-Id: I9ab6e215dbef9ebdb1946da2f9a40ce020ecc95b --- files/debs/cinder | 2 -- files/debs/os-brick | 3 +++ files/rpms-suse/cinder | 1 - files/rpms-suse/os-brick | 2 ++ files/rpms/cinder | 1 - files/rpms/os-brick | 2 ++ functions-common | 9 +++++++++ 7 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 files/debs/os-brick create mode 100644 files/rpms-suse/os-brick create mode 100644 files/rpms/os-brick diff --git a/files/debs/cinder b/files/debs/cinder index c1b79fda47..5d390e24bf 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -1,6 +1,4 @@ lvm2 -open-iscsi -open-iscsi-utils # Deprecated since quantal dist:precise qemu-utils tgt # NOPRIME thin-provisioning-tools diff --git a/files/debs/os-brick b/files/debs/os-brick new file mode 100644 index 0000000000..4148b0c421 --- /dev/null +++ b/files/debs/os-brick @@ -0,0 +1,3 @@ +lsscsi +open-iscsi +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 189a232fa7..b39cc79a27 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,4 +1,3 @@ lvm2 -open-iscsi qemu-tools tgt # NOPRIME diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick new file mode 100644 index 0000000000..67b33a9861 --- /dev/null +++ b/files/rpms-suse/os-brick @@ -0,0 +1,2 @@ +lsscsi +open-iscsi diff --git a/files/rpms/cinder b/files/rpms/cinder index c21ea08e89..375f93e090 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,4 +1,3 @@ -iscsi-initiator-utils lvm2 qemu-img targetcli diff --git a/files/rpms/os-brick b/files/rpms/os-brick new file mode 100644 index 0000000000..14ff870557 --- /dev/null +++ b/files/rpms/os-brick @@ -0,0 +1,2 @@ +iscsi-initiator-utils +lsscsi diff --git a/functions-common b/functions-common index d99ad9212f..549f7e8cdc 100644 --- a/functions-common +++ b/functions-common @@ -1219,10 +1219,16 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == c-* ]]; then if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then file_to_parse="${file_to_parse} ${package_dir}/cinder" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == s-* ]]; then if [[ ! $file_to_parse =~ $package_dir/swift ]]; then file_to_parse="${file_to_parse} ${package_dir}/swift" @@ -1231,6 +1237,9 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/nova ]]; then file_to_parse="${file_to_parse} ${package_dir}/nova" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == g-* ]]; then if [[ ! 
$file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" From 6fba3fdfa1e4d15c8e6128a3a426e499b221d2d2 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 18 Aug 2020 22:29:49 +0200 Subject: [PATCH 1328/1936] New get-devstack-os-environment role The role reads the OS_* variables set by devstack through openrc for the specified user and project and exports them as the os_env_vars fact. It is meant to be used as a porting aid for the jobs of the non-unified *clients only. This is useful to run functional tests against the generated environment. A slightly less generic version of this role is currently used by python-cinderclient and python-novaclient (get-os-environment). In order to make this more useful, call it from devstack-tox-functional and derived jobs. The role execution is disabled by default and it can be enabled by setting openrc_enable_export: true. Change-Id: I15f5a187dbc54c82e8f4a08b4bb58d56e0d66961 --- playbooks/tox/run-both.yaml | 4 +- playbooks/tox/run.yaml | 4 +- roles/get-devstack-os-environment/README.rst | 40 +++++++++++++++++++ .../defaults/main.yaml | 5 +++ .../tasks/main.yaml | 14 +++++++ 5 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 roles/get-devstack-os-environment/README.rst create mode 100644 roles/get-devstack-os-environment/defaults/main.yaml create mode 100644 roles/get-devstack-os-environment/tasks/main.yaml diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml index e85c2eee96..0528b46f00 100644 --- a/playbooks/tox/run-both.yaml +++ b/playbooks/tox/run-both.yaml @@ -7,4 +7,6 @@ bindep_dir: "{{ zuul_work_dir }}" - test-setup - ensure-tox - - tox + - get-devstack-os-environment + - role: tox + tox_environment: "{{ os_env_vars|default({}) }}" diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml index 22f82096c7..6e549d3655 100644 --- a/playbooks/tox/run.yaml +++ b/playbooks/tox/run.yaml @@ -1,3 +1,5 @@ - hosts: all roles: - - tox + - get-devstack-os-environment + - role: tox + tox_environment: "{{ os_env_vars|default({}) }}" diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst new file mode 100644 index 0000000000..68ddce8b5a --- /dev/null +++ b/roles/get-devstack-os-environment/README.rst @@ -0,0 +1,40 @@ +Reads the OS_* variables set by devstack through openrc +for the specified user and project and exports them as +the os_env_vars fact. + +**WARNING**: this role is meant to be used as porting aid +for the non-unified python-client jobs which +are already around, as those clients do not use clouds.yaml +as openstackclient does. +When those clients and their jobs are deprecated and removed, +or anyway when the new code is able to read from clouds.yaml +directly, this role should be removed as well. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: openrc_file + :default: {{ devstack_base_dir }}/devstack/openrc + + The location of the generated openrc file. + +.. zuul:rolevar:: openrc_user + :default: admin + + The user whose credentials should be retrieved. + +.. zuul:rolevar:: openrc_project + :default: admin + + The project (which openrc_user is part of) whose + access data should be retrieved. + +.. zuul:rolevar:: openrc_enable_export + :default: false + + Set it to true to export os_env_vars. 
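For illustration, when openrc_enable_export is true the role captures roughly
what this manual shell equivalent would produce (paths and names are the role
defaults documented above; the actual implementation is the Ansible task added
below):

    source /opt/stack/devstack/openrc admin admin
    env | grep '^OS_'

and it exposes the resulting OS_* variables to the consuming tox run as the
os_env_vars fact.
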
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml new file mode 100644 index 0000000000..73ecfe920c --- /dev/null +++ b/roles/get-devstack-os-environment/defaults/main.yaml @@ -0,0 +1,5 @@ +devstack_base_dir: "/opt/stack" +openrc_file: "{{ devstack_base_dir }}/devstack/openrc" +openrc_user: admin +openrc_project: admin +openrc_enable_export: false diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml new file mode 100644 index 0000000000..8c8df7f96d --- /dev/null +++ b/roles/get-devstack-os-environment/tasks/main.yaml @@ -0,0 +1,14 @@ +- when: openrc_enable_export + block: + - name: Extract the OS_ environment variables + shell: + cmd: | + source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null + env | awk -F= 'BEGIN {print "---" } /^OS_/ { print " "$1": \""$2"\""} ' + args: + executable: "/bin/bash" + register: env_os + + - name: Save the OS_ environment variables as a fact + set_fact: + os_env_vars: "{{ env_os.stdout|from_yaml }}" From b9fe9c74f68f1881c0602748301ec6b68ed508d6 Mon Sep 17 00:00:00 2001 From: Carlos Goncalves Date: Thu, 20 Aug 2020 14:42:55 +0200 Subject: [PATCH 1329/1936] Make PyYAML overridable on Red Hat family distros This patch fixes an early stack issue where the following error message would be presented: ERROR: Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall. We also drop references to removal of Python 2 library egg infos now that Python 2 is EOL. Closes-Bug: #1892363 Change-Id: I2876ee58ab6b73682869d6b4e684e10ac5e56ad9 --- tools/fixup_stuff.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index e1409291b9..bf31dcbebb 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -162,10 +162,7 @@ function fixup_fedora { # manifest of what to remove. However, in most cases, simply # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python2.7/site-packages/enum34*.egg-info - sudo rm -rf /usr/lib/python2.7/site-packages/ipaddress*.egg-info - sudo rm -rf /usr/lib/python2.7/site-packages/ply-*.egg-info - sudo rm -rf /usr/lib/python2.7/site-packages/typing-*.egg-info + sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info } function fixup_suse { From f0c12bd4b54eec793cd73e0bf4d07dc2d7fee4ea Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 24 Jun 2020 07:26:35 +0200 Subject: [PATCH 1330/1936] Further py2 cleanup for Fedora Change-Id: I2433e2ed067f866751bf49983c0a7efce4be8900 --- tools/fixup_stuff.sh | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index bf31dcbebb..441075ddcb 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -87,12 +87,6 @@ function fixup_ubuntu { # Python Packages # --------------- -# get_package_path python-package # in import notation -function get_package_path { - local package=$1 - echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") -} - function fixup_fedora { if ! 
is_fedora; then return @@ -130,32 +124,6 @@ function fixup_fedora { fi fi - if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then - # requests ships vendored version of chardet/urllib3, but on - # fedora these are symlinked back to the primary versions to - # avoid duplication of code on disk. This is fine when - # maintainers keep things in sync, but since devstack takes - # over and installs later versions via pip we can end up with - # incompatible versions. - # - # The rpm package is not removed to preserve the dependent - # packages like cloud-init; rather we remove the symlinks and - # force a re-install of requests so the vendored versions it - # wants are present. - # - # Realted issues: - # https://bugs.launchpad.net/glance/+bug/1476770 - # https://bugzilla.redhat.com/show_bug.cgi?id=1253823 - - base_path=$(get_package_path requests)/packages - if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then - sudo rm -f $base_path/{chardet,urllib3} - # install requests with the bundled urllib3 to avoid conflicts - pip_install --upgrade --force-reinstall requests - fi - - fi - # Since pip10, pip will refuse to uninstall files from packages # that were created with distutils (rather than more modern # setuptools). This is because it technically doesn't have a From e7625fc72cbbbfaf37a6682166bc2180d4e50855 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 26 Aug 2020 09:46:35 +0100 Subject: [PATCH 1331/1936] [OVN] Follow up of OVN module migration to DevStack This patch is a follow-up of Ib4194329474e8d68a90886d2a04f027eecd741df. This patch removes the configure_port_forwarding call from the neutron-legacy module because port forwarding (just like other extensions such as DNS, QOS, etc...) are already enabled in the plugin.sh file in the neutron repository [0]. The configure_port_forwarding method itself is also defined in the neutron repository so calling it here may result in a failure in case the plugin is not enabled. We are also removing the "dns" extensions from the default Q_ML2_PLUGIN_EXT_DRIVERS variable because this extension conflicts with the default DNS extensions that is enabled by Neutron when q-dns/neutron-dns service is enabled (also in [0]). The LP for this conflict problem is: https://bugs.launchpad.net/neutron/+bug/1887163. 
[0] https://github.com/openstack/neutron/blob/945a244588b81064e4301b6f055a3c90f472bd7e/devstack/plugin.sh#L101-L103 Change-Id: Iafb9e45520798b2a612192cfc6cca28501465862 Signed-off-by: Lucas Alvares Gomes --- lib/neutron-legacy | 3 --- lib/neutron_plugins/ovn_agent | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 363c62e4c2..2906f15736 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -364,9 +364,6 @@ function configure_mutnauq { if [[ $Q_AGENT == "ovn" ]]; then configure_ovn - if is_service_enabled q-port-forwarding neutron-port-forwarding; then - configure_port_forwarding - fi configure_ovn_plugin fi diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e29b84fa48..f647f85c3d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -118,7 +118,7 @@ Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} -Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,dns,qos} +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} # this one allows empty: ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} From 18b4251bf4f689923a19bf7fbc50d5c2ea422b21 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 31 Aug 2020 16:22:57 +1000 Subject: [PATCH 1332/1936] Use SETUPTOOLS_USE_DISTUTILS=stdlib for global pip installs A new setuptools release has changed the way pip installs are done, see [0]. With this change we switch back to using the distro method for global pip installs. Temporarily make grenade jobs non-voting in order to allow this patch to be backported. [0] http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html Change-Id: I5d8aa0e58e0409c54451b51de5eb70ba9a68d849 --- .zuul.yaml | 18 ++++++++++-------- inc/python | 8 +++++++- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index d387b0dd50..acf26e2ca8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -705,10 +705,12 @@ voting: false irrelevant-files: *dsvm-irrelevant-files - grenade: + voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - neutron-grenade-multinode: + voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -750,18 +752,18 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - neutron-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + # - neutron-grenade-multinode: + # irrelevant-files: + # - ^.*\.rst$ + # - ^doc/.*$ - neutron-tempest-linuxbridge: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + # - grenade: + # irrelevant-files: + # - ^.*\.rst$ + # - ^doc/.*$ - openstacksdk-functional-devstack: irrelevant-files: - ^.*\.rst$ diff --git a/inc/python b/inc/python index f98d28d995..8941fd038d 100644 --- a/inc/python +++ b/inc/python @@ -170,7 +170,13 @@ function pip_install { local sudo_pip="env" else local cmd_pip="python$PYTHON3_VERSION -m pip" - local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" + # See + # https://github.com/pypa/setuptools/issues/2232 + # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html + # this makes setuptools >=50 use the platform distutils. 
+ # We only want to do this on global pip installs, not if + # installing in a virtualenv + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib " echo "Using python $PYTHON3_VERSION to install $package_dir" fi From cb8a4a6882b0cf89590cd169c6dce2c01c1dee9f Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 1 Sep 2020 14:04:00 +0200 Subject: [PATCH 1333/1936] Make grenade jobs voting again This reverts the changes made to the zuul config in [0]. Since the backports to the stable branches have been merged, the jobs can be voting again. [0] I5d8aa0e58e0409c54451b51de5eb70ba9a68d849 Change-Id: I3f5972e05faea8f11d7e87f3f8b05e4979e6c328 --- .zuul.yaml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index acf26e2ca8..d387b0dd50 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -705,12 +705,10 @@ voting: false irrelevant-files: *dsvm-irrelevant-files - grenade: - voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - neutron-grenade-multinode: - voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -752,18 +750,18 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - # - neutron-grenade-multinode: - # irrelevant-files: - # - ^.*\.rst$ - # - ^doc/.*$ + - neutron-grenade-multinode: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - neutron-tempest-linuxbridge: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - # - grenade: - # irrelevant-files: - # - ^.*\.rst$ - # - ^doc/.*$ + - grenade: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ - openstacksdk-functional-devstack: irrelevant-files: - ^.*\.rst$ From ae21b3556ff215c0f73bc0ebbc90d72fa59a3459 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 1 Sep 2020 14:11:45 +0000 Subject: [PATCH 1334/1936] move cleanup of pyc files Previously pyc files were only cleaned if clean.sh was run. with this change a new clean_pyc_files function was introduced with the logic that was previously in clean.sh but it is now invoked from unstack.sh With the previous behavior you could not stack with horizon enabled then unstack and stack again due to the presence of pyc files that were owned by root. By moving the clean to unstack in stead of clean.sh you can now stack, unstack and stack again without hitting the pyc issue. since unstack is invoked by clean the existing clean.sh behavior has not changed in practice except for the fact the pyc files are removed sooner in the process. 
This change also removes support for findutils < 4.2.3 Ubuntu 12.04 and CentOS 6 both have 4.4.2 since they were released 8 years ago and are now EOL its fair to assume that all modern distros have 4.2.3+ https://repology.org/project/findutils/versions Change-Id: I13c9aad9be7e0930a0d875b7d382090caf0b8982 --- clean.sh | 9 --------- functions-common | 7 +++++++ unstack.sh | 2 ++ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/clean.sh b/clean.sh index 685a719cfb..cb0a8b4bef 100755 --- a/clean.sh +++ b/clean.sh @@ -145,12 +145,3 @@ done rm -rf ~/.config/openstack -# Clean up all *.pyc files -if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then - find_version=`find --version | awk '{ print $NF; exit}'` - if vercmp "$find_version" "<" "4.2.3" ; then - sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm - else - sudo find $DEST -name "*.pyc" -delete - fi -fi diff --git a/functions-common b/functions-common index ffbd631fb2..547f6df038 100644 --- a/functions-common +++ b/functions-common @@ -2415,6 +2415,13 @@ function time_totals { $xtrace } +function clean_pyc_files { + # Clean up all *.pyc files + if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then + sudo find $DEST -name "*.pyc" -delete + fi +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/unstack.sh b/unstack.sh index 276111edb9..3197cf136f 100755 --- a/unstack.sh +++ b/unstack.sh @@ -182,3 +182,5 @@ if is_service_enabled cinder && is_package_installed lvm2; then clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true clean_lvm_filter fi + +clean_pyc_files From 7f7f488bc385dd707a3a6d8dae7859bbe72182e5 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 20 Aug 2020 09:27:01 +0100 Subject: [PATCH 1335/1936] Bionic: Enable Train UCA for updated QEMU and libvirt This is will allow the openstack/nova project to facilitate a minimum required version bump of QEMU and libvirt within the libvirt virt driver in I8e349849db0b1a540d295c903f1470917b82fd97 ahead of the planned switch to focal later in Victoria. Change-Id: I85eb45632ff229676f7c29708f4a7cc64b3d90e3 --- tools/fixup_stuff.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index bf31dcbebb..550239a2d9 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -73,6 +73,18 @@ function fixup_ubuntu { # Enable universe sudo add-apt-repository -y universe + if [[ -f /etc/ci/mirror_info.sh ]] ; then + # If we are on a nodepool provided host and it has told us about + # where we can find local mirrors then use that mirror. + source /etc/ci/mirror_info.sh + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/train main" + else + # Enable UCA:train for updated versions of QEMU and libvirt + sudo add-apt-repository -y cloud-archive:train + fi + REPOS_UPDATED=False + apt_get_update + # Since pip10, pip will refuse to uninstall files from packages # that were created with distutils (rather than more modern # setuptools). This is because it technically doesn't have a From 7ba26f5cf97b39bd0ddb53248ae4676866c10824 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 17 Sep 2020 11:13:52 +0200 Subject: [PATCH 1336/1936] Add possibility to not compile ovs and ovn if that's disabled There is flag Q_BUILD_OVS_FROM_GIT which can be used to not compile ovs from source. But this wasn't respected in the ovn_agent's module in install_ovn function which was always installing from source ovn and ovs. We need to disable that e.g. on grenade jobs when new version is installed. 
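A minimal localrc sketch for the case described above (for example a grenade
node that should keep its packaged OVS/OVN instead of rebuilding them from
source; both variables appear in the ovn_agent module touched below):

    Q_AGENT=ovn
    Q_BUILD_OVS_FROM_GIT=False
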
Change-Id: I7d3f92365e880191dcfe7c618a6f79d5f741144f --- lib/neutron_plugins/ovn_agent | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index f647f85c3d..84df9181ea 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -28,6 +28,8 @@ source $NEUTRON_DIR/devstack/lib/ovs # Defaults # -------- +Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT) + # Set variables for building OVN from source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') @@ -322,6 +324,11 @@ function ovn_sanity_check { # install_ovn() - Collect source and prepare function install_ovn { + if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then + echo "Installation of OVS from source disabled." + return 0 + fi + echo "Installing OVN and dependent packages" # Check the OVN configuration From 848aaf83edf66e0c5671b8e9e74414ee70fdba0e Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 4 Sep 2020 10:29:19 -0500 Subject: [PATCH 1337/1936] Add nova-ceph-multistore job in devstack gate nova-ceph-multistore is defined in nova side to test the glance multistore on ceph and it is voting on nova gate. There are other multistore testing enhancement going on for example- https://review.opendev.org/#/c/743800/. so to avoid any regression, let's run exiting mutistore job on devstack gate too. Change-Id: Ie82b4057463df4b6138c53b14a582bd84866aebd --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index d387b0dd50..9ae5f3f804 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -734,6 +734,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - nova-ceph-multistore: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ # NOTE(gmann): Remove this job from devstack pipeline once it is # migrated to zuulv3 native. This is legacy job and rely on # devstack-gate + devstack setting so any change in devstack can @@ -770,6 +774,10 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - nova-ceph-multistore: + irrelevant-files: + - ^.*\.rst$ + - ^doc/.*$ # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. From 057aaa6ec90d037661ec88ef8af2b477e28411b4 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Wed, 29 Jul 2020 07:37:16 +0000 Subject: [PATCH 1338/1936] Configure cinder store for glance This patch will enable user to configure single cinder store as well as multiple cinder stores for glance. Below are the parameters needs to be added in local.conf. A. For single store USE_CINDER_FOR_GLANCE=True B. For Multiple stores USE_CINDER_FOR_GLANCE=True GLANCE_ENABLE_MULTIPLE_STORES=True CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2,nfs:nfsdriver-1,ceph:cephdriver-1} GLANCE_CINDER_DEFAULT_BACKEND=lvmdriver-1 enable_plugin devstack-plugin-nfs https://opendev.org/openstack/devstack-plugin-nfs enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph NOTE: GLANCE_CINDER_DEFAULT_BACKEND should be one of the value from CINDER_ENABLED_BACKENDS. If you need to configure nfs and ceph backend for cinder then you need to add respective plugins in local.conf file. If GLANCE_ENABLE_MULTIPLE_STORES is True then it will not configure swift store for glance even if it is enabled in local.conf file. 
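For illustration only (not part of this patch), with CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,nfs:nfsdriver-1 the multiple-cinder-store path would write roughly the following into glance-api.conf:

    [DEFAULT]
    enabled_backends = lvmdriver-1:cinder,nfsdriver-1:cinder

    [glance_store]
    default_backend = lvmdriver-1

    [lvmdriver-1]
    cinder_volume_type = lvmdriver-1
    cinder_store_auth_address = <keystone v3 URI>
    cinder_store_user_name = glance

    [nfsdriver-1]
    cinder_volume_type = nfsdriver-1
    cinder_mount_point_base = <GLANCE_IMAGE_DIR>/mnt
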
Needed-by: https://review.opendev.org/#/c/750018 Change-Id: Id0d63c4ea41cce389eee8dc9a96913a7d427f186 --- lib/glance | 192 +++++++++++++++++++++++++++++++++++++++++------------ stack.sh | 60 +++++++++-------- 2 files changed, 184 insertions(+), 68 deletions(-) diff --git a/lib/glance b/lib/glance index 2118636a46..a848fc7abe 100644 --- a/lib/glance +++ b/lib/glance @@ -41,6 +41,16 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +# Cinder for Glance +USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) +# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values +# from CINDER_ENABLED_BACKENDS +GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} +GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +# NOTE (abhishekk): For opensuse data files are stored in different directory +if is_opensuse; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance +fi # Glance multi-store configuration # Boolean flag to enable multiple store configuration for glance GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) @@ -68,6 +78,7 @@ GLANCE_STANDALONE=${GLANCE_STANDALONE:-True} # and $DATA_DIR/glance/cheap. GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} +GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} @@ -135,6 +146,122 @@ function cleanup_glance { fi } +# Set multiple cinder store related config options for each of the cinder store +# +function configure_multiple_cinder_stores { + + local be be_name be_type enabled_backends + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + enabled_backends+="${be_name}:cinder," + + set_common_cinder_store_params $be_name + iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name} + if [[ "$be_type" == "nfs" ]]; then + mkdir -p "$GLANCE_NFS_MOUNTPOINT" + iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT" + fi + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND +} + +# Set common cinder store options to given config section +# +# Arguments: +# config_section +# +function set_common_cinder_store_params { + local config_section="$1" + iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3 + iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance + iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME +} + +# Configure multiple file stores options for each file store +# +# Arguments: +# +function configure_multiple_file_stores { + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + + # Glance multiple store Store specific configs + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND + local store + for store in $(echo $glance_multiple_file_stores | tr "," "\n"); do + iniset $GLANCE_API_CONF $store filesystem_store_datadir 
"${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + done +} + +# Set reserved stores for glance +function configure_reserved_stores { + iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" + iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" +} + +# Copy rootwrap file from glance_store/etc/glance to /etc/glance +# +# Arguments: +# source_path Source path to copy rootwrap files from +# +function copy_rootwrap { + local source_path="$1" + # Make glance configuration directory if it is not exists + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR + cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/ +} + +# Set glance_store related config options +# +# Arguments: +# USE_CINDER_FOR_GLANCE +# GLANCE_ENABLE_MULTIPLE_STORES +# +function configure_glance_store { + local use_cinder_for_glance="$1" + local glance_enable_multiple_stores="$2" + local be + + if [[ "$glance_enable_multiple_stores" == "False" ]]; then + # Configure traditional glance_store + if [[ "$use_cinder_for_glance" == "True" ]]; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" + iniset $GLANCE_API_CONF glance_store default_store cinder + + # set cinder related store parameters + set_common_cinder_store_params glance_store + # set nfs mount_point dir + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + local be_name=${be##*:} + if [[ "$be_name" == "nfs" ]]; then + mkdir -p $GLANCE_NFS_MOUNTPOINT + iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT + fi + done + fi + # Store specific configs + iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + else + if [[ "$use_cinder_for_glance" == "True" ]]; then + # Configure multiple cinder stores for glance + configure_multiple_cinder_stores + else + # Configure multiple file stores for glance + configure_multiple_file_stores + fi + # Configure reserved stores + configure_reserved_stores + fi +} + # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR @@ -143,16 +270,6 @@ function configure_glance { local dburl dburl=`database_connection_url glance` - # Configure multiple stores - if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then - local store enabled_backends - enabled_backends="" - for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do - enabled_backends+="${store}:file," - done - iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} - fi - iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -170,21 +287,8 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi - # Glance multiple store Store specific configs - if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then - iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND - local store - for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do - iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" - done - - # Glance configure reserved stores - iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir 
"${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" - iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" - else - # Store specific configs - iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - fi + # Configure glance_store + configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then @@ -194,24 +298,26 @@ function configure_glance { fi # No multiple stores for swift yet - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then - iniset $GLANCE_API_CONF glance_store default_store swift - iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True - - iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF - iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 - iniset $GLANCE_API_CONF glance_store stores "file, http, swift" - if is_service_enabled tls-proxy; then - iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + # Store the images in swift if enabled. + if is_service_enabled s-proxy; then + iniset $GLANCE_API_CONF glance_store default_store swift + iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + + iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF + iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 + iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + fi + iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift + + iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi - iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift - - iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi # We need to tell glance what it's public endpoint is so that the version @@ -342,9 +448,11 @@ function install_glance { if use_library_from_git "glance_store"; then git_clone_by_name "glance_store" setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}") + copy_rootwrap ${DEST}/glance_store/etc/glance else # we still need to pass extras pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}") + copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR fi git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH diff --git a/stack.sh b/stack.sh index ba9da638a8..b7ecb643cb 100755 --- a/stack.sh +++ b/stack.sh @@ -1222,32 +1222,6 @@ if is_service_enabled swift; then start_swift fi -# Launch the Glance services -if is_service_enabled glance; then - echo_summary "Starting Glance" - start_glance -fi - - -# Install Images -# ============== - -# Upload an image to Glance. 
-# -# The default image is CirrOS, a small testing image which lets you login as **root** -# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending -# scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` - -# NOTE(yoctozepto): limited to node hosting the database which is the controller -if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then - echo_summary "Uploading images" - - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url - done -fi - # NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack # deployments. This ensures the keys match across nova and cinder across all # hosts. @@ -1315,6 +1289,40 @@ if is_service_enabled cinder; then create_volume_types fi +# This sleep is required for cinder volume service to become active and +# publish capabilities to cinder scheduler before creating the image-volume +if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + sleep 30 +fi + +# Launch the Glance services +# NOTE (abhishekk): We need to start glance api service only after cinder +# service has started as on glance startup glance-api queries cinder for +# validating volume_type configured for cinder store of glance. +if is_service_enabled glance; then + echo_summary "Starting Glance" + start_glance +fi + +# Install Images +# ============== + +# Upload an image to Glance. +# +# The default image is CirrOS, a small testing image which lets you login as **root** +# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` + +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then + echo_summary "Uploading images" + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url + done +fi + if is_service_enabled horizon; then echo_summary "Starting Horizon" From 4a1186aa90570b9c25782b423c5abe31da0e9033 Mon Sep 17 00:00:00 2001 From: Alexandre Arents Date: Thu, 17 Sep 2020 12:50:53 +0000 Subject: [PATCH 1339/1936] tempest: Enable shelve_migrate tests with Libvirt Enable the compute feature for shelve_migrate on all but LXC and Xen virt_types. Related-Bug: #1732428 Depends-On: https://review.opendev.org/#/c/696084/ Change-Id: I31cd00c9117607682213cfa0399709e560f4ad0d --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 125749b314..58588b7663 100644 --- a/lib/tempest +++ b/lib/tempest @@ -569,6 +569,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False iniset $TEMPEST_CONFIG compute-feature-enabled suspend False else + iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True fi From fa5742f8e1d95802cf8c623e29fcbc4931903b68 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 8 Sep 2020 09:38:14 +0100 Subject: [PATCH 1340/1936] Add Fedora 32 to supported list and use as fedora-latest This includes a workaround to a known dnsmasq >= 2.81 issue that results in unanswered DHCP requests from instances as documented in the following Neutron bug: dnsmasq >= 2.81 not responding to DHCP requests with current q-dhcp configs https://bugs.launchpad.net/neutron/+bug/1896945 For the time being we will attempt to downgrade to 2.80 to avoid this. 
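A quick, illustrative check that a Fedora 32 node ended up on the intended dnsmasq (it mirrors the test the fixup itself performs):

    rpm --queryformat '%{VERSION}' -q dnsmasq   # expect 2.80 after the downgrade, not >= 2.81
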
Related-Bug: #1896945 Change-Id: I3a760c43956221424926bd9dad0ebe9b28ae2b52 --- .zuul.yaml | 2 +- stack.sh | 2 +- tools/fixup_stuff.sh | 7 +++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9105d7e27c..94410b42e4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -72,7 +72,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-31 + label: fedora-32 groups: - name: tempest nodes: diff --git a/stack.sh b/stack.sh index ba9da638a8..5f9b553536 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|focal|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then +if [[ ! ${DISTRO} =~ (bionic|focal|f30|f31|f32|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index c0e07dd51c..a3bda2b74f 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -143,6 +143,13 @@ function fixup_fedora { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info + + # NOTE(lyarwood): Workaround a known issue on Fedora with dnsmasq >= 2.81 + # by downgrading to 2.80 for the time being. + # https://bugs.launchpad.net/neutron/+bug/1896945 + if [[ "$DISTRO" == "f32" ]] && [[ $(rpm --queryformat %{VERSION} -q dnsmasq) != "2.80" ]]; then + sudo dnf downgrade dnsmasq-2.80 -y + fi } function fixup_suse { From f966e287396538a563d80827dc7b77438e32eb6c Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 24 Sep 2020 14:47:39 +0100 Subject: [PATCH 1341/1936] Remove Fedora 30 support Fedora 30 hit EOL earlier in the year and can be removed from devstack. https://en.wikipedia.org/wiki/Fedora_version_history#Version_history Change-Id: I47452700d520a544c93c0c187143ec763f026612 --- lib/lvm | 6 ------ lib/nova | 4 ++-- stack.sh | 2 +- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/lib/lvm b/lib/lvm index 92265f2af2..b826c1bc63 100644 --- a/lib/lvm +++ b/lib/lvm @@ -124,12 +124,6 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the lvmetad on f30 (dropped from f31) or SUSE - if [[ $DISTRO =~ f30 ]] || is_suse; then - # services is not started by default - start_service lvm2-lvmetad - fi - # Start the tgtd service on Fedora and SUSE if tgtadm is used if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then start_service tgtd diff --git a/lib/nova b/lib/nova index c1354e79a9..9d7bbd8fca 100644 --- a/lib/nova +++ b/lib/nova @@ -293,8 +293,8 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f3[0-1] ]]; then - # For f30 and f31 use the rebased 2.1.0 version of the package. + if is_fedora && [[ $DISTRO =~ f31] ]]; then + # For f31 use the rebased 2.1.0 version of the package. 
sudo dnf copr enable -y lyarwood/iscsi-initiator-utils sudo dnf update -y fi diff --git a/stack.sh b/stack.sh index 5f9b553536..139e754a63 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|focal|f30|f31|f32|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then +if [[ ! ${DISTRO} =~ (bionic|focal|f31|f32|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 471f1625bb3ae9b11d9da69404f945a19d8b010a Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Sat, 26 Sep 2020 15:15:42 -0700 Subject: [PATCH 1342/1936] Update opensuse version Nova has newer qemu requirements which needs opensuse 15.2 to meet. The infra opensuse 15 images are 15.2 now to accomodate that. Update opensuse's supported version to match. Change-Id: I6f3c5234920b185b2b0cd9c358371402f7a7b922 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 139e754a63..14968386ea 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|focal|f31|f32|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then +if [[ ! ${DISTRO} =~ (bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 155109df89b07f90c99ce5c6acb32c78b58cf9de Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 24 Jul 2020 06:49:01 -0700 Subject: [PATCH 1343/1936] Change glance default back to WSGI mode The situation around glance under WSGI has changed a lot in a week. We can now run tasks and imports under WSGI, so let's switch the default back so that glance is consistent (by default) with the other projects. Change-Id: I3ae285b2ac4972c0b8abaccfc7c0ede0e1c49bf1 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index a848fc7abe..c2a8b7492e 100644 --- a/lib/glance +++ b/lib/glance @@ -70,7 +70,7 @@ GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} if [[ "$WSGI_MODE" != "uwsgi" ]]; then GLANCE_STANDALONE=True fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-True} +GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES From 29efb7282296c183d5036bc64451a9469a053056 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 30 Sep 2020 11:06:04 -0500 Subject: [PATCH 1344/1936] Update DEVSTACK_SERIES to wallaby stable/victoria branch has been created now and current master is for wallaby. Change-Id: I5f5b233127d6ef24452fcd05db990f9ddc244dc4 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index bf1ad3d85d..a36f8970e6 100644 --- a/stackrc +++ b/stackrc @@ -245,7 +245,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. 
-DEVSTACK_SERIES="victoria" +DEVSTACK_SERIES="wallaby" ############## # From 8e74a617df7d53732c1ef395a97479ce58122731 Mon Sep 17 00:00:00 2001 From: Riccardo Pittau Date: Fri, 10 Apr 2020 10:48:15 +0200 Subject: [PATCH 1345/1936] Move supported distros to variable Improve redability of the distro check using a variable to store supported distributions. Change-Id: Iffa1619065358d459bd04523b7eeae9049f8f2f0 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 3b2a331dea..bb4dfa2561 100755 --- a/stack.sh +++ b/stack.sh @@ -221,7 +221,9 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8) ]]; then +SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" + +if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 3ebb95f9b5e82256e66fcedb23f8f0740349c898 Mon Sep 17 00:00:00 2001 From: Dan Radez Date: Fri, 2 Oct 2020 09:46:31 -0400 Subject: [PATCH 1346/1936] Removing fixup for f32 + dnsmasq 2.81 workaround reported https://bugs.launchpad.net/neutron/+bug/1896945 fixed by https://review.opendev.org/#/c/755356/ Change-Id: I86a0be548e344ed4e95eab7212ba432bf570d2ae --- tools/fixup_stuff.sh | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a3bda2b74f..c0e07dd51c 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -143,13 +143,6 @@ function fixup_fedora { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info - - # NOTE(lyarwood): Workaround a known issue on Fedora with dnsmasq >= 2.81 - # by downgrading to 2.80 for the time being. - # https://bugs.launchpad.net/neutron/+bug/1896945 - if [[ "$DISTRO" == "f32" ]] && [[ $(rpm --queryformat %{VERSION} -q dnsmasq) != "2.80" ]]; then - sudo dnf downgrade dnsmasq-2.80 -y - fi } function fixup_suse { From 906d824a1985332949090583fdc3f4a5ce30125a Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Sun, 11 Oct 2020 21:59:07 +0200 Subject: [PATCH 1347/1936] Fix: do not lose the tox_environment value in func tests The current code always overrides tox_environment when running functional tests, but the correct behavior is to add the discovered environment variables to tox_environments, while keeping the user-specified value for it. The current behavior breaks the devstack-tox-functional children jobs, like openstacksdk-functional-devstack-ironic, which set tox_environment. 
Change-Id: I5dc9054a1495ca0ef7745c08316441ab153956f4 --- playbooks/tox/run-both.yaml | 3 +-- playbooks/tox/run.yaml | 3 +-- roles/get-devstack-os-environment/defaults/main.yaml | 1 + roles/get-devstack-os-environment/tasks/main.yaml | 4 ++-- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml index 0528b46f00..e4043d8231 100644 --- a/playbooks/tox/run-both.yaml +++ b/playbooks/tox/run-both.yaml @@ -8,5 +8,4 @@ - test-setup - ensure-tox - get-devstack-os-environment - - role: tox - tox_environment: "{{ os_env_vars|default({}) }}" + - tox diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml index 6e549d3655..0d065c6ca2 100644 --- a/playbooks/tox/run.yaml +++ b/playbooks/tox/run.yaml @@ -1,5 +1,4 @@ - hosts: all roles: - get-devstack-os-environment - - role: tox - tox_environment: "{{ os_env_vars|default({}) }}" + - tox diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml index 73ecfe920c..f68ea560d0 100644 --- a/roles/get-devstack-os-environment/defaults/main.yaml +++ b/roles/get-devstack-os-environment/defaults/main.yaml @@ -3,3 +3,4 @@ openrc_file: "{{ devstack_base_dir }}/devstack/openrc" openrc_user: admin openrc_project: admin openrc_enable_export: false +tox_environment: {} diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml index 8c8df7f96d..b2c5e93ed4 100644 --- a/roles/get-devstack-os-environment/tasks/main.yaml +++ b/roles/get-devstack-os-environment/tasks/main.yaml @@ -9,6 +9,6 @@ executable: "/bin/bash" register: env_os - - name: Save the OS_ environment variables as a fact + - name: Append the the OS_ environment variables to tox_environment set_fact: - os_env_vars: "{{ env_os.stdout|from_yaml }}" + tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}" From 0545b48f3b14f2951033b5e09db3190a95cf3527 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 13 Oct 2020 16:06:44 +0200 Subject: [PATCH 1348/1936] Add IPv6 route information to worlddump The "ip route" command only outputs IPv4 routes, add a command to also show IPv6 route information. Drop output from "ip link" as that information is contained within the "ip addr" output already. Change-Id: Iae87f43c4b1c57f07de041e823da9d350c670389 --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 6a618f5ee6..5a264d2508 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -165,7 +165,7 @@ def network_dump(): _dump_cmd("bridge link") _dump_cmd("ip link show type bridge") - ip_cmds = ["neigh", "addr", "link", "route"] + ip_cmds = ["neigh", "addr", "route", "route -6"] for cmd in ip_cmds + ['netns']: _dump_cmd("ip %s" % cmd) for netns_ in _netns_list(): From b107f9cf18c8112cce3f796995f3a5691be56259 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Mon, 1 Jul 2019 16:19:12 -0700 Subject: [PATCH 1349/1936] Add support for ceph_iscsi cinder driver This patch adds support to configure the new ceph iscsi driver Depends-On: https://review.opendev.org/#/c/662829/ Depends-On:https://review.opendev.org/668667 Change-Id: Ica180e00dedb8e7ed60e27e3f4841faa8fef938c --- lib/cinder_backends/ceph_iscsi | 56 ++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 lib/cinder_backends/ceph_iscsi diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional paramteters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_ceph_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name +function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: From 7de6e0b2eca9ac661a92badef4488d8d6380b06f Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 21 Oct 2020 13:59:50 +0100 Subject: [PATCH 1350/1936] fix ipv6 flag order in worlddump this change corrects the flag order from 'route -6' to '-6 route' as the -6 flag is an option when used with ip is an argument to the the ip command and not the route subcommand. -6 is accpeted as an argument to the standalone 'route' commannd but not 'ip route' subcommand. 
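Illustrative shell session showing the two orderings; the behaviour described is the one this commit message reports:

    ip -6 route    # correct: "-6" is consumed by ip(8) itself and IPv6 routes are listed
    ip route -6    # what worlddump generated before: "-6" reaches the route subcommand,
                   # which rejects it
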
Change-Id: Ic2ae472e42b7b455693d0aade48dc5109e1f21ba --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 5a264d2508..22770f15b6 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -165,7 +165,7 @@ def network_dump(): _dump_cmd("bridge link") _dump_cmd("ip link show type bridge") - ip_cmds = ["neigh", "addr", "route", "route -6"] + ip_cmds = ["neigh", "addr", "route", "-6 route"] for cmd in ip_cmds + ['netns']: _dump_cmd("ip %s" % cmd) for netns_ in _netns_list(): From 6ecfe67d8e8166a3dc42332ba8d1615be6a14a68 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 23 Sep 2020 11:54:19 +0100 Subject: [PATCH 1351/1936] Configure os-vif ovsdb_connection This patch set the os-vif "ovsdb_connection" configuration option so it can connect to the local OVSDB. By default, this option points to tcp:127.0.0.1:6640 and would fail if SERVICE_IP_VERSION == 6. Also, if SERVICE_IP_VERSION is an IPv6 address, it should be wraped with square brackets for it to work. Change-Id: Ie6eec4e140c7464936cf0b0c6307026a94c9f4ee Signed-off-by: Lucas Alvares Gomes --- lib/neutron_plugins/ovn_agent | 5 ++++- lib/nova | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 84df9181ea..b7330db9db 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -91,7 +91,10 @@ OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} -OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] +fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) diff --git a/lib/nova b/lib/nova index 9d7bbd8fca..d7426039c4 100644 --- a/lib/nova +++ b/lib/nova @@ -906,6 +906,11 @@ function start_nova_compute { # by the compute process. configure_console_compute + # Configure the OVSDB connection for os-vif + if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then + iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the From 3f28c272d0a3ae78329c81227a66c703d6a489d7 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 28 Oct 2020 13:05:14 +0000 Subject: [PATCH 1352/1936] Remove deprecated tail_log function This function has been deprecated for a long time, let's finally remove it. It is only generating a warning anyway. 
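Sketch of the resulting value in the nova compute config ($NOVA_CPU_CONF, [os_vif_ovs] section); the addresses are illustrative and not taken from the patch:

    # SERVICE_IP_VERSION=4:  ovsdb_connection = tcp:192.168.0.10:6640
    # SERVICE_IP_VERSION=6:  ovsdb_connection = tcp:[fd00::10]:6640   (note the square brackets)
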
Change-Id: I7bd440adf2ce8283e3ad3d5d09e6b2b877e2b42e --- functions-common | 4 ---- lib/placement | 1 - lib/tls | 8 -------- 3 files changed, 13 deletions(-) diff --git a/functions-common b/functions-common index 547f6df038..13749214ba 100644 --- a/functions-common +++ b/functions-common @@ -1609,10 +1609,6 @@ function service_check { } -function tail_log { - deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens" -} - # Plugin Functions # ================= diff --git a/lib/placement b/lib/placement index 2a449bfa90..b7798669a1 100644 --- a/lib/placement +++ b/lib/placement @@ -148,7 +148,6 @@ function start_placement_api { else enable_apache_site placement-api restart_apache_server - tail_log placement-api /var/log/$APACHE_NAME/placement-api.log fi echo "Waiting for placement-api to start..." diff --git a/lib/tls b/lib/tls index 861496d092..b3cc0b4159 100644 --- a/lib/tls +++ b/lib/tls @@ -570,14 +570,6 @@ EOF restart_apache_server } -# Follow TLS proxy -function follow_tls_proxy { - sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log - tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log - sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log - tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log -} - # Cleanup Functions # ================= From 47f76acbbac350ea18df6a9463876d38c3a13539 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Thu, 29 Oct 2020 10:42:38 +0000 Subject: [PATCH 1353/1936] Determine default IPv4 route device only when needed Sometimes instances don't have an IPv4 default route, so only check for it when we actually need it. In a followup patch we could extend the code to check for an IPv6 default route instead or in addition. Related-Bug: 1902002 Change-Id: Ie6cd241721f6b1f8e030960921a696939b2dab10 --- lib/neutron-legacy | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 2906f15736..436b0e3364 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -226,15 +226,17 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -default_route_dev=$(ip route | grep ^default | awk '{print $5}') -die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" # With the linuxbridge agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to # the name of the network interface to use for the physical # network. # # Example: ``LB_PHYSICAL_INTERFACE=eth1`` -LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev} +if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then + default_route_dev=$(ip route | grep ^default | awk '{print $5}') + die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" + LB_PHYSICAL_INTERFACE=$default_route_dev +fi # When Neutron tunnels are enabled it is needed to specify the # IP address of the end point in the local server. This IP is set From 2bb62b43bf8b3d95e199b111d8f6de01890c0536 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 6 Nov 2020 17:34:53 -0500 Subject: [PATCH 1354/1936] Decrease MTU to account for IPv6 header (MTU - 50) only supports VxLAN over IPv4, decrease it to support IPv6 as well, which is 20 bytes larger. 
Change-Id: I0cf258770f628c1b4fb590bd274b5433fbcc1450 --- playbooks/pre.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml index ff97a1f501..68cb1d8c7a 100644 --- a/playbooks/pre.yaml +++ b/playbooks/pre.yaml @@ -19,12 +19,13 @@ {% endfor -%} {{- mtus|min -}} - name: Calculate external_bridge_mtu - # 50 bytes is overhead for vxlan (which is greater than GRE + # 30 bytes is overhead for vxlan (which is greater than GRE # allowing us to use either overlay option with this MTU. + # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay. # TODO(andreaf) This should work, but it may have to be reconcilied with # the MTU setting used by the multinode setup roles in multinode pre.yaml set_fact: - external_bridge_mtu: "{{ local_mtu | int - 50 }}" + external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}" roles: - configure-swap - setup-stack-user From efc04eec00bef94059a0e5b6f457263fc84876c1 Mon Sep 17 00:00:00 2001 From: Nate Johnston Date: Tue, 3 Nov 2020 10:04:26 -0500 Subject: [PATCH 1355/1936] Look for ipv6 routes so ipv6-only jobs will not fail For change 739139 [1] PS 12, the neutron-tempest-plugin-scenario-linuxbridge died in devstack with "/opt/stack/devstack/functions-common:237 Failure retrieving default route device", which comes from "/opt/stack/devstack/lib/neutron-legacy:237:die_if_not_set". Looking at the worlddump.txt for that job [2] I see that there is a default ipv6 route; the vm was not configured with ipv4 networking. ip route -------- ip -6 route ----------- ::1 dev lo proto kernel metric 256 pref medium 2607:ff68:100:54::/64 dev ens3 proto kernel metric 256 expires 86380sec pref medium fe80::/64 dev ens3 proto kernel metric 256 pref medium default via fe80::f816:3eff:fe77:b05c dev ens3 proto ra metric 1024 expires 280sec hoplimit 64 pref medium Looking at the devstack code that throws the error [3] it looks like it only looks for a default route in the output of `ip route`, which does not include ipv6 information. This change should look in both the ipv4 and ipv6 route table. A similar check in the L3 setup code is also updated. 
[1] https://review.opendev.org/#/c/739139/ [2] https://d4eb7e3efe98cba79a4b-f4d168cdb20f40841821e4b213645c0f.ssl.cf2.rackcdn.com/739139/12/gate/neutron-tempest-plugin-scenario-linuxbridge/9a6b4f7/controller/logs/worlddump-latest.txt [3] https://opendev.org/openstack/devstack/src/branch/master/lib/neutron-legacy#L236 Closes-Bug: #1902002 Change-Id: I839e8c222368df98fec308cf41248a9dd0a8c187 --- lib/neutron-legacy | 2 +- lib/neutron_plugins/services/l3 | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 436b0e3364..791ff18b10 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -233,7 +233,7 @@ OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} # # Example: ``LB_PHYSICAL_INTERFACE=eth1`` if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$(ip route | grep ^default | awk '{print $5}') + default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" LB_PHYSICAL_INTERFACE=$default_route_dev fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 69536bbe58..75a3567096 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -101,7 +101,6 @@ SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') -die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices" default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') From 6766f71d62af10f8b59b5f829a49752775e9dabe Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 24 Jul 2020 15:44:34 -0700 Subject: [PATCH 1356/1936] Make create_disk() persistent Right now a system configured with the ceph plugin will not survive a reboot because the backing disk we create and mount isn't mounted at startup, preventing ceph from starting and the rest of nova/glance from working. This makes create_disk() idempotently write an fstab rule for the disk we make, and adds a destroy_disk() handler for cleanup. Change-Id: I50cd4234f51a335af25be756bd2459dca5aa343c --- clean.sh | 2 ++ functions | 34 +++++++++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/clean.sh b/clean.sh index cb0a8b4bef..4cebf1d9ea 100755 --- a/clean.sh +++ b/clean.sh @@ -145,3 +145,5 @@ done rm -rf ~/.config/openstack +# Clear any fstab entries made +sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab diff --git a/functions b/functions index cc1ca6cb25..e679b0f9bc 100644 --- a/functions +++ b/functions @@ -751,12 +751,13 @@ if ! function_exists echo_nolog; then fi -# create_disk - Create backing disk +# create_disk - Create, configure, and mount a backing disk function create_disk { local node_number local disk_image=${1} local storage_data_dir=${2} local loopback_disk_size=${3} + local key # Create a loopback disk and format it to XFS. if [[ -e ${disk_image} ]]; then @@ -777,11 +778,34 @@ function create_disk { # Swift and Ceph. sudo mkfs.xfs -f -i size=1024 ${disk_image} - # Mount the disk with mount options to make it as efficient as possible - if ! 
egrep -q ${storage_data_dir} /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ - ${disk_image} ${storage_data_dir} + # Unmount the target, if mounted + if egrep -q $storage_data_dir /proc/mounts; then + sudo umount $storage_data_dir fi + + # Clear any old fstab rules, install a new one for this disk, and mount it + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + sudo sed -i '/.*comment=$key.*/ d' /etc/fstab + echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab + sudo mount -v $storage_data_dir +} + +# Unmount, de-configure, and destroy a backing disk +function destroy_disk { + local disk_image=$1 + local storage_data_dir=$2 + + # Unmount the target, if mounted + if egrep -q $storage_data_dir /proc/mounts; then + sudo umount $storage_data_dir + fi + + # Clear any fstab rules + sed -i '/.*comment=$key.*/ d' /etc/fstab + + # Delete the file + sudo rm $disk_image } From d7d87b0202212e21727f6ced4a1eaa38a66808dd Mon Sep 17 00:00:00 2001 From: Weronika Sikora Date: Wed, 18 Sep 2019 13:45:53 +0000 Subject: [PATCH 1357/1936] Set image_alt_ssh_user during stack At this moment, only image_ssh_user is present in the config of Tempest. It's set to cirros by default and used for SSH connections in tests. However, several tests build instances with image_ref_alt, but still use image_ssh_user to connect, which results in failure if image_ref_alt is set to a non-cirros image. They should use image_alt_ssh_user instead, which can be set to whichever user the image_ref_alt needs in either local.conf or during plugin installation. Change-Id: I899909fb71a9862c891e94ba54c6a8fa137f9769 Partial-Bug: #1844535 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 9f2ec30cb9..c55531b72e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -27,6 +27,7 @@ # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # - ``DEFAULT_INSTANCE_TYPE`` # - ``DEFAULT_INSTANCE_USER`` +# - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # @@ -443,7 +444,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True} iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros} + iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER} iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume From 6c03a85d8bebffb17ec923d9f94a499dd8c4854b Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 10 Nov 2020 14:10:11 +0000 Subject: [PATCH 1358/1936] enable ussuri cloud archive on ubuntu bionic This change updates bionic installs to use the ussuri cloud archive to enable the use of libvirt 6.0.0. This is required to prevent a libvirt bug that causes intermittent failures for the tempest test_live_block_migration_paused testcase. 
Change-Id: I9c395c2b5fdfe6ad9a43477280e88e9a9b34f057 Related-Bug: 1901739 --- tools/fixup_stuff.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index c0e07dd51c..cd7ee59c1d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -77,10 +77,10 @@ function fixup_ubuntu { # If we are on a nodepool provided host and it has told us about # where we can find local mirrors then use that mirror. source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/train main" + sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main" else - # Enable UCA:train for updated versions of QEMU and libvirt - sudo add-apt-repository -y cloud-archive:train + # Enable UCA:ussuri for updated versions of QEMU and libvirt + sudo add-apt-repository -y cloud-archive:ussuri fi REPOS_UPDATED=False apt_get_update From 7a5fda83264575aebe52b62e27aae1a0398be4da Mon Sep 17 00:00:00 2001 From: Victoria Martinez de la Cruz Date: Fri, 13 Nov 2020 12:56:00 -0300 Subject: [PATCH 1359/1936] [CI] Copy /etc/ceph output to logs Change-Id: I91317c7d4972f07f741cdde2f406e290bb65467b --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index d387b0dd50..ec827c289c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -340,6 +340,7 @@ '{{ stage_dir }}/listen53.txt': logs '{{ stage_dir }}/deprecations.log': logs '{{ stage_dir }}/audit.log': logs + /etc/ceph: logs /var/log/ceph: logs /var/log/openvswitch: logs /var/log/glusterfs: logs From cdc2e9f656089217d28d0fff8d614dc84b755e2c Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 19 Nov 2020 09:17:37 +0000 Subject: [PATCH 1360/1936] zuul: Remove nova-live-migration from check queue As documented in the associated TODO this job can be removed now that it has been migrated to zuulv3 by Ib342e2d3c395830b4667a60de7e492d3b9de2f0a. Change-Id: Id7c0fd8eec09386cd638956a46c1f75844194b9d --- .zuul.yaml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e71ac8d8c3..c1406716fe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -737,15 +737,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - # NOTE(gmann): Remove this job from devstack pipeline once it is - # migrated to zuulv3 native. This is legacy job and rely on - # devstack-gate + devstack setting so any change in devstack can - # break it. - - nova-live-migration: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ gate: jobs: - devstack From 7a3a7ce876a37376fe0dca7278e41a4f46867daa Mon Sep 17 00:00:00 2001 From: Elod Illes Date: Mon, 30 Nov 2020 18:30:02 +0100 Subject: [PATCH 1361/1936] Workaround for new pip 20.3 behavior This patch caps pip version during bootstrap to avoid the issue: "ERROR: Links are not allowed as constraints" A proper fix would be to adapt to new pip behavior. 
Depends-On: https://review.opendev.org/764811 Change-Id: I1feed4573820436f91f8f654cc189fa3a21956fd --- tools/cap-pip.txt | 1 + tools/install_pip.sh | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 tools/cap-pip.txt diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt new file mode 100644 index 0000000000..8ee551b261 --- /dev/null +++ b/tools/cap-pip.txt @@ -0,0 +1 @@ +pip<20.3 diff --git a/tools/install_pip.sh b/tools/install_pip.sh index f3fd1e2498..9afd2e53c2 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -91,7 +91,9 @@ function install_get_pip { die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded fi - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP + # TODO: remove the trailing pip constraint when a proper fix + # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 + sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt } From e7c017bd8998d9cc8c4712efe992239c73f340d1 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Dec 2020 23:53:12 +0000 Subject: [PATCH 1362/1936] fix is_fedora for centos 8 stream When deploying on the centos 8 stream variant the output of "lsb_release -i -s" is CentOSStream instead of CentOS This breaks the is_fedora function in devstack preventing package installation and removal. Change-Id: I39ccefbd06f46adf5077f8d8001f37d3b190f040 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 13749214ba..87d8c64804 100644 --- a/functions-common +++ b/functions-common @@ -452,8 +452,8 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ - [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \ - [ "$os_VENDOR" = "Virtuozzo" ] + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } From 89acae97791a4f23a8b7e9550450cb4a3b986b01 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Wed, 16 Dec 2020 09:12:40 +0900 Subject: [PATCH 1363/1936] Use python3-guestfs in Ubuntu This commit makes to use python3-guestfs instead of python-guestfs in Ubuntu because python-guestfs package is not provided in focal[1][2]. This causes errors in some gate job if `ENABLE_FILE_INJECTION` is true like the following. ``` ... Package python-guestfs is not available, but is referred to by another package. This may mean that the package is missing, has been obsoleted, or is only available from another source However the following packages replace it: python3-guestfs E: Package 'python-guestfs' has no installation candidate ... 
``` http://paste.openstack.org/show/801073/ [1] https://packages.ubuntu.com/search?lang=en&keywords=python3-guestfs [2] https://packages.ubuntu.com/search?lang=en&suite=default&arch=any&searchon=names&keywords=python-guestfs Change-Id: Iffe60aa0351b732d543927afa1f1e846ba2a89fd --- lib/nova_plugins/hypervisor-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index b25bc0c367..321775d324 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -100,7 +100,7 @@ function install_nova_hypervisor { if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then if is_ubuntu; then - install_package python-guestfs + install_package python3-guestfs # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs: # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725) INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)" From e651d9ef8840bb7dd497b557125ce1cd5290993d Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 19 Nov 2020 14:50:01 +0000 Subject: [PATCH 1364/1936] [OVN] Use OVN from packages This patch changes the OVN module from DevStack to allow for using the OSapackaged version of OVN instead of compiling it from source. A new variable called OVN_BUILD_FROM_SOURCE has been introduced and when set to False (the default value) OVN will then use the packaged version for setting up DevStack. Note, in the stop_ovn() function, the OVN metadata agent service name was wrong and the service wasn't being stopped as part of ./unstack.sh. This patch also fixed it as well. Change-Id: Ib41e3b486550200572afd6b3ba783d7644d70d44 Signed-off-by: Lucas Alvares Gomes Co-Authored-By: Slawek Kaplonski --- lib/neutron_plugins/ovn_agent | 262 +++++++++++++++++++++------------- tools/fixup_stuff.sh | 8 ++ 2 files changed, 172 insertions(+), 98 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b7330db9db..b661f593a4 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -72,6 +72,7 @@ OVN_UUID=${OVN_UUID:-} # Whether or not to build the openvswitch kernel module from ovs. This is required # unless the distro kernel includes ovs+conntrack support. OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES) +OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE) # Whether or not to install the ovs python module from ovs source. This can be # used to test and validate new ovs python features. 
This should only be used @@ -98,7 +99,10 @@ fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) -OVS_PREFIX=/usr/local +OVS_PREFIX= +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVS_PREFIX=/usr/local +fi OVS_SBINDIR=$OVS_PREFIX/sbin OVS_BINDIR=$OVS_PREFIX/bin OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch @@ -116,6 +120,24 @@ NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" +OVN_NORTHD_SERVICE=ovn-northd.service +if is_ubuntu; then + # The ovn-central.service file on Ubuntu is responsible for starting + # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service) + OVN_NORTHD_SERVICE=ovn-central.service +fi +OVSDB_SERVER_SERVICE=ovsdb-server.service +OVS_VSWITCHD_SERVICE=ovs-vswitchd.service +OVN_CONTROLLER_SERVICE=ovn-controller.service +OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service + OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service + OVN_NORTHD_SERVICE=devstack@ovn-northd.service + OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service + OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service +fi + # Defaults Overwrite # ------------------ @@ -131,10 +153,26 @@ ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} # Utility Functions # ----------------- +function wait_for_sock_file { + local count=0 + while [ ! -S $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 5 ]; then + die $LINENO "Socket $1 not found" + fi + done +} + function use_new_ovn_repository { if [ -z "$is_new_ovn" ]; then local ovs_repo_dir=$DEST/$OVS_REPO_NAME if [ ! -d $ovs_repo_dir ]; then + git_timed clone $OVS_REPO $ovs_repo_dir + pushd $ovs_repo_dir + git checkout $OVS_BRANCH + popd + else clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH fi # Check the split commit exists in the current branch @@ -153,14 +191,14 @@ function use_new_ovn_repository { # neutron-ovs-cleanup uses the OVSDB native interface. function ovn_base_setup_bridge { local bridge=$1 - local addbr_cmd="ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" + local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" fi $addbr_cmd - ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } function _start_process { @@ -229,8 +267,8 @@ function create_public_bridge { local ext_gw_ifc ext_gw_ifc=$(get_ext_gw_interface) - ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 - ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc + sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc if [ -n "$FLOATING_RANGE" ]; then local cidr_len=${FLOATING_RANGE#*/} sudo ip addr flush dev $ext_gw_ifc @@ -337,35 +375,46 @@ function install_ovn { # Check the OVN configuration ovn_sanity_check - # If OVS is already installed, remove it, because we're about to re-install - # it from source. 
- for package in openvswitch openvswitch-switch openvswitch-common; do - if is_package_installed $package ; then - uninstall_package $package - fi - done - # Install tox, used to generate the config (see devstack/override-defaults) pip_install tox - remove_ovs_packages - sudo rm -f $OVS_RUNDIR/* - compile_ovs $OVN_BUILD_MODULES - if use_new_ovn_repository; then - compile_ovn $OVN_BUILD_MODULES + sudo mkdir -p $OVS_RUNDIR + sudo chown $(whoami) $OVS_RUNDIR + # NOTE(lucasagomes): To keep things simpler, let's reuse the same + # RUNDIR for both OVS and OVN. This way we avoid having to specify the + # --db option in the ovn-{n,s}bctl commands while playing with DevStack + sudo ln -s $OVS_RUNDIR $OVN_RUNDIR + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + # If OVS is already installed, remove it, because we're about to + # re-install it from source. + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package ; then + uninstall_package $package + fi + done + + remove_ovs_packages + sudo rm -f $OVS_RUNDIR/* + + compile_ovs $OVN_BUILD_MODULES + if use_new_ovn_repository; then + compile_ovn $OVN_BUILD_MODULES + fi + + sudo mkdir -p $OVS_PREFIX/var/log/openvswitch + sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch + sudo mkdir -p $OVS_PREFIX/var/log/ovn + sudo chown $(whoami) $OVS_PREFIX/var/log/ovn + else + fixup_ovn_centos + install_package $(get_packages openvswitch) + install_package $(get_packages ovn) fi # Ensure that the OVS commands are accessible in the PATH - OVS_BINDIR=${OVS_BINDIR:-/usr/local/bin} export PATH=$OVS_BINDIR:$PATH - sudo mkdir -p $OVS_RUNDIR - sudo chown $(whoami) $OVS_RUNDIR - sudo mkdir -p $OVS_PREFIX/var/log/openvswitch - sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch - sudo mkdir -p $OVS_PREFIX/var/log/ovn - sudo chown $(whoami) $OVS_PREFIX/var/log/ovn - # Archive log files and create new local log_archive_dir=$LOGDIR/archive mkdir -p $log_archive_dir @@ -494,7 +543,7 @@ function configure_ovn { iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $OVN_META_CONF ovs ovsdb_connection unix:$OVS_RUNDIR/db.sock + iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then iniset $OVN_META_CONF ovn \ @@ -533,51 +582,58 @@ function _start_ovs { enable_service ovsdb-server enable_service ovs-vswitchd - if [ ! -f $OVS_DATADIR/conf.db ]; then - ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema - fi + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + if [ ! -f $OVS_DATADIR/conf.db ]; then + ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema + fi - if is_service_enabled ovn-controller-vtep; then - if [ ! -f $OVS_DATADIR/vtep.db ]; then - ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + if is_service_enabled ovn-controller-vtep; then + if [ ! 
-f $OVS_DATADIR/vtep.db ]; then + ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + fi fi - fi - local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" - dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" - if is_service_enabled ovn-controller-vtep; then - dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" + dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" + if is_service_enabled ovn-controller-vtep; then + dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + fi + dbcmd+=" $OVS_DATADIR/conf.db" + _run_process ovsdb-server "$dbcmd" + + # Note: ovn-controller will create and configure br-int once it is started. + # So, no need to create it now because nothing depends on that bridge here. + local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + else + _start_process "$OVSDB_SERVER_SERVICE" + _start_process "$OVS_VSWITCHD_SERVICE" fi - dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" echo "Configuring OVSDB" if is_service_enabled tls-proxy; then - ovs-vsctl --no-wait set-ssl \ + sudo ovs-vsctl --no-wait set-ssl \ $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ $INT_CA_DIR/ca-chain.pem fi - ovs-vsctl --no-wait set open_vswitch . system-type="devstack" - ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" - ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" - ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" - ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" - ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" + + sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST + sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then - ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" fi - # Note: ovn-controller will create and configure br-int once it is started. - # So, no need to create it now because nothing depends on that bridge here. - - local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" - _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" - if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE - ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} fi if is_service_enabled ovn-controller-vtep ; then @@ -595,20 +651,20 @@ function _start_ovs { } function _start_ovn_services { - _start_process "devstack@ovsdb-server.service" - _start_process "devstack@ovs-vswitchd.service" + _start_process "$OVSDB_SERVER_SERVICE" + _start_process "$OVS_VSWITCHD_SERVICE" - if is_service_enabled ovs-vtep ; then - _start_process "devstack@ovs-vtep.service" - fi if is_service_enabled ovn-northd ; then - _start_process "devstack@ovn-northd.service" + _start_process "$OVN_NORTHD_SERVICE" fi if is_service_enabled ovn-controller ; then - _start_process "devstack@ovn-controller.service" + _start_process "$OVN_CONTROLLER_SERVICE" fi if is_service_enabled ovn-controller-vtep ; then - _start_process "devstack@ovn-controller-vtep.service" + _start_process "$OVN_CONTROLLER_VTEP_SERVICE" + fi + if is_service_enabled ovs-vtep ; then + _start_process "devstack@ovs-vtep.service" fi if is_service_enabled q-ovn-metadata-agent; then _start_process "devstack@q-ovn-metadata-agent.service" @@ -627,39 +683,47 @@ function start_ovn { fi if is_service_enabled ovn-northd ; then - if is_service_enabled tls-proxy; then - local tls_args="\ - --ovn-nb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ - --ovn-nb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ - --ovn-nb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ - --ovn-sb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ - --ovn-sb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ - --ovn-sb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ - " + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" + + _run_process ovn-northd "$cmd" "$stop_cmd" else - local tls_args="" + _start_process "$OVN_NORTHD_SERVICE" fi - local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor $tls_args start_northd" - local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" - ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + # Wait for the service to be ready + wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then - local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" - local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" - _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + else + _start_process "$OVN_CONTROLLER_SERVICE" + fi fi if is_service_enabled ovn-controller-vtep ; then - local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" - - _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + else + _start_process "$OVN_CONTROLLER_VTEP_SERVICE" + fi fi if is_service_enabled q-ovn-metadata-agent; then @@ -668,13 +732,6 @@ function start_ovn { setup_logging $OVN_META_CONF fi - # NOTE(lucasagomes): To keep things simpler, let's reuse the same - # RUNDIR for both OVS and OVN. This way we avoid having to specify the - # --db option in the ovn-{n,s}bctl commands while playing with DevStack - if use_new_ovn_repository; then - sudo ln -s $OVS_RUNDIR $OVN_RUNDIR - fi - _start_ovn_services } @@ -683,26 +740,35 @@ function _stop_ovs_dp { modprobe -q -r vport_geneve vport_vxlan openvswitch || true } +function _stop_process { + local service=$1 + echo "Stopping process $service" + if $SYSTEMCTL is-enabled $service; then + $SYSTEMCTL stop $service + $SYSTEMCTL disable $service + fi +} + function stop_ovn { if is_service_enabled q-ovn-metadata-agent; then sudo pkill -9 -f haproxy || : - stop_process neutron-ovn-metadata-agent + _stop_process "devstack@q-ovn-metadata-agent.service" fi if is_service_enabled ovn-controller-vtep ; then - stop_process ovn-controller-vtep + _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" fi if is_service_enabled ovn-controller ; then - stop_process ovn-controller + _stop_process "$OVN_CONTROLLER_SERVICE" fi if is_service_enabled ovn-northd ; then - stop_process ovn-northd + _stop_process "$OVN_NORTHD_SERVICE" fi if is_service_enabled ovs-vtep ; then - stop_process ovs-vtep + _stop_process "devstack@ovs-vtep.service" fi - stop_process ovs-vswitchd - stop_process ovsdb-server + _stop_process "$OVS_VSWITCHD_SERVICE" + _stop_process "$OVSDB_SERVER_SERVICE" _stop_ovs_dp } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index cd7ee59c1d..25f726892f 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -184,6 +184,14 @@ function fixup_suse { sudo zypper up -y p11-kit ca-certificates-mozilla } +function fixup_ovn_centos { + if [[ $os_VENDOR != "CentOS" ]]; then + return + fi + # OVN packages are part of this release for CentOS + yum_install centos-release-openstack-victoria +} + function fixup_all { fixup_keystone fixup_ubuntu From 04b0b61557f7dad6c32b566255c21a36e4b0aefa Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 22 Dec 2020 12:23:56 -0600 Subject: [PATCH 1365/1936] Install swift keystone extras 
requirements Since the introduction of I8f24b839bf42e2fb9803dc7df3a30ae20cf264 s-proxy is no longer able to launch as keystonemiddleware (listed under test-requirements.txt) has not been installed. keystonemiddleware is listed as extras requirements in swift - https://github.com/openstack/swift/blob/e0d46d77fa740768f1dd5b989a63be85ff1fec20/setup.cfg#L79 Let's install swift keystone extra requirements also. Closes-Bug: #1909018 Change-Id: I02c692e95d70017eea03d82d75ae6c5e87bde8b1 --- lib/swift | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index a981dfc10a..790fb99442 100644 --- a/lib/swift +++ b/lib/swift @@ -741,7 +741,9 @@ function init_swift { function install_swift { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - setup_develop $SWIFT_DIR + # keystonemiddleware needs to be installed via keystone extras as defined + # in setup.cfg, see bug #1909018 for more details. + setup_develop $SWIFT_DIR keystone if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi fi From fc41717222da029274236714f1447fcba1277b06 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 23 Dec 2020 10:52:20 +0000 Subject: [PATCH 1366/1936] cinder: Double [DEFAULT]/rpc_response_timeout to 120 Bug #1873234 documents a number of CI failures caused by RPC requests from c-api to c-vol timing out due to `lvchange` taking longer than the default rpc_response_timeout of 60 seconds to complete. While the underlying reason for the slowness should be investigated by the cinder team a trivial workaround to the fallout created by these timeouts is to simply double the client RPC timeout used by c-api, allowing c-vol to return and overall the request to succeed. Change-Id: I53dc0ae10af6aa13f1349b58373932eb6a15ab02 Related-Bug: #1873234 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index b892b91791..6c97e114a6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -236,6 +236,11 @@ function configure_cinder { iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + # Avoid RPC timeouts in slow CI and test environments by doubling the + # default response timeout set by RPC clients. See bug #1873234 for more + # details and example failures. + iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" From 7e3428b9872d5b4b01ee10f2d29c55e7e2accbbd Mon Sep 17 00:00:00 2001 From: Federico Ressi Date: Fri, 11 Dec 2020 15:40:32 +0100 Subject: [PATCH 1367/1936] Install systemd-python from distribution package CentOS 8 support start to fail because of an issue [1] compiling systemd python binding modules. Let install it from distribution packages as documented by python-systemd project Web page [2] [1] https://bugzilla.redhat.com/show_bug.cgi?id=1862714 [2] https://github.com/systemd/python-systemd Closes-Bug: #1908386 Change-Id: Ic7cfd72ce1b875e75b1cdbdd44a902b25d51abb8 --- doc/source/systemd.rst | 25 ------------------------- files/debs/general | 1 + files/rpms-suse/general | 2 +- files/rpms/general | 2 +- stack.sh | 1 - 5 files changed, 3 insertions(+), 28 deletions(-) diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst index 4f83b36f92..78535202d8 100644 --- a/doc/source/systemd.rst +++ b/doc/source/systemd.rst @@ -196,31 +196,6 @@ See the `remote-pdb`_ home page for more options. .. 
_`remote-pdb`: https://pypi.org/project/remote-pdb/ -Known Issues -============ - -Be careful about systemd python libraries. There are 3 of them on -pypi, and they are all very different. They unfortunately all install -into the ``systemd`` namespace, which can cause some issues. - -- ``systemd-python`` - this is the upstream maintained library, it has - a version number like systemd itself (currently ``234``). This is - the one you want. -- ``systemd`` - a python 3 only library, not what you want. -- ``python-systemd`` - another library you don't want. Installing it - on a system will break ansible's ability to run. The package has now - been renamed to ``cysystemd``, which avoids the namespace collision. - - -If we were using user units, the ``[Service]`` - ``Group=`` parameter -doesn't seem to work with user units, even though the documentation -says that it should. This means that we will need to do an explicit -``/usr/bin/sg``. This has the downside of making the SYSLOG_IDENTIFIER -be ``sg``. We can explicitly set that with ``SyslogIdentifier=``, but -it's really unfortunate that we're going to need this work -around. This is currently not a problem because we're only using -system units. - Future Work =========== diff --git a/files/debs/general b/files/debs/general index 4bf1ff4039..d64417f1b7 100644 --- a/files/debs/general +++ b/files/debs/general @@ -28,6 +28,7 @@ pkg-config psmisc python3-dev python3-pip +python3-systemd python3-venv tar tcpdump diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 0af2b5b169..0de0876dcd 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -20,10 +20,10 @@ openssl pcre-devel # python-pcre postgresql-devel # psycopg2 psmisc +python3-systemd python-cmd2 # dist:opensuse-12.3 python-devel # pyOpenSSL python-xml -systemd-devel # for systemd-python tar tcpdump unzip diff --git a/files/rpms/general b/files/rpms/general index c42ce529e7..cfcd7ff261 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -25,8 +25,8 @@ postgresql-devel # psycopg2 psmisc python3-devel python3-pip +python3-systemd redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 -systemd-devel # for systemd-python tar tcpdump unzip diff --git a/stack.sh b/stack.sh index bb4dfa2561..036afd7b00 100755 --- a/stack.sh +++ b/stack.sh @@ -761,7 +761,6 @@ fixup_all # Install subunit for the subunit output stream pip_install -U os-testr -pip_install_gr systemd-python # the default rate limit of 1000 messages / 30 seconds is not # sufficient given how verbose our logging is. iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" From 36a575b036e55c31ab624447c5d73bc56408a672 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 13 Nov 2020 06:57:33 -0800 Subject: [PATCH 1368/1936] Clean up create_disk() a little The create_disk() helper had some redundant checks and dead code. This refactors it to put all the stale cleanup at the top, and groups the new actions together with more relevant comments to make it easier to understand. Change-Id: I1f6218a1994e66786ed9a8065e30bcceec7b8956 --- functions | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/functions b/functions index e679b0f9bc..fc87a5512d 100644 --- a/functions +++ b/functions @@ -759,16 +759,14 @@ function create_disk { local loopback_disk_size=${3} local key - # Create a loopback disk and format it to XFS. 
- if [[ -e ${disk_image} ]]; then - if egrep -q ${storage_data_dir} /proc/mounts; then - sudo umount ${storage_data_dir} - sudo rm -f ${disk_image} - fi - fi + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" - sudo mkdir -p ${storage_data_dir}/drives/images + destroy_disk $disk_image $storage_data_dir + # Create an empty file of the correct size (and ensure the + # directory structure up to that path exists) + sudo mkdir -p $(dirname ${disk_image}) sudo truncate -s ${loopback_disk_size} ${disk_image} # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in @@ -778,16 +776,9 @@ function create_disk { # Swift and Ceph. sudo mkfs.xfs -f -i size=1024 ${disk_image} - # Unmount the target, if mounted - if egrep -q $storage_data_dir /proc/mounts; then - sudo umount $storage_data_dir - fi - - # Clear any old fstab rules, install a new one for this disk, and mount it - key=$(echo $disk_image | sed 's#/.##') - key="devstack-$key" - sudo sed -i '/.*comment=$key.*/ d' /etc/fstab + # Install a new loopback fstab entry for this disk image, and mount it echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab + sudo mkdir -p $storage_data_dir sudo mount -v $storage_data_dir } @@ -795,6 +786,10 @@ function create_disk { function destroy_disk { local disk_image=$1 local storage_data_dir=$2 + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" # Unmount the target, if mounted if egrep -q $storage_data_dir /proc/mounts; then @@ -802,10 +797,10 @@ function destroy_disk { fi # Clear any fstab rules - sed -i '/.*comment=$key.*/ d' /etc/fstab + sudo sed -i '/.*comment=$key.*/ d' /etc/fstab # Delete the file - sudo rm $disk_image + sudo rm -f $disk_image } From b37240382dc300f30efd83894fb0a9077e98d0ec Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 8 Jan 2021 09:41:56 +0100 Subject: [PATCH 1369/1936] Drop opensuse platform job It has been broken for over a month. Feel free to revert in combination with a fix, better with a commitment to keep the job in working shape permanently. Change-Id: I2604374c23716d56de29e16a459b7c7f45b84891 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index c1406716fe..bf32af070e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -584,13 +584,6 @@ voting: false timeout: 9000 -- job: - name: devstack-platform-opensuse-15 - parent: tempest-full-py3 - description: openSUSE 15.x platform test - nodeset: devstack-single-node-opensuse-15 - voting: false - - job: name: devstack-platform-bionic parent: tempest-full-py3 @@ -686,7 +679,6 @@ jobs: - devstack - devstack-ipv6 - - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-centos-8 - devstack-platform-bionic From edee6dc341e40939360b36ce9fd09052dea1ee4d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 12 Jan 2021 14:04:38 -0800 Subject: [PATCH 1370/1936] Add debug helper script I'm not sure if others will find this useful, but I use this script to run pieces of devstack while trying to write/debug things. It saves me a lot of time being able to get to some project-lib function without a full clean/re-stack. Figured I'd share in case it's worth putting into the tree. 
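As a rough illustration (the project and function names below are just examples of what the script accepts), iterating on a couple of lib/glance functions without a full re-stack could look like:

  ./tools/debug_function.sh glance stop_glance start_glance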
Change-Id: I9a92fa71d34f50c2f5ba7d11c1a45301bd4478bf --- tools/debug_function.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 tools/debug_function.sh diff --git a/tools/debug_function.sh b/tools/debug_function.sh new file mode 100755 index 0000000000..68bd85dc61 --- /dev/null +++ b/tools/debug_function.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# This is a small helper to speed development and debug with devstack. +# It is intended to help you run a single function in a project module +# without having to re-stack. +# +# For example, to run the just start_glance function, do this: +# +# ./tools/debug_function.sh glance start_glance + +if [ ! -f "lib/$1" ]; then + echo "Usage: $0 [project] [function] [function...]" +fi + +source stackrc +source lib/$1 +shift +set -x +while [ "$1" ]; do + echo ==== Running $1 ==== + $1 + echo ==== Done with $1 ==== + shift +done From bcd0acf6c0b5d6501e91133c3a937b3fc40f7122 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 13 Jan 2021 12:28:18 -0800 Subject: [PATCH 1371/1936] Also cap pip in tempest tox venv I am still unable to stack because of pip 20.3, but this time because of the tempest venv build. This forces it to the same capped pip, which further works around the problem. Change-Id: Icfaaefe1aa576733764b393cba96d276c9b1cf68 Related-Bug: #1906367 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 552e1c22a3..8ee986d555 100644 --- a/lib/tempest +++ b/lib/tempest @@ -703,6 +703,9 @@ function install_tempest { git checkout $TEMPEST_BRANCH tox -r --notest -efull + # TODO: remove the trailing pip constraint when a proper fix + # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 + $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements From ed164289a57549fb2b2404fc77052bb09ceb5105 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 18 Jan 2021 09:57:00 -0800 Subject: [PATCH 1372/1936] Always verify os_glance reserved namespace On master, we should always enable tempest's verification of Glance's os_glance namespace enforcement. Change-Id: Ia71878e6c53ee683a868112959876798e946e2ce Depends-On: https://review.opendev.org/c/openstack/glance/+/771070 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 552e1c22a3..b2047000e0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -351,6 +351,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW + iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From eef2a0d751f4d53fb7b475e69e229cb541f7af2c Mon Sep 17 00:00:00 2001 From: Vanou Ishii Date: Wed, 20 Jan 2021 14:15:57 +0900 Subject: [PATCH 1373/1936] Fix Early Use of die function in stack.sh This commit fixes use of the die function before it's defined. The die function is only available after sourcing $TOP_DIR/functions, which chain-sources $TOP_DIR/functions-common. Because the fixed portion of stack.sh checks for the existence of $TOP_DIR/inc, and sourcing $TOP_DIR/functions chain-sources files under $TOP_DIR/inc, this commit uses echo and exit instead of the die function.
Closes-Bug: #1913021 Change-Id: I5ec174cf7b02269525b1bfd0bfa94ea889d16fce --- stack.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 036afd7b00..c334159550 100755 --- a/stack.sh +++ b/stack.sh @@ -96,19 +96,25 @@ fi # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - die $LINENO "missing devstack/files" + set +o xtrace + echo "missing devstack/files" + exit 1 fi # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/inc`` directory is present if [ ! -d $TOP_DIR/inc ]; then - die $LINENO "missing devstack/inc" + set +o xtrace + echo "missing devstack/inc" + exit 1 fi # ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then - die $LINENO "missing devstack/lib" + set +o xtrace + echo "missing devstack/lib" + exit 1 fi # Check if run in POSIX shell From b4bba2f2c817dd5c7594e8c7950021969704db5d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 4 Feb 2021 23:24:17 +0000 Subject: [PATCH 1374/1936] Increase volumes quota for service project If we are backing glance with cinder, we will use more volumes and if timing is right, we will clash with other tests and be unable to create what we need. If we are backing glance with cinder, we should increase the volumes quota, which this patch does (to 50 from a default of 10). Closes-Bug: #1914665 Change-Id: I2ad1c4d21f996ee1a9ce29ba4f1a4b8f5720f8fb --- lib/cinder | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 6c97e114a6..33deff61f2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -539,6 +539,14 @@ function create_volume_types { OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} fi done + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" + fi fi } From a2273cc4c86348d0dd17ff8c64b2f1edeb620225 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Sat, 6 Feb 2021 16:23:36 -0500 Subject: [PATCH 1375/1936] [OVN] Support for network-logging config This patchset adds configuration support for network logging when the OVN driver is enabled. 
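As a rough sketch of how this could be consumed (the q-log service name comes from the check added below; the values are only illustrative overrides of the new defaults), a local.conf might look like:

  [[local|localrc]]
  enable_service q-log
  Q_LOG_DRIVER_RATE_LIMIT=200
  Q_LOG_DRIVER_BURST_LIMIT=50
  Q_LOG_DRIVER_LOG_BASE=acl_log_meter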
Depends-On: https://review.opendev.org/768129 Change-Id: I6fc0973bedfd1dcc72b01981cd64f9283662d37c Signed-off-by: Flavio Fernandes --- lib/neutron_plugins/ovn_agent | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..f2baf4a08d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -149,6 +149,9 @@ Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} # this one allows empty: ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} # Utility Functions # ----------------- @@ -490,6 +493,12 @@ function configure_ovn_plugin { populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + if is_service_enabled q-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else From 30d9bf9a6d8af9590b04caa3757956522f2004d4 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 19 Jan 2021 12:10:52 -0800 Subject: [PATCH 1376/1936] Async task support We have a *ton* of stuff in devstack that is very linear, specifically the ten-ish minutes we spend loading osc to run a single API command against something. We also generate configs, sync databases, and other things that use one core of our worker and make our runtime longer than it really needs to be. The idea in this patch is to make it super simple to run some things in the background and then wait for them to finish before proceeding to something that will require them to be done. This avoids the interleaving you would expect by redirecting the async tasks to a log file, and then cat'ing that log file synchronously during the wait operation. The per-task log file remains so it's easier to examine it in isolation. Multiple people have reported between 22-30% improvement in the time it takes to stack with this. More can be done, but what is here already makes a significant difference. 
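The intended pattern is roughly the following sketch (function names are illustrative; both of these are converted in this change):

  # Kick independent, long-running steps into the background
  async_runfunc install_tempest
  async_runfunc configure_horizon
  # ... unrelated work happens here ...
  # Replay each task's log and block before anything depends on the results
  async_wait install_tempest configure_horizon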
Change-Id: I270a910b531641b023c13f75dfedca057a1f1031 --- .zuul.yaml | 12 +++ clean.sh | 2 +- extras.d/80-tempest.sh | 3 +- functions | 1 + inc/async | 225 +++++++++++++++++++++++++++++++++++++++++ lib/keystone | 40 ++++---- lib/nova | 29 +++--- stack.sh | 46 ++++++--- unstack.sh | 1 + 9 files changed, 315 insertions(+), 44 deletions(-) create mode 100644 inc/async diff --git a/.zuul.yaml b/.zuul.yaml index c1406716fe..7b0696bcd7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -598,6 +598,17 @@ nodeset: openstack-single-node-bionic voting: false +- job: + name: devstack-async + parent: tempest-full-py3 + description: Async mode enabled + voting: false + vars: + devstack_localrc: + DEVSTACK_PARALLEL: True + zuul_copy_output: + /opt/stack/async: logs + - job: name: devstack-platform-fedora-latest parent: tempest-full-py3 @@ -690,6 +701,7 @@ - devstack-platform-fedora-latest - devstack-platform-centos-8 - devstack-platform-bionic + - devstack-async - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/clean.sh b/clean.sh index 4cebf1d9ea..870dfd4313 100755 --- a/clean.sh +++ b/clean.sh @@ -113,7 +113,7 @@ cleanup_rpc_backend cleanup_database # Clean out data and status -sudo rm -rf $DATA_DIR $DEST/status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async # Clean out the log file and log directories if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 15ecfe39eb..06c73ec763 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -6,7 +6,7 @@ if is_service_enabled tempest; then source $TOP_DIR/lib/tempest elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Tempest" - install_tempest + async_runfunc install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running : @@ -17,6 +17,7 @@ if is_service_enabled tempest; then # local.conf Tempest option overrides : elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest echo_summary "Initializing Tempest" configure_tempest echo_summary "Installing Tempest Plugins" diff --git a/functions b/functions index fc87a5512d..89bbab2085 100644 --- a/functions +++ b/functions @@ -21,6 +21,7 @@ source ${FUNC_DIR}/inc/ini-config source ${FUNC_DIR}/inc/meta-config source ${FUNC_DIR}/inc/python source ${FUNC_DIR}/inc/rootwrap +source ${FUNC_DIR}/inc/async # Save trace setting _XTRACE_FUNCTIONS=$(set +o | grep xtrace) diff --git a/inc/async b/inc/async new file mode 100644 index 0000000000..d29168f2f5 --- /dev/null +++ b/inc/async @@ -0,0 +1,225 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... +# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse False DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. 
If the message contains "%command" then the +# full command line of the job will be substituted in the output +function async_log { + local name="$1" + shift + local message="$*" + local inifile=${DEST}/async/${name}.ini + local pid + local command + + pid=$(iniget $inifile job pid) + command=$(iniget $inifile job command | tr '#' '-') + message=$(echo "$message" | sed "s#%command#$command#g") + + echo "[Async ${name}:${pid}]: $message" +} + +# Inner function that actually runs the requested task. We wrap it like this +# just so we can emit a finish message as soon as the work is done, to make +# it easier to find the tracking just before an error. +function async_inner { + local name="$1" + local rc + shift + set -o xtrace + if $* >${DEST}/async/${name}.log 2>&1; then + rc=0 + set +o xtrace + async_log "$name" "finished successfully" + else + rc=$? + set +o xtrace + async_log "$name" "FAILED with rc $rc" + fi + iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + return $rc +} + +# Run something async. Takes a symbolic name and a list of arguments of +# what to run. Ideally this would be rarely used and async_runfunc() would +# be used everywhere for readability. +# +# This spawns the work in a background worker, records a "future" to be +# collected by a later call to async_wait() +function async_run { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local name="$1" + shift + local inifile=${DEST}/async/${name}.ini + + touch $inifile + iniset $inifile job command "$*" + iniset $inifile job start_time $(date +%s%3N) + + if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + async_inner $name $* & + iniset $inifile job pid $! + async_log "$name" "running: %command" + $xtrace + else + iniset $inifile job pid "self" + async_log "$name" "Running synchronously: %command" + $xtrace + $* + return $? + fi +} + +# Shortcut for running a shell function async. Uses the function name as the +# async name. +function async_runfunc { + async_run $1 $* +} + +# Wait for an async future to complete. May return immediately if already +# complete, or of the future has already been waited on (avoid this). May +# block until the future completes. +function async_wait { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local pid rc running inifile runtime + rc=0 + for name in $*; do + running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) + inifile="${DEST}/async/${name}.ini" + + if pid=$(async_pidof "$name"); then + async_log "$name" "Waiting for completion of %command" \ + "($running other jobs running)" + time_start async_wait + if [[ "$pid" != "self" ]]; then + # Do not actually call wait if we ran synchronously + if wait $pid; then + rc=0 + else + rc=$? + fi + cat ${DEST}/async/${name}.log + fi + time_stop async_wait + local start_time + local end_time + start_time=$(iniget $inifile job start_time) + end_time=$(iniget $inifile job end_time) + _async_incr_bg_time $(($end_time - $start_time)) + runtime=$((($end_time - $start_time) / 1000)) + async_log "$name" "finished %command with result" \ + "$rc in $runtime seconds" + rm -f $inifile + if [ $rc -ne 0 ]; then + echo Stopping async wait due to error: $* + break + fi + else + # This could probably be removed - it is really just here + # to help notice if you wait for something by the wrong + # name, but it also shows up for things we didn't start + # because they were not enabled. 
+ echo Not waiting for async task $name that we never started or \ + has already been waited for + fi + done + + $xtrace + return $rc +} + +# Check for uncollected futures and wait on them +function async_cleanup { + local name + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + for inifile in $(find ${DEST}/async -name '*.ini'); do + name=$(basename $pidfile .ini) + echo "WARNING: uncollected async future $name" + async_wait $name || true + done +} + +# Make sure our async dir is created and clean +function async_init { + local async_dir=${DEST}/async + + # Clean any residue if present from previous runs + rm -Rf $async_dir + + # Make sure we have a state directory + mkdir -p $async_dir +} + +function async_print_timing { + local bg_time_minus_wait + local elapsed_time + local serial_time + local speedup + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + # The logic here is: All the background task time would be + # serialized if we did not do them in the background. So we can + # add that to the elapsed time for the whole run. However, time we + # spend waiting for async things to finish adds to the elapsed + # time, but is time where we're not doing anything useful. Thus, + # we substract that from the would-be-serialized time. + + bg_time_minus_wait=$((\ + ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000)) + elapsed_time=$(($(date "+%s") - $_TIME_BEGIN)) + serial_time=$(($elapsed_time + $bg_time_minus_wait)) + + echo + echo "=================" + echo " Async summary" + echo "=================" + echo " Time spent in the background minus waits: $bg_time_minus_wait sec" + echo " Elapsed time: $elapsed_time sec" + echo " Time if we did everything serially: $serial_time sec" + echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}") +} diff --git a/lib/keystone b/lib/keystone index d4c7b063bb..66e867ca68 100644 --- a/lib/keystone +++ b/lib/keystone @@ -318,25 +318,25 @@ function create_keystone_accounts { local admin_role="admin" local member_role="member" - get_or_add_user_domain_role $admin_role $admin_user default + async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default # Create service project/role get_or_create_domain "$SERVICE_DOMAIN_NAME" - get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" + async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" # Service role, so service users do not have to be admins - get_or_create_role service + async_run ks-service get_or_create_role service # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their project, # but ResellerAdmin is needed for a user to act as any project. The name of this # role is also configurable in swift-proxy.conf - get_or_create_role ResellerAdmin + async_run ks-reseller get_or_create_role ResellerAdmin # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
local another_role="anotherrole" - get_or_create_role $another_role + async_run ks-anotherrole get_or_create_role $another_role # invisible project - admin can't see this one local invis_project @@ -349,10 +349,12 @@ function create_keystone_accounts { demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "default" "demo@example.com") - get_or_add_user_project_role $member_role $demo_user $demo_project - get_or_add_user_project_role $admin_role $admin_user $demo_project - get_or_add_user_project_role $another_role $demo_user $demo_project - get_or_add_user_project_role $member_role $demo_user $invis_project + async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} + + async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project + async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project + async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project # alt_demo local alt_demo_project @@ -361,9 +363,9 @@ function create_keystone_accounts { alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - get_or_add_user_project_role $admin_role $admin_user $alt_demo_project - get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project + async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project # groups local admin_group @@ -373,11 +375,15 @@ function create_keystone_accounts { non_admin_group=$(get_or_create_group "nonadmins" \ "default" "non-admin group") - get_or_add_group_project_role $member_role $non_admin_group $demo_project - get_or_add_group_project_role $another_role $non_admin_group $demo_project - get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $admin_role $admin_group $admin_project + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis} + async_wait ks-alt-{member,admin,another} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} if is_service_enabled ldap; then create_ldap_domain diff --git a/lib/nova b/lib/nova index d7426039c4..0a28cd97aa 100644 --- a/lib/nova +++ b/lib/nova @@ -741,31 +741,36 @@ function create_nova_keys_dir { sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys } +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database $dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell +} + # init_nova() - Initialize databases, etc. 
function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + # (Re)create nova databases + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done + recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - recreate_database nova_cell0 - # map_cell0 will create the cell mapping record in the nova_api DB so - # this needs to come after the api_db sync happens. We also want to run - # this before the db sync below since that will migrate both the nova - # and nova_cell0 databases. + # this needs to come after the api_db sync happens. $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` - # (Re)create nova databases - for i in $(seq 1 $NOVA_NUM_CELLS); do - recreate_database nova_cell${i} - $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell + # Wait for DBs to finish from above + for i in $(seq 0 $NOVA_NUM_CELLS); do + async_wait nova-cell-$i done - # Migrate nova and nova_cell0 databases. - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations diff --git a/stack.sh b/stack.sh index 036afd7b00..dcfef6db48 100755 --- a/stack.sh +++ b/stack.sh @@ -330,6 +330,9 @@ if [[ ! -d $DATA_DIR ]]; then safe_chmod 0755 $DATA_DIR fi +# Create and/or clean the async state directory +async_init + # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. 
@@ -1082,19 +1085,19 @@ if is_service_enabled keystone; then create_keystone_accounts if is_service_enabled nova; then - create_nova_accounts + async_runfunc create_nova_accounts fi if is_service_enabled glance; then - create_glance_accounts + async_runfunc create_glance_accounts fi if is_service_enabled cinder; then - create_cinder_accounts + async_runfunc create_cinder_accounts fi if is_service_enabled neutron; then - create_neutron_accounts + async_runfunc create_neutron_accounts fi if is_service_enabled swift; then - create_swift_accounts + async_runfunc create_swift_accounts fi fi @@ -1107,9 +1110,11 @@ write_clouds_yaml if is_service_enabled horizon; then echo_summary "Configuring Horizon" - configure_horizon + async_runfunc configure_horizon fi +async_wait create_nova_accounts create_glance_accounts create_cinder_accounts +async_wait create_neutron_accounts create_swift_accounts configure_horizon # Glance # ------ @@ -1117,7 +1122,7 @@ fi # NOTE(yoctozepto): limited to node hosting the database which is the controller if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Configuring Glance" - init_glance + async_runfunc init_glance fi @@ -1131,7 +1136,7 @@ if is_service_enabled neutron; then # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then - init_neutron + async_runfunc init_neutron fi fi @@ -1161,7 +1166,7 @@ fi if is_service_enabled swift; then echo_summary "Configuring Swift" - init_swift + async_runfunc init_swift fi @@ -1170,7 +1175,7 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" - init_cinder + async_runfunc init_cinder fi # Placement Service @@ -1178,9 +1183,16 @@ fi if is_service_enabled placement; then echo_summary "Configuring placement" - init_placement + async_runfunc init_placement fi +# Wait for neutron and placement before starting nova +async_wait init_neutron +async_wait init_placement +async_wait init_glance +async_wait init_swift +async_wait init_cinder + # Compute Service # --------------- @@ -1192,7 +1204,7 @@ if is_service_enabled nova; then # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? 
If # not, remove the if here if is_service_enabled neutron; then - configure_neutron_nova + async_runfunc configure_neutron_nova fi fi @@ -1236,6 +1248,8 @@ if is_service_enabled cinder; then iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi +async_wait configure_neutron_nova + # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" @@ -1282,7 +1296,7 @@ fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova - create_flavors + async_runfunc create_flavors fi if is_service_enabled cinder; then echo_summary "Starting Cinder" @@ -1331,6 +1345,8 @@ if is_service_enabled horizon; then start_horizon fi +async_wait create_flavors + # Create account rc files # ======================= @@ -1467,8 +1483,12 @@ else exec 1>&3 fi +# Make sure we didn't leak any background tasks +async_cleanup + # Dump out the time totals time_totals +async_print_timing # Using the cloud # =============== diff --git a/unstack.sh b/unstack.sh index 3197cf136f..d9dca7c107 100755 --- a/unstack.sh +++ b/unstack.sh @@ -184,3 +184,4 @@ if is_service_enabled cinder && is_package_installed lvm2; then fi clean_pyc_files +rm -Rf $DEST/async From e11d367d8e31a4875301e2e890fa8ffede270ec2 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 30 Sep 2020 13:06:39 +0100 Subject: [PATCH 1377/1936] orchestrate-devstack: Copy controller ceph.conf and keyrings to subnode This change introduces a basic role to copy the contents of /etc/ceph between the controller and subnodes during orchestrate-devstack allowing a multinode ceph job to be introduced by I9ffdff44a3ad42ebdf26ab72e24dfe3b12b1ef8b. Note that this role is only used when devstack-plugin-ceph is enabled. Change-Id: I324c0f35db34f8540ca164bf8c6e3dea67c5b1b4 --- roles/orchestrate-devstack/tasks/main.yaml | 5 +++++ .../sync-controller-ceph-conf-and-keys/README.rst | 3 +++ .../tasks/main.yaml | 15 +++++++++++++++ 3 files changed, 23 insertions(+) create mode 100644 roles/sync-controller-ceph-conf-and-keys/README.rst create mode 100644 roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index f747943f3c..2b8ae01a62 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -18,6 +18,11 @@ name: sync-devstack-data when: devstack_services['tls-proxy']|default(false) + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + - name: Run devstack on the sub-nodes include_role: name: run-devstack diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. 
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller From b516efedf973d290c22c9279cf83d2dd47dc37fc Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 15 Feb 2021 10:11:43 +0000 Subject: [PATCH 1378/1936] nova: Default NOVA_USE_SERVICE_TOKEN to True Introduced in devstack by I2d7348c4a72af96c0ed2ef6c0ab75d16e9aec8fc and long tested by nova-next, this is now enabled by default by most deployment tools and should be enabled by default in devstack. Change-Id: Ia76b96fe87d99560db947a59cd0660aab9b05335 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 0a28cd97aa..1999753a8c 100644 --- a/lib/nova +++ b/lib/nova @@ -135,7 +135,7 @@ fi # ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with # user token while communicating to external RESP API's like Neutron, Cinder # and Glance. -NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. From 57b092dbceb95ed03f8d33f64a5cc60eabd57e50 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 8 Feb 2021 11:37:38 -0600 Subject: [PATCH 1379/1936] Stop configure 'member' role in tempest_roles Config option auth.tempest_roles is used to set the extra roles to all dynamic cred tests users. - https://opendev.org/openstack/tempest/src/commit/9b6f441fdc2a970410ea631dc1318896349e010f/tempest/common/credentials_factory.py#L82 Devstack sets the 'member' role in CONF.auth.tempest_roles - https://opendev.org/openstack/devstack/src/commit/556f84aea90c572873fc9834292635b41e590224/lib/tempest#L628 This causes issues for any tests that check specific roles and want to exclude the 'member' role, basically this bug - https://bugs.launchpad.net/devstack/+bug/1915740 Also, with the 'member' role assigned by default, Tempest will not be able to test the new secure-RBAC default 'reader' role. Let's remove this role assignment now and let tests configure what they want.
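Deployments or jobs that still rely on the extra role can opt back in themselves, for example with a local.conf override along these lines (illustrative only):

  [[test-config|$TEMPEST_CONFIG]]
  [auth]
  tempest_roles = member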
Closes-Bug: #1915740 Change-Id: I0b6ab9fb943c7b0925a0a0d2490a8bcdfa76cedc --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 8ee986d555..04540e5ea8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -625,7 +625,6 @@ function configure_tempest { rm -f $tmp_u_c_m # Auth: - iniset $TEMPEST_CONFIG auth tempest_roles "member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml From 48b7633ae84c5be77c6415d7f95ca696e4c0a2b6 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 16 Feb 2021 14:14:23 -0800 Subject: [PATCH 1380/1936] Fix nova db dependency When I reordered the nova database creation for better performance and cleaner arrangement, I broke the non-standard arrangement where the super and cell conductors are squashed together. In devstack, this is implemented by pointing the controllers at cell1 in the config, which makes it hard to create and sync the databases in the natural order. This manifested in a failure when running in this mode (which apparently Trove is). As a quick fix, this special-cases the setup for cell0 if that mode is enabled. I will follow this up with a cleaner refactor of all that stuff so this hack isn't required, but that will take a bit longer. Change-Id: I5385157c281beb041bf67cba546be20cf9497cbe --- lib/nova | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 0a28cd97aa..6913040d26 100644 --- a/lib/nova +++ b/lib/nova @@ -754,7 +754,17 @@ function init_nova { # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova databases - async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies. in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) done @@ -771,6 +781,11 @@ function init_nova { async_wait nova-cell-$i done + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # We didn't db sync cell0 above, so run it now + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + fi + # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations From f361122798b9e3163790bee81abfa0486746fa8a Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Wed, 17 Feb 2021 17:43:13 +0100 Subject: [PATCH 1381/1936] Fix DevStack setup on CentOS 8.3 CentOS 8.3 changed the name of the PowerTools repository to powertools: https://wiki.centos.org/Manuals/ReleaseNotes/CentOS8.2011#Yum_repo_file_and_repoid_changes With this repository disabled, DevStack fails to install libyaml-devel, which causes a failure to install many packages. In my environment DevStack stopped with an error caused by a missing wget. 
Keep the command using the old repository name, for compatibility with older CentOS releases. Change-Id: I5541a8aee8467abf10ce8a10d770618bdd693f02 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index d3c1476429..6375c8e5e0 100755 --- a/stack.sh +++ b/stack.sh @@ -365,6 +365,9 @@ if [[ $DISTRO == "rhel8" ]]; then # EPEL packages assume that the PowerTools repository is enable. sudo dnf config-manager --set-enabled PowerTools + # CentOS 8.3 changed the repository name to lower case. + sudo dnf config-manager --set-enabled powertools + if [[ ${SKIP_EPEL_INSTALL} != True ]]; then _install_epel fi From 3bdc8f66ad243f7487ba494e6a71f63c4965413a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 9 Feb 2021 12:56:34 -0600 Subject: [PATCH 1382/1936] Add a variable to configure the Tempest venv upper constraints We use Tempest master for testing the supported stable branches so using master upper constraints works fine but when we need to use old Tempest in the below cases then master upper constraints do not work and devstack will not be able to install Tempest in vnenv: - Testing Extended Maintenance branch - Testing py2.7 jobs until stable/train with in-tree tempest plugins This commit adds a variable to set the compatible upper constraint to use for Tempest's old version. Few of the current failure which can be fixed by this new configurable var: - networking-generic-switch-tempest-dlm-python2 - https://zuul.opendev.org/t/openstack/build/ebcf3d68d62c4af3a43a222aa9ce5556 - devstack-platform-xenial on stable/steinand stable/train - https://zuul.opendev.org/t/openstack/build/37ffc1af6f3f4b44b5ca8cbfa27068ac Change-Id: I5b2217d85e6871ca3f7a3f6f859fdce9a50d3946 --- lib/tempest | 28 +++++++++++++++++++++++----- stackrc | 1 + 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 8ee986d555..77197c2a28 100644 --- a/lib/tempest +++ b/lib/tempest @@ -111,6 +111,21 @@ function image_size_in_gib { echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c + else + echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." + cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c + # NOTE: setting both tox env var and once Tempest start using new var + # TOX_CONSTRAINTS_FILE then we can remove the old one. + export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + fi +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -617,10 +632,9 @@ function configure_tempest { tox -revenv-tempest --notest fi - # The requirements might be on a different branch, while tempest needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m @@ -702,6 +716,10 @@ function install_tempest { # TEMPEST_DIR already exist until RECLONE is true. 
git checkout $TEMPEST_BRANCH + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + tox -r --notest -efull # TODO: remove the trailing pip constraint when a proper fix # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 @@ -709,8 +727,9 @@ function install_tempest { # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements - $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m popd } @@ -718,10 +737,9 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS rm -f $tmp_u_c_m echo "Checking installed Tempest plugins:" diff --git a/stackrc b/stackrc index a36f8970e6..244acbbbb0 100644 --- a/stackrc +++ b/stackrc @@ -298,6 +298,7 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} ############## From 8903d8c1e211607ce6bf86ff974f90717e8e2cac Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 15 Jan 2021 09:26:44 +0000 Subject: [PATCH 1383/1936] [OVN] Fix Fedora/CentOS OVN configuration When installing OVN from packages, the rpm for Fedora / CentOS pre set some configurations that conflicts with the post configuration done by DevStack. This patch fixes this problem by erasing the pre-set configuration from the packages and leaving it to DevStack to configure OVN for its use (just like we would do when compiling it from source). Change-Id: I9c18023c9aa79c0633748a6169f4f283e9d74ef0 Signed-off-by: Lucas Alvares Gomes --- lib/neutron_plugins/ovn_agent | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..0a8ca9761f 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -529,6 +529,14 @@ function configure_ovn { fi fi + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + # Metadata if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR From 8f3e51d79f392151023f3853a6c8a3f7b868ecfa Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:18:48 +0000 Subject: [PATCH 1384/1936] nova: Die if console TLS enabled with tls-proxy We require the 'tls-proxy' service to set up certificates for us. 
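As a minimal sketch (assumed local.conf, not added by this change), enabling the
feature therefore means enabling both pieces together:

    [[local|localrc]]
    # The console proxies can only serve TLS if the tls-proxy service
    # generates the certificates for them.
    enable_service tls-proxy
    NOVA_CONSOLE_PROXY_COMPUTE_TLS=True
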
Hard fail if 'NOVA_CONSOLE_PROXY_COMPUTE_TLS' is enabled but the 'tls-proxy' service is not. Change-Id: I52fec12b78ecd8f76f835551ccb84dfb1d5b3d8a Signed-off-by: Stephen Finucane --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 6913040d26..28d3ba45d7 100644 --- a/lib/nova +++ b/lib/nova @@ -83,6 +83,11 @@ fi # services and the compute node NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} +# Validate configuration +if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" +fi + # Public facing bits NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} From f548ce4816b58d7e65d64fc22a1066f1aea63824 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 4 Mar 2021 10:31:30 +0900 Subject: [PATCH 1385/1936] Allow to install os-ken from git repo os-ken is used by neutron ML2/OVS agent. We need to install os-ken from source to test os-ken changes against neutron. We already have tempest-integrated-networking job in os-ken repo but it turns out it consumes os-ken from PyPI :-( Change-Id: Ibcff212591e9fed25f1316403627269d81455b09 --- lib/neutron_plugins/openvswitch_agent | 4 ++++ stackrc | 5 +++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 100961196d..7fed8bf853 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -15,6 +15,10 @@ function neutron_plugin_create_nova_conf { function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib "os-ken" + fi } function neutron_plugin_configure_dhcp_agent { diff --git a/stackrc b/stackrc index a36f8970e6..2b1511d04f 100644 --- a/stackrc +++ b/stackrc @@ -554,6 +554,11 @@ GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} GITDIR["ovsdbapp"]=$DEST/ovsdbapp +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken + ################## # # TripleO / Heat Agent Components diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index ab7583d042..5b53389073 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient ovsdbapp" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken" # Generate the above list with # echo ${!GITREPO[@]} From 3c6d1059298788d4ce35845fdb1bef2938046702 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:35:47 +0000 Subject: [PATCH 1386/1936] nova: Remove nova-xvpvncproxy This was removed this service from nova in Ussuri [1]. There's no need to keep this around. 
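With only three console proxies left (novnc, spice, serial), the per-cell port
staggering shrinks from four ports to three; a small sketch of the resulting
layout, assuming the defaults used further down in lib/nova:

    # Illustrative only: reproduce the new per-cell port assignments
    for cell in 1 2; do
        offset=$(( (cell - 1) * 3 ))
        echo "cell${cell}: novnc=$((6080 + offset)) spice=$((6081 + offset)) serial=$((6082 + offset))"
    done
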
[1] I2f7f2379d0cd54e4d0a91008ddb44858cfc5a4cf Change-Id: Idc95c6467a8c6e0c0ed07a6458425ff0a10ff995 Signed-off-by: Stephen Finucane --- lib/nova | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/lib/nova b/lib/nova index 28d3ba45d7..caa778060f 100644 --- a/lib/nova +++ b/lib/nova @@ -612,10 +612,10 @@ function configure_console_compute { # can use the NOVA_CPU_CELL variable to know which cell we are for # calculating the offset. # Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. local offset - offset=$(((NOVA_CPU_CELL - 1) * 4)) + offset=$(((NOVA_CPU_CELL - 1) * 3)) # Use the host IP instead of the service host because for multi-node, the # service host will be the controller only. @@ -623,7 +623,7 @@ function configure_console_compute { default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip) # All nova-compute workers need to know the vnc configuration options - # These settings don't hurt anything if n-xvnc and n-novnc are disabled + # These settings don't hurt anything if n-novnc is disabled if is_service_enabled n-cpu; then if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then # Use the old URL when installing novnc packages. @@ -636,13 +636,11 @@ function configure_console_compute { NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} fi iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"} - iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"} + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} @@ -665,7 +663,7 @@ function configure_console_compute { if is_service_enabled n-sproxy; then iniset $NOVA_CPU_CONF serial_console enabled True - iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/" + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi } @@ -674,15 +672,13 @@ function configure_console_proxies { local conf=${1:-$NOVA_CONF} local offset=${2:-0} # Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. 
- offset=$((offset * 4)) + offset=$((offset * 3)) - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $conf vnc novncproxy_port $((6080 + offset)) - iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf vnc xvpvncproxy_port $((6081 + offset)) if is_nova_console_proxy_compute_tls_enabled ; then iniset $conf vnc auth_schemes "vencrypt" @@ -714,12 +710,12 @@ function configure_console_proxies { if is_service_enabled n-spice; then iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf spice html5proxy_port $((6082 + offset)) + iniset $conf spice html5proxy_port $((6081 + offset)) fi if is_service_enabled n-sproxy; then iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf serial_console serialproxy_port $((6083 + offset)) + iniset $conf serial_console serialproxy_port $((6082 + offset)) fi } @@ -986,7 +982,7 @@ function start_nova_rest { function enable_nova_console_proxies { for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do if is_service_enabled $srv; then enable_service ${srv}-cell${i} fi @@ -1004,7 +1000,6 @@ function start_nova_console_proxies { # console proxies run globally for singleconductor, else they run per cell if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" else @@ -1013,7 +1008,6 @@ function start_nova_console_proxies { local conf conf=$(conductor_conf $i) run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" - run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf" run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" done @@ -1104,13 +1098,13 @@ function stop_nova_rest { function stop_nova_console_proxies { if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process $srv done else enable_nova_console_proxies for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process ${srv}-cell${i} done done From 970891a4ef863344fb1425727b3c3bf91b1c8bb5 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:45:39 +0000 Subject: [PATCH 1387/1936] Remove references to XenAPI driver The XenAPI driver was removed during the Victoria release [1], while the libvirt+xen driver has been removed in the Wallaby release [2]. Remove references to Xen from DevStack since its all a no-op now. 
[1] I42b302afbb1cfede7a0f7b16485a596cd70baf17 [2] I73305e82da5d8da548961b801a8e75fb0e8c4cf1 Change-Id: If7055feb88391f496a5e5e4c72008bf0050c5356 Signed-off-by: Stephen Finucane --- HACKING.rst | 3 +- MAINTAINERS.rst | 4 - doc/source/configuration.rst | 6 -- doc/source/plugins.rst | 2 +- functions | 25 ------ functions-common | 2 - lib/cinder_plugins/XenAPINFS | 46 ----------- lib/glance | 4 - lib/nova | 8 -- lib/nova_plugins/hypervisor-xenserver | 107 -------------------------- lib/tempest | 14 +--- stack.sh | 10 --- stackrc | 17 +--- tools/image_list.sh | 2 +- tools/uec/meta.py | 42 ---------- tools/xen/README.md | 3 - 16 files changed, 6 insertions(+), 289 deletions(-) delete mode 100644 lib/cinder_plugins/XenAPINFS delete mode 100644 lib/nova_plugins/hypervisor-xenserver delete mode 100644 tools/uec/meta.py delete mode 100644 tools/xen/README.md diff --git a/HACKING.rst b/HACKING.rst index f55aed8a07..0c4de303ce 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -74,8 +74,7 @@ of test of specific fragile functions in the ``functions`` and ``tools`` - Contains a collection of stand-alone scripts. While these may reference the top-level DevStack configuration they can generally be -run alone. There are also some sub-directories to support specific -environments such as XenServer. +run alone. Scripts diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index d4968a6051..3cf61a6875 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -77,10 +77,6 @@ SUSE Tempest ~~~~~~~ -Xen -~~~ -* Bob Ball - Zaqar (Marconi) ~~~~~~~~~~~~~~~ diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 22f5999174..2d0c894530 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -628,12 +628,6 @@ outside of tox. If you would like to install it add the following to your INSTALL_TEMPEST=True -Xenserver -~~~~~~~~~ - -If you would like to use Xenserver as the hypervisor, please refer to -the instructions in ``./tools/xen/README.md``. - Cinder ~~~~~~ diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index a18a786c49..7d70d74dd0 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -241,7 +241,7 @@ locations in the top-level of the plugin repository: on Ubuntu, Debian or Linux Mint. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running - on Red Hat, Fedora, CentOS or XenServer. + on Red Hat, Fedora, or CentOS. - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when running on SUSE Linux or openSUSE. diff --git a/functions b/functions index 89bbab2085..ccca5cda51 100644 --- a/functions +++ b/functions @@ -280,31 +280,6 @@ function upload_image { return fi - # XenServer-vhd-ovf-format images are provided as .vhd.tgz - # and should not be decompressed prior to loading - if [[ "$image_url" =~ '.vhd.tgz' ]]; then - image_name="${image_fname%.vhd.tgz}" - local force_vm_mode="" - if [[ "$image_name" =~ 'cirros' ]]; then - # Cirros VHD image currently only boots in PV mode. - # Nova defaults to PV for all VHD images, but - # the glance setting is needed for booting - # directly from volume. - force_vm_mode="vm_mode=xen" - fi - _upload_image "$image_name" ovf vhd "$image" $force_vm_mode - return - fi - - # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. - # and should not be decompressed prior to loading. - # Setting metadata, so PV mode is used. 
- if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - image_name="${image_fname%.xen-raw.tgz}" - _upload_image "$image_name" tgz raw "$image" vm_mode=xen - return - fi - if [[ "$image_url" =~ '.hds' ]]; then image_name="${image_fname%.hds}" vm_mode=${image_name##*-} diff --git a/functions-common b/functions-common index 87d8c64804..340da754a2 100644 --- a/functions-common +++ b/functions-common @@ -397,8 +397,6 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs${os_RELEASE%.*}" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS deleted file mode 100644 index 92135e7c4f..0000000000 --- a/lib/cinder_plugins/XenAPINFS +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/XenAPINFS -# Configure the XenAPINFS driver - -# Enable with: -# -# CINDER_DRIVER=XenAPINFS - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" -} - -# Restore xtrace -$_XTRACE_CINDER_XENAPINFS - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/glance b/lib/glance index c2a8b7492e..fcf778d3f6 100644 --- a/lib/glance +++ b/lib/glance @@ -279,10 +279,6 @@ function configure_glance { configure_keystone_authtoken_middleware $GLANCE_API_CONF glance iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" - iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" - fi if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi diff --git a/lib/nova b/lib/nova index caa778060f..216c3cff9e 100644 --- a/lib/nova +++ b/lib/nova @@ -1052,14 +1052,6 @@ function is_nova_ready { # happen between here and the script ending. However, in multinode # tests this can very often not be the case. So ensure that the # compute is up before we move on. - - # TODO(sdague): honestly, this probably should be a plug point for - # an external system. - if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # xenserver encodes information in the hostname of the compute - # because of the dom0/domU split. Just ignore for now. 
- return - fi wait_for_compute $NOVA_READY_TIMEOUT } diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver deleted file mode 100644 index 511ec1bc09..0000000000 --- a/lib/nova_plugins/hypervisor-xenserver +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -# -# lib/nova_plugins/hypervisor-xenserver -# Configure the XenServer hypervisor - -# Enable with: -# VIRT_DRIVER=xenserver - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -_XTRACE_XENSERVER=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - - # Check os-xenapi plugin is enabled - local plugins="${DEVSTACK_PLUGINS}" - local plugin - local found=0 - for plugin in ${plugins//,/ }; do - if [[ "$plugin" = "os-xenapi" ]]; then - found=1 - break - fi - done - if [[ $found -ne 1 ]]; then - die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf" - fi - - iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER" - iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # install console logrotate script - tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | - $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' - - # Create a cron job that will rotate guest logs - $ssh_dom0 crontab - << CRONTAB -* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1 -CRONTAB - -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # xenapi functionality is now included in os-xenapi library which houses the plugin - # so this function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$_XTRACE_XENSERVER - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/tempest b/lib/tempest index 8a5b785927..9ccd19b505 100644 --- a/lib/tempest +++ b/lib/tempest @@ -347,9 +347,6 @@ function configure_tempest { if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi - if [ "$VIRT_DRIVER" = "xenserver" ]; then - iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" - fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True # Compute @@ -425,15 +422,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY # Scenario - if [ "$VIRT_DRIVER" = "xenserver" ]; then - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz" - iniset $TEMPEST_CONFIG scenario img_disk_format vhd - iniset $TEMPEST_CONFIG scenario img_container_format ovf - else - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME - fi + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE # If using provider networking, use the physical network for validation rather than private diff --git a/stack.sh b/stack.sh index 6375c8e5e0..ca9ecfa213 100755 --- a/stack.sh +++ b/stack.sh @@ -718,16 +718,6 @@ if is_service_enabled keystone; then fi -# Nova -# ----- - -if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # Look for the backend password here because read_password - # is not a library function. - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." -fi - - # Swift # ----- diff --git a/stackrc b/stackrc index a36f8970e6..205481be08 100644 --- a/stackrc +++ b/stackrc @@ -605,10 +605,8 @@ ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core -# is installed, the default will be XenAPI +# also install an **LXC** or **OpenVZ** based system. 
DEFAULT_VIRT_DRIVER=libvirt -is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) @@ -633,14 +631,6 @@ case "$VIRT_DRIVER" in fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; - xenserver) - # Xen config common to nova and neutron - XENAPI_USER=${XENAPI_USER:-"root"} - # This user will be used for dom0 - domU communication - # should be able to log in to dom0 without a password - # will be used to install the plugins - DOMZERO_USER=${DOMZERO_USER:-"domzero"} - ;; *) ;; esac @@ -695,11 +685,6 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk} - DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz} - IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz" - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; fake) # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} diff --git a/tools/image_list.sh b/tools/image_list.sh index 3a27c4acfd..81231be9f3 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -22,7 +22,7 @@ source $TOP_DIR/functions # Possible virt drivers, if we have more, add them here. Always keep # dummy in the end position to trigger the fall through case. -DRIVERS="openvz ironic libvirt vsphere xenserver dummy" +DRIVERS="openvz ironic libvirt vsphere dummy" # Extra variables to trigger getting additional images. export ENABLED_SERVICES="h-api,tr-api" diff --git a/tools/uec/meta.py b/tools/uec/meta.py deleted file mode 100644 index 1d994a60d6..0000000000 --- a/tools/uec/meta.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import BaseHTTPServer -import SimpleHTTPServer -import sys - - -def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, - ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port.""" - - server_address = (host, port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print("Serving HTTP on", sa[0], "port", sa[1], "...") - httpd.serve_forever() - -if __name__ == '__main__': - if sys.argv[1:]: - address = sys.argv[1] - else: - address = '0.0.0.0' - if ':' in address: - host, port = address.split(':') - else: - host = address - port = 8080 - - main(host, int(port)) diff --git a/tools/xen/README.md b/tools/xen/README.md deleted file mode 100644 index 287301156e..0000000000 --- a/tools/xen/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there. - -.. _os-xenapi: https://opendev.org/x/os-xenapi/ From 3948fcb03c96bacc0c620de5b2c18a475e7afef2 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:51:49 +0000 Subject: [PATCH 1388/1936] Remove MAINTAINERS.rst This file is mega out-of-date and no longer helpful. Remove it. Change-Id: Ic7e215c3e48a9c453d19355ad7d683494811d2af Signed-off-by: Stephen Finucane --- HACKING.rst | 3 -- MAINTAINERS.rst | 88 ------------------------------------------------- 2 files changed, 91 deletions(-) delete mode 100644 MAINTAINERS.rst diff --git a/HACKING.rst b/HACKING.rst index 0c4de303ce..6a91e0a6a8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -274,9 +274,6 @@ your change even years from now -- why we were motivated to make a change at the time. -* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people - that should be added to reviews of various sub-systems. - Making Changes, Testing, and CI ------------------------------- diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst deleted file mode 100644 index 3cf61a6875..0000000000 --- a/MAINTAINERS.rst +++ /dev/null @@ -1,88 +0,0 @@ -MAINTAINERS -=========== - - -Overview --------- - -The following is a list of people known to have interests in -particular areas or sub-systems of devstack. - -It is a rather general guide intended to help seed the initial -reviewers list of a change. A +1 on a review from someone identified -as being a maintainer of its affected area is a very positive flag to -the core team for the veracity of the change. - -The ``devstack-core`` group can still be added to all reviews. - - -Format -~~~~~~ - -The format of the file is the name of the maintainer and their -gerrit-registered email. - - -Maintainers ------------ - -.. 
contents:: :local: - - -Ceph -~~~~ - -* Sebastien Han - -Cinder -~~~~~~ - -Fedora/CentOS/RHEL -~~~~~~~~~~~~~~~~~~ - -* Ian Wienand - -Neutron -~~~~~~~ - -MidoNet -~~~~~~~ - -* Jaume Devesa -* Ryu Ishimoto -* YAMAMOTO Takashi - -OpenDaylight -~~~~~~~~~~~~ - -* Kyle Mestery - -OpenFlow Agent (ofagent) -~~~~~~~~~~~~~~~~~~~~~~~~ - -* YAMAMOTO Takashi -* Fumihiko Kakuma - -Swift -~~~~~ - -* Chmouel Boudjnah - -SUSE -~~~~ - -* Ralf Haferkamp -* Vincent Untz - -Tempest -~~~~~~~ - -Zaqar (Marconi) -~~~~~~~~~~~~~~~ - -* Flavio Percoco -* Malini Kamalambal - -Oracle Linux -~~~~~~~~~~~~ -* Wiekus Beukes From 9dc2b88eb42a5f98f43bc8ad3dfa3962a4d44d74 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 5 Mar 2021 09:32:19 -0600 Subject: [PATCH 1389/1936] Add enforce_scope setting support for keystone Keystone-tempest-plugin has implemented the secure RBAC tests and enabling the enforce_scope via keystone devstack plugin. Doing those setting in devstack will help to manage easily and in central place also avoid restarting the api service. Change-Id: I30da189474476d3397152a0a15c2e30a62d712ad --- lib/keystone | 11 +++++++++++ lib/tempest | 10 ++++++++++ 2 files changed, 21 insertions(+) diff --git a/lib/keystone b/lib/keystone index 66e867ca68..e282db0bfa 100644 --- a/lib/keystone +++ b/lib/keystone @@ -134,6 +134,12 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + # Functions # --------- @@ -281,6 +287,11 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index 8a5b785927..f210e4014b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -601,6 +601,16 @@ function configure_tempest { fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on Tempest side so that + # test can be run with scoped token. + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + iniset $TEMPEST_CONFIG auth admin_system 'all' + iniset $TEMPEST_CONFIG auth admin_project_name '' + fi + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. 
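A short usage note on the keystone scope-enforcement change above: since the new
flag is ordinary devstack configuration, a minimal (illustrative, assumed)
local.conf to exercise the secure-RBAC defaults locally is:

    [[local|localrc]]
    # Switch keystone policies to enforce scope and the new defaults;
    # lib/tempest then points tempest at system-scoped admin credentials.
    KEYSTONE_ENFORCE_SCOPE=True
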
From 8c93049220bd3551b53513426c5a7bfdb7bac1d9 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 5 Mar 2021 09:40:39 -0600 Subject: [PATCH 1390/1936] Add enforce_scope setting support for Glance Glance started moving to new RBAC and glance-tempest-plugin and Tempest need to set few configuration to enable the scope checks on glance side and so does on Temepst side to tell glance is ready with scope checks so that test can be run with scoped token. Change-Id: I09f513d08212bc80a3a86a750b29b1c6625d2f89 --- lib/glance | 12 ++++++++++++ lib/tempest | 1 + 2 files changed, 13 insertions(+) diff --git a/lib/glance b/lib/glance index c2a8b7492e..fd2f2cb902 100644 --- a/lib/glance +++ b/lib/glance @@ -85,6 +85,12 @@ GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_s GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Image API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE) + GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf @@ -371,6 +377,12 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + fi } # create_glance_accounts() - Set up common required glance accounts diff --git a/lib/tempest b/lib/tempest index f210e4014b..0a9f800bcc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -610,6 +610,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_system 'all' iniset $TEMPEST_CONFIG auth admin_project_name '' fi + iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes From bd0d0fde24a5654507e02d32eea7ea0c1fc46821 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sat, 6 Mar 2021 17:23:39 -0600 Subject: [PATCH 1391/1936] Add enforce_scope setting support for Cinder Conder started moving to new RBAC and cinder-tempest-plugin and Tempest need to set few configuration to enable the scope checks on cinder side and on Temepst side to tell cinder is all configured with scope checks and test can be run with scoped token. Change-Id: Ic7cd919c000c4e7b9a3a06638a5bd87b1617e749 --- lib/cinder | 11 +++++++++++ lib/tempest | 2 ++ 2 files changed, 13 insertions(+) diff --git a/lib/cinder b/lib/cinder index 33deff61f2..da6f32728e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -125,6 +125,12 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Volume API policies to start checking the scope of token. by default, +# this flag is False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) + # Functions # --------- @@ -326,6 +332,11 @@ function configure_cinder { elif is_service_enabled etcd3; then iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + fi } # create_cinder_accounts() - Set up common required cinder accounts diff --git a/lib/tempest b/lib/tempest index f210e4014b..238e25f07b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -611,6 +611,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name '' fi + iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. From 38fed19acc2aa232503f91424d6c732ed8c7ed3a Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 8 Mar 2021 08:50:53 +0000 Subject: [PATCH 1392/1936] Update Cirros to 0.5.2 This release [1] includes a single fix [2] pulling in the ahci module which is required by Iad1adbc23b31dd54a96299e7a8a4b622c15eed8d, a nova-next change introducing q35 testing to the job. This depends on the following change caching the image within the CI host image: Depends-On: https://review.opendev.org/c/openstack/project-config/+/779178/ [1] https://github.com/cirros-dev/cirros/releases/tag/0.5.2 [2] https://github.com/cirros-dev/cirros/pull/65 Change-Id: I12e0bdb3699e5343592ab834468ba6b2fcdcaaf4 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 205481be08..1bcc302846 100644 --- a/stackrc +++ b/stackrc @@ -657,7 +657,7 @@ esac #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 802259a49656170108dd79559166ad89c49e2ef7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 12 Jan 2021 22:55:57 +0000 Subject: [PATCH 1393/1936] Simulate a remote/standalone g-api worker In order to be able to test glance's distributed import function, we need to have multiple workers in an arrangement like they would be if one was on another host (potentially at another site). This extra worker must be separate from the default image service in order to repeatedly hit one and then the other to test cross- service interactions. This allows you to enable_service g-api-r, which will clone the main g-api service, modify it to run on a different port, and start it. The service will be registered in the catalog as image_remote. 
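As a usage sketch (assumed local.conf, not part of the change itself), a
single-node setup opts in with:

    [[local|localrc]]
    # Start the cloned, stand-alone glance-api worker on its own port;
    # it is registered in the catalog as the image_remote service.
    enable_service g-api-r
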
Depends-On: https://review.opendev.org/c/openstack/glance/+/769976 Change-Id: I0e2bb5412701d515153c023873addb9d7abdb8a4 --- lib/glance | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/tempest | 5 ++++ 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index c2a8b7492e..cf66023f5c 100644 --- a/lib/glance +++ b/lib/glance @@ -131,7 +131,7 @@ function is_glance_enabled { # runs that a clean run would need to clean up function cleanup_glance { # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $(glance_remote_conf '') # Cleanup multiple stores directories if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then @@ -365,6 +365,11 @@ function configure_glance { if [[ "$GLANCE_STANDALONE" == False ]]; then write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \ + $GLANCE_UWSGI_CONF) else write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS @@ -460,6 +465,64 @@ function install_glance { setup_develop $GLANCE_DIR } +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo "$(dirname ${GLANCE_CONF_DIR})/glance-remote/"$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf glance_remote_port + + glance_remote_conf_dir=$(glance_remote_conf '') + glance_remote_port=$(get_random_port) + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf $glance_remote_conf_dir + sudo cp -r "$GLANCE_CONF_DIR" $glance_remote_conf_dir + sudo chown $STACK_USER -R $glance_remote_conf_dir + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + $remote_data/os_glance_staging_store + iniset $(glance_remote_conf 'glance-api.conf') os_glance_staging_store \ + filesystem_store_datadir ${remote_data}/os_glance_staging_store + iniset $(glance_remote_conf 'glance-api.conf') os_glance_tasks_store \ + filesystem_store_datadir ${remote_data}/os_glance_tasks_store + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + $(glance_remote_conf $GLANCE_UWSGI_CONF) + + # Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $(glance_remote_conf $GLANCE_UWSGI_CONF)) + + # We need to create the systemd service for the clone, but then + # change it to include an Environment line to point the WSGI app + # at the alternate config directory. 
+ write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $(glance_remote_conf $GLANCE_UWSGI_CONF)" \ + "" "$STACK_USER" + iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $(glance_remote_conf $GLANCE_UWSGI_CONF)) +} + # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL @@ -475,6 +538,11 @@ function start_glance { run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" fi + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone + fi + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then die $LINENO "g-api did not start" @@ -484,6 +552,7 @@ function start_glance { # stop_glance() - Stop running processes function stop_glance { stop_process g-api + stop_process g-api-r } # Restore xtrace diff --git a/lib/tempest b/lib/tempest index 8eab4f5ef2..7e7f0ab7a9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -352,6 +352,11 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True + if is_service_enabled g-api-r; then + iniset $TEMPEST_CONFIG image alternate_image_endpoint \ + "image_remote" + fi + # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From 61b4fbf143b96365fa85456246bcadcaab3d76be Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 9 Mar 2021 08:05:37 -0800 Subject: [PATCH 1394/1936] Address feedback from glance-remote patch This cleans up some of the quote and variable handling that was pointed out in review of the previous patch. This is non-critical, so I'm putting it in a subsequent patch to avoid disturbing the careful alignment of patches across three projects that are mostly approved. 
Change-Id: I9b281efd74ba5cd78f97b84e5704b41fd040e481 --- lib/glance | 40 ++++++++++++++++++++++------------------ lib/tempest | 3 +-- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/lib/glance b/lib/glance index cf66023f5c..3fb61b0268 100644 --- a/lib/glance +++ b/lib/glance @@ -130,8 +130,9 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $(glance_remote_conf '') + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" # Cleanup multiple stores directories if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then @@ -468,39 +469,41 @@ function install_glance { # glance_remote_conf() - Return the path to an alternate config file for # the remote glance clone function glance_remote_conf { - echo "$(dirname ${GLANCE_CONF_DIR})/glance-remote/"$(basename "$1") + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") } # start_glance_remote_clone() - Clone the regular glance api worker function start_glance_remote_clone { - local glance_remote_conf glance_remote_port + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi - glance_remote_conf_dir=$(glance_remote_conf '') + glance_remote_conf_dir="$(glance_remote_conf "")" glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" # Clone the existing ready-to-go glance-api setup - sudo rm -Rf $glance_remote_conf_dir - sudo cp -r "$GLANCE_CONF_DIR" $glance_remote_conf_dir - sudo chown $STACK_USER -R $glance_remote_conf_dir + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" # Point this worker at different data dirs remote_data="${DATA_DIR}/glance-remote" mkdir -p $remote_data/os_glance_tasks_store \ - $remote_data/os_glance_staging_store - iniset $(glance_remote_conf 'glance-api.conf') os_glance_staging_store \ - filesystem_store_datadir ${remote_data}/os_glance_staging_store - iniset $(glance_remote_conf 'glance-api.conf') os_glance_tasks_store \ - filesystem_store_datadir ${remote_data}/os_glance_tasks_store + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" # Change our uwsgi to our new port sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ - $(glance_remote_conf $GLANCE_UWSGI_CONF) + "$glance_remote_uwsgi" # Update the self-reference url with our new port iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ worker_self_reference_url \ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ - $(glance_remote_conf $GLANCE_UWSGI_CONF)) + "$glance_remote_uwsgi") # We need to create the systemd service for the clone, but then # change it to include an Environment line to point the WSGI app @@ -508,10 +511,11 @@ function start_glance_remote_clone { write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ - --ini $(glance_remote_conf $GLANCE_UWSGI_CONF)" \ + --ini $glance_remote_uwsgi" \ "" 
"$STACK_USER" iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ - "Service" "Environment" "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" # Reload and restart with the new config $SYSTEMCTL daemon-reload @@ -520,7 +524,7 @@ function start_glance_remote_clone { get_or_create_service glance_remote image_remote "Alternate glance" get_or_create_endpoint image_remote $REGION_NAME \ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ - $(glance_remote_conf $GLANCE_UWSGI_CONF)) + $glance_remote_uwsgi) } # start_glance() - Start running processes diff --git a/lib/tempest b/lib/tempest index 7e7f0ab7a9..bbd23bb63e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -353,8 +353,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True if is_service_enabled g-api-r; then - iniset $TEMPEST_CONFIG image alternate_image_endpoint \ - "image_remote" + iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote fi # Compute From af79a934ef057ea6ef7690894d58d21f7818979e Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 15 Mar 2021 12:20:42 -0400 Subject: [PATCH 1395/1936] Use 'ip addr replace' in OVN code Instead of doing a flush/add, use replace like the ML2/OVS code does. Should have the same behavior of not failing if the address is already present. Change-Id: If9d8a848b079ccb8c0c9b8e6fb708107aa0d46c7 --- lib/neutron_plugins/ovn_agent | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..abc9c63614 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -271,8 +271,7 @@ function create_public_bridge { sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc if [ -n "$FLOATING_RANGE" ]; then local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr flush dev $ext_gw_ifc - sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc + sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc fi # Ensure IPv6 RAs are accepted on the interface with the default route. @@ -286,8 +285,7 @@ function create_public_bridge { sudo sysctl -w net.ipv6.conf.all.forwarding=1 if [ -n "$IPV6_PUBLIC_RANGE" ]; then local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr flush dev $ext_gw_ifc - sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc + sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc fi sudo ip link set $ext_gw_ifc up From 1ed276c17791dba1f0b7ef4446d0efe09135553b Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 11 Mar 2021 13:10:28 +0100 Subject: [PATCH 1396/1936] Use (or set properly) system-id generated by openvswitch In case when OVN_UUID isn't set by user, and it isn't stored in /etc/openvswith/system-id.conf file, Devstack will reuse it. If it's not, it will generate and store it in the /etc/openvswitch/system-id.conf file so it can be set to same value after openvswitch will be e.g. restarted. In case when OVN_UUID is set by user, it will be also saved in /etc/openvswitch/system-id.conf file to make it persistent when e.g openvswitch will be restarted. 
Closes-Bug: #1918656 Change-Id: I8e3b05f3ab83e204bc1ce895baec0e1ba515895b --- lib/neutron_plugins/ovn_agent | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..c6ac16d007 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -66,7 +66,9 @@ OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded} # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated. A randomly generated UUID will be saved in a file -# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack. +# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf) +# so that the same one will be re-used if you re-run DevStack or restart +# Open vSwitch service. OVN_UUID=${OVN_UUID:-} # Whether or not to build the openvswitch kernel module from ovs. This is required @@ -109,6 +111,7 @@ OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-/etc/openvswitch} OVN_DATADIR=$DATA_DIR/ovn OVN_SHAREDIR=$OVS_PREFIX/share/ovn @@ -521,11 +524,17 @@ function configure_ovn { echo "Configuring OVN" if [ -z "$OVN_UUID" ] ; then - if [ -f ./ovn-uuid ] ; then - OVN_UUID=$(cat ovn-uuid) + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) else OVN_UUID=$(uuidgen) - echo $OVN_UUID > ovn-uuid + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) + if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf fi fi From 30819e66ddad5b57b726684e62b511a938aaea98 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 22 Mar 2021 07:14:50 +0000 Subject: [PATCH 1397/1936] Set default OVS_SYSCONFDIR value depending on OVS_PREFIX When OVN is built from source, the value of OVS_PREFIX is set to "/usr/local". All other paths referring to OVS should be prefixed with this value. Closes-Bug: #1920634 Related-Bug: #1918656 Change-Id: I9a45a5379d1c47cdf67b9c6d3d0409a88501e61e --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 97c20fcda1..2f6d1ab10d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -111,7 +111,7 @@ OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs -OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-/etc/openvswitch} +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} OVN_DATADIR=$DATA_DIR/ovn OVN_SHAREDIR=$OVS_PREFIX/share/ovn From 84b328c814fd5be8af53738128aa3f5ef75ca3c7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 26 Mar 2021 07:17:42 -0700 Subject: [PATCH 1398/1936] Default to parallel execution Several jobs have been running in parallel since the late Wallaby cycle, and other developers have had it enabled locally. I have heard no async-related stability or debug-ability complaints thus far. I think that we should convert the default to parallel early in the Xena cycle in an attempt to spread the speed improvements across the board, while also collecting data on a wider set of configurations. 
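For anyone who does hit an ordering problem, opting back out stays a one-line
local.conf change (illustrative):

    [[local|localrc]]
    # Restore the previous serial behaviour while debugging
    DEVSTACK_PARALLEL=False
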
Change-Id: I83d56c9363d481bb6d5921f5e1f9b024f136044b --- inc/async | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/async b/inc/async index d29168f2f5..c63bc2045a 100644 --- a/inc/async +++ b/inc/async @@ -11,7 +11,7 @@ # async_wait my_shell_func # -DEVSTACK_PARALLEL=$(trueorfalse False DEVSTACK_PARALLEL) +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) _ASYNC_BG_TIME=0 # Keep track of how much total time was spent in background tasks From d207ba9015f3210812468bfbf7d06b1491392554 Mon Sep 17 00:00:00 2001 From: Toshiaki Takahashi Date: Wed, 23 Dec 2020 17:40:57 +0000 Subject: [PATCH 1399/1936] Move gawk into general for post-config Devstack script for setting post-config needs gawk. So this patch moves gawk from files/*/nova into files/*/general. Closes-Bug: #1909041 Change-Id: I06a1a5524f146a8d7337963e846b5a6b7561be13 --- files/debs/general | 1 + files/debs/nova | 1 - files/rpms-suse/general | 1 + files/rpms-suse/nova | 1 - files/rpms/general | 1 + files/rpms/nova | 1 - 6 files changed, 3 insertions(+), 3 deletions(-) diff --git a/files/debs/general b/files/debs/general index d64417f1b7..7e481b4072 100644 --- a/files/debs/general +++ b/files/debs/general @@ -5,6 +5,7 @@ bsdmainutils curl default-jre-headless # NOPRIME g++ +gawk gcc gettext # used for compiling message catalogs git diff --git a/files/debs/nova b/files/debs/nova index a7aebbf946..e19441453b 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -3,7 +3,6 @@ curl dnsmasq-base dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils-arping diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 0de0876dcd..f63611025c 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -3,6 +3,7 @@ apache2-devel bc ca-certificates-mozilla curl +gawk gcc gcc-c++ git-core diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 9923760750..1cc2f62ea5 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -4,7 +4,6 @@ curl dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables -gawk iptables iputils kpartx diff --git a/files/rpms/general b/files/rpms/general index cfcd7ff261..33da0a5385 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,6 +1,7 @@ bc curl dbus +gawk gcc gcc-c++ gettext # used for compiling message catalogs diff --git a/files/rpms/nova b/files/rpms/nova index 2218330230..8ea8ccc5ca 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -3,7 +3,6 @@ curl dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils From 01a84d2d03cd871fb8734d5fdc9d149b9487e3e4 Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Mon, 11 Jan 2021 13:42:46 -0500 Subject: [PATCH 1400/1936] Configure Cinder backup driver This patch adds a new environment variable, CINDER_BACKUP_DRIVER for configuring cinder backup driver used when c-bak service is enabled. This gets cinder backup driver configurable with a similar pattern to cinder backends. Although the current configurable backup drivers don't need cleanup functions, the interface for cleanup is prepared for the future. The following backup drivers can be configured: swift: This is the default backup driver. ceph: This already can be configured if ceph backend driver is enabled. For backward compatibility, ceph backup driver is used if ceph backend driver is enabled and no backup driver is specified. s3_swift: The s3 backup driver gets configurable with this patch. 
By specifying 's3_swift', the driver is configured for swift s3api. In the future, lib/cinder_backups/s3 should be created separatedly for external S3 compatible storage. This file will just set given parameters such as a URL and credentials. Change-Id: I356c224d938e1aa59c8589387a03682b3ec6e23d --- lib/cinder | 45 ++++++++++++++++++++++++----- lib/cinder_backends/ceph | 32 --------------------- lib/cinder_backups/ceph | 57 +++++++++++++++++++++++++++++++++++++ lib/cinder_backups/s3_swift | 45 +++++++++++++++++++++++++++++ lib/cinder_backups/swift | 38 +++++++++++++++++++++++++ 5 files changed, 178 insertions(+), 39 deletions(-) create mode 100644 lib/cinder_backups/ceph create mode 100644 lib/cinder_backups/s3_swift create mode 100644 lib/cinder_backups/swift diff --git a/lib/cinder b/lib/cinder index 6c97e114a6..14ab291f8a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,6 +31,7 @@ set +o xtrace CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups # grab plugin config if specified via cinder_driver if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -98,6 +99,16 @@ else CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} fi +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured +# along with ceph backend driver. +if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi + +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} + # Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi # reference should be cleaned up to more accurately refer to uwsgi. CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} @@ -113,6 +124,15 @@ if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then done fi +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi + # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} @@ -189,6 +209,12 @@ function cleanup_cinder { done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + stop_process "c-api" remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" } @@ -266,13 +292,12 @@ function configure_cinder { configure_cinder_image_volume_cache fi - if is_service_enabled c-bak; then - # NOTE(mriedem): The default backup driver uses swift and if we're - # on a subnode we might not know if swift is enabled, but chances are - # good that it is on the controller so configure the backup service - # to use it. If we want to configure the backup service to use - # a non-swift driver, we'll likely need environment variables. 
- iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi fi if is_service_enabled ceilometer; then @@ -410,6 +435,12 @@ function init_cinder { done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + init_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + mkdir -p $CINDER_STATE_PATH/volumes } diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 33c9706d3d..0b465730c0 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -6,12 +6,6 @@ # Enable with: # # CINDER_ENABLED_BACKENDS+=,ceph:ceph -# -# Optional parameters: -# CINDER_BAK_CEPH_POOL= -# CINDER_BAK_CEPH_USER= -# CINDER_BAK_CEPH_POOL_PG= -# CINDER_BAK_CEPH_POOL_PGP= # Dependencies: # @@ -29,11 +23,6 @@ set +o xtrace # Defaults # -------- -CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} -CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} -CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} -CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} - # Entry Points # ------------ @@ -52,27 +41,6 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 iniset $CINDER_CONF DEFAULT glance_api_version 2 - - if is_service_enabled c-bak; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi - fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - - iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" - iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" - iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" - iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 - iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True - fi } # Restore xtrace diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..26136bef96 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,57 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep 
xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [ "$REMOTE_CEPH" = "False" ]; then + # Configure Cinder backup service options, ceph pool, ceph user and ceph key + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} + if [[ $CEPH_REPLICAS -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. 
If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..d7c977e1e3 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,38 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: From 110b9a9b1b05d9163a674e5bcc05fcd8d48cb5bf Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Thu, 1 Apr 2021 11:00:25 +0900 Subject: [PATCH 1401/1936] Fix typo in multinode-lab document Signed-off-by: Nobuhiro MIKI Change-Id: I1b6100d6b8231f1f96a7768e26ab83f010f1e4dc --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c0b3f58157..dc3568a845 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -395,7 +395,7 @@ SSH keys need to be exchanged between each compute node: 3. Verify that login via ssh works without a password:: - ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION + ssh -i /root/.ssh/id_rsa stack@DESTINATION In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and From 362641b1b8b0596371f13db8448ab0f43bd53482 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 19 Mar 2021 10:33:24 +0000 Subject: [PATCH 1402/1936] cinder: Increase VOLUME_BACKING_FILE_SIZE As reported in bug #1920136 the tempest-integrated-compute job has started to see insufficient free virtual space errors being reported by c-sch and c-vol when creating volumes. This change simply increases the default size of the underlying LVM PV used to host these volumes within the default LVM/iSCSI c-vol backend deployed by devstack. 
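
Environments that are short on disk can still shrink the backing file again
via local.conf, for example:

    [[local|localrc]]
    VOLUME_BACKING_FILE_SIZE=24G
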
Change-Id: I965d4a485215ac482403f1e83609452550dfd860 Closes-Bug: #1920136 --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 648a028beb..81e0f12866 100644 --- a/stackrc +++ b/stackrc @@ -758,8 +758,8 @@ for image_url in ${IMAGE_URLS//,/ }; do fi done -# 24Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G} +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} # Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} From 25d37efb9154f2f08e094f4dda3366a7bcd0af31 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 6 Apr 2021 10:35:19 -0500 Subject: [PATCH 1403/1936] Update DEVSTACK_SERIES to xena stable/wallaby branch has been created now and current master is for xena. Change-Id: I42f67361fe50795d929752434342effddf123486 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 648a028beb..9630221f4e 100644 --- a/stackrc +++ b/stackrc @@ -245,7 +245,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="wallaby" +DEVSTACK_SERIES="xena" ############## # From 6f2c807bfade2a218636e0ca441de45c5662aca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Tue, 6 Apr 2021 14:15:34 +0000 Subject: [PATCH 1404/1936] gzip, not xz xz may cause POST_FAILUREs due to memory pressure [1]. [1] http://lists.openstack.org/pipermail/openstack-discuss/2021-April/021609.html Change-Id: I2ea3175ecf2508b62640bfffdd798d7072e55550 --- roles/export-devstack-journal/tasks/main.yaml | 2 +- .../templates/devstack.journal.README.txt.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index ef839edaaf..db38b10a44 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -45,7 +45,7 @@ cmd: | journalctl -o export \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz + | gzip > {{ stage_dir }}/logs/devstack.journal.gz - name: Save journal README become: true diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 index fe36653102..30519f63d7 100644 --- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -7,7 +7,7 @@ devstack run. To use it, you will need to convert it so journalctl can read it locally. After downloading the file: - $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal Note this binary is not in the regular path. On Debian/Ubuntu platforms, you will need to have the "systemd-journal-remote" package From 448db9ec41930d13a785c553e09a34417507f594 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Fri, 19 Feb 2021 13:25:10 +0100 Subject: [PATCH 1405/1936] Rely on ceph.conf settings when cinder backup pool is created Ceph adds the osd pool default size option on ceph.conf via [1]; this means we don't need to specify the size of this pool if the same value (same variable) is used (CEPH_REPLICAS). 
This change is an attempt of removing the size setting, relying on the implicit declaration of the value provided by ceph.conf. [1] https://github.com/openstack/devstack-plugin-ceph/blob/master/devstack/lib/ceph#L425 Change-Id: I5fa2105ceb3b97a4e38926d76c1e4028f1108d4a --- lib/cinder_backups/ceph | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 26136bef96..e4003c0720 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -27,12 +27,8 @@ CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} function configure_cinder_backup_ceph { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring From 580fec54c3a970de80ab66b3decca69704ff1179 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 8 Apr 2021 11:03:37 -0500 Subject: [PATCH 1406/1936] Make stackviz tasks not to fail jobs Due to issue on stckviz side, job start failing with POST_FAILURE. If we fix the issue still we need to wait for periodic job periodic-package-stackviz-element to publish the latest tarball on https://tarballs.openstack.org/stackviz/dist/. Let's not fail the job for any issue occur during stackviz processing. 
Closes-Bug: 1863161 Change-Id: Ifee04f28ecee52e74803f1623aba5cfe5ee5ec90 --- roles/process-stackviz/tasks/main.yaml | 125 +++++++++++++------------ 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml index c51c66cdb3..3ba3d9c2e6 100644 --- a/roles/process-stackviz/tasks/main.yaml +++ b/roles/process-stackviz/tasks/main.yaml @@ -1,70 +1,73 @@ -- name: Devstack checks if stackviz archive exists - stat: - path: "/opt/cache/files/stackviz-latest.tar.gz" - register: stackviz_archive +- name: Process Stackviz + block: -- debug: - msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" - when: not stackviz_archive.stat.exists + - name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive -- name: Check if subunit data exists - stat: - path: "{{ zuul_work_dir }}/testrepository.subunit" - register: subunit_input + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists -- debug: - msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" - when: not subunit_input.stat.exists + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input -- name: Install stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - block: - - include_role: - name: ensure-pip + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U - - pip: - name: "file://{{ stackviz_archive.stat.path }}" - virtualenv: /tmp/stackviz - virtualenv_command: '{{ ensure_pip_virtualenv_command }}' - extra_args: -U + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Deploy stackviz static html+js - command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Check if dstat data exists - stat: - path: "{{ devstack_base_dir }}/logs/dstat-csv.log" - register: dstat_input - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists -- name: Run stackviz with dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - 
subunit_input.stat.exists - - dstat_input.stat.exists - failed_when: False + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists -- name: Run stackviz without dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - - not dstat_input.stat.exists - failed_when: False + ignore_errors: yes From aa5c38727b314b03cd7ab69612435aa206bd5e2c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 14 Apr 2021 14:27:32 -0700 Subject: [PATCH 1407/1936] Work around CHILD_MAX bash limitation for async Apparently bash (via POSIX) only guarantees a small (32ish) number of children can be started and their statuses retrieved at any given point. On larger jobs with lots of plugins and additional work, we may go over that limit, especially for long-lived children, such as the install_tempest task. This works around that issue by creating a fifo for each child at spawn time. When the child is complete, it will block on a read against that fifo (and thus not exit). When the parent goes to wait on the child, it first writes to that fifo, unblocking the child so that it can exit near the time we go to wait. Closes-Bug: #1923728 Change-Id: Id755bdb1e7f1664ec08742d034c174e87a3d2902 --- inc/async | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/inc/async b/inc/async index c63bc2045a..11bcdfa39e 100644 --- a/inc/async +++ b/inc/async @@ -57,6 +57,7 @@ function async_log { function async_inner { local name="$1" local rc + local fifo=${DEST}/async/${name}.fifo shift set -o xtrace if $* >${DEST}/async/${name}.log 2>&1; then @@ -69,6 +70,8 @@ function async_inner { async_log "$name" "FAILED with rc $rc" fi iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + # Block on the fifo until we are signaled to exit by the main process + cat $fifo return $rc } @@ -86,12 +89,14 @@ function async_run { local name="$1" shift local inifile=${DEST}/async/${name}.ini + local fifo=${DEST}/async/${name}.fifo touch $inifile iniset $inifile job command "$*" iniset $inifile job start_time $(date +%s%3N) if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + mkfifo $fifo async_inner $name $* & iniset $inifile job pid $! async_log "$name" "running: %command" @@ -119,17 +124,23 @@ function async_wait { xtrace=$(set +o | grep xtrace) set +o xtrace - local pid rc running inifile runtime + local pid rc running inifile runtime fifo rc=0 for name in $*; do running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) inifile="${DEST}/async/${name}.ini" + fifo=${DEST}/async/${name}.fifo if pid=$(async_pidof "$name"); then async_log "$name" "Waiting for completion of %command" \ "($running other jobs running)" time_start async_wait if [[ "$pid" != "self" ]]; then + # Signal the child to go ahead and exit since we are about to + # wait for it to collect its status. + echo "Signaling exit" + echo WAKEUP > $fifo + echo "Signaled" # Do not actually call wait if we ran synchronously if wait $pid; then rc=0 @@ -137,6 +148,7 @@ function async_wait { rc=$? 
fi cat ${DEST}/async/${name}.log + rm -f $fifo fi time_stop async_wait local start_time From 51e384554b4653a05abea435432431cdca4728fb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 14 Apr 2021 07:23:10 -0700 Subject: [PATCH 1408/1936] Add some debug to async_wait failures This dumps some data in the case where we fail to wait for a child pid to help debug what is going on. This also cleans up a few review comments from the actual fix. Change-Id: I7b58ce0cf2b41bdffa448973edb4c992fe5f730c Related-Bug: #1923728 --- inc/async | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/inc/async b/inc/async index 11bcdfa39e..56338f5343 100644 --- a/inc/async +++ b/inc/async @@ -48,7 +48,7 @@ function async_log { command=$(iniget $inifile job command | tr '#' '-') message=$(echo "$message" | sed "s#%command#$command#g") - echo "[Async ${name}:${pid}]: $message" + echo "[$BASHPID Async ${name}:${pid}]: $message" } # Inner function that actually runs the requested task. We wrap it like this @@ -57,7 +57,7 @@ function async_log { function async_inner { local name="$1" local rc - local fifo=${DEST}/async/${name}.fifo + local fifo="${DEST}/async/${name}.fifo" shift set -o xtrace if $* >${DEST}/async/${name}.log 2>&1; then @@ -116,6 +116,24 @@ function async_runfunc { async_run $1 $* } +# Dump some information to help debug a failed wait +function async_wait_dump { + local failpid=$1 + + echo "=== Wait failure dump from $BASHPID ===" + echo "Processes:" + ps -f + echo "Waiting jobs:" + for name in $(ls ${DEST}/async/*.ini); do + echo "Job $name :" + cat "$name" + done + echo "Failed PID status:" + sudo cat /proc/$failpid/status + sudo cat /proc/$failpid/cmdline + echo "=== End wait failure dump ===" +} + # Wait for an async future to complete. May return immediately if already # complete, or of the future has already been waited on (avoid this). May # block until the future completes. @@ -129,18 +147,18 @@ function async_wait { for name in $*; do running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) inifile="${DEST}/async/${name}.ini" - fifo=${DEST}/async/${name}.fifo + fifo="${DEST}/async/${name}.fifo" if pid=$(async_pidof "$name"); then async_log "$name" "Waiting for completion of %command" \ - "($running other jobs running)" + "running on PID $pid ($running other jobs running)" time_start async_wait if [[ "$pid" != "self" ]]; then # Signal the child to go ahead and exit since we are about to # wait for it to collect its status. 
- echo "Signaling exit" + async_log "$name" "Signaling child to exit" echo WAKEUP > $fifo - echo "Signaled" + async_log "$name" "Signaled" # Do not actually call wait if we ran synchronously if wait $pid; then rc=0 @@ -161,6 +179,7 @@ function async_wait { "$rc in $runtime seconds" rm -f $inifile if [ $rc -ne 0 ]; then + async_wait_dump $pid echo Stopping async wait due to error: $* break fi From d04e795b316f5be466532e60104a983bf6419716 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 19 Apr 2021 06:52:30 +0000 Subject: [PATCH 1409/1936] [TrivialFix] Delete symlink apts-debs Follow up on old I0416180db5b6add996ce5b48c6966c1b68adbcb0 Change-Id: If2f6166cf7c585bf303d0f6c28a2745d85eabbed --- files/apts | 1 - 1 file changed, 1 deletion(-) delete mode 120000 files/apts diff --git a/files/apts b/files/apts deleted file mode 120000 index ef926de053..0000000000 --- a/files/apts +++ /dev/null @@ -1 +0,0 @@ -debs/ \ No newline at end of file From c062792709def9ef10ddac68867e1b7bf9009435 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 9 Mar 2021 22:36:57 +0000 Subject: [PATCH 1410/1936] cinder: Default CINDER_ISCSI_HELPER to lioadm As outlined in bug #1917750 the use of tgtadm in multinode environments with multiple c-vol services can cause volumes to use duplicate WWNs. This has been shown to cause some encrypted volume test failures as os-brick returns a /dev/disk/by-id path to n-cpu that can point to the wrong underlying volume when multiple volumes with the same WWN are connected to a host. There is also some speculation that the duplicate WWNs are also causing libvirt to fail to detach volumes from instances but as yet this has not been proven. This change aims to avoid all of the above by switching the default of CINDER_ISCSI_HELPER to lioadm for all deployments instead of just EL and SUSE based deployments. The Bionic platform job however is pinned to tgtadm as there issues installing python3-rtslib-fb. 
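
Jobs that still need the legacy tgt-based target, such as the Bionic platform
job mentioned above, can keep it by setting the helper explicitly, for example
in local.conf:

    [[local|localrc]]
    CINDER_ISCSI_HELPER=tgtadm
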
Closes-Bug: #1917750 Change-Id: If5c860d1e69aaef9a9236303c370479a7714ad43 --- .zuul.yaml | 3 +++ lib/cinder | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 67d4c24000..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -590,6 +590,9 @@ description: Ubuntu Bionic platform test nodeset: openstack-single-node-bionic voting: false + vars: + devstack_localrc: + CINDER_ISCSI_HELPER: tgtadm - job: name: devstack-async diff --git a/lib/cinder b/lib/cinder index f20631b56c..9c8d85cf59 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,15 +88,15 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Centos7 and OpenSUSE switched to using LIO and that's all that's supported, -# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI +# Default to lioadm +CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} + +# EL and SUSE should only use lioadm, we continue to allow Ubuntu based +# deployments to use tgtadm for specific jobs in the cinder-tempest-plugin if is_fedora || is_suse; then - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then die "lioadm is the only valid Cinder target_helper config on this platform" fi -else - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} fi # For backward compatibility From 0386c1cda61b57da3aedde05e317137c354fb4d9 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 28 Apr 2021 09:26:23 +0100 Subject: [PATCH 1411/1936] cinder: Default CINDER_ISCSI_HELPER to tgtadm on Bionic If5c860d1e69aaef9a9236303c370479a7714ad43 attempted to move this default to lioadm while pinning certain Bionic based jobs to tgtadm. Unfortunately it missed the legacy dsvm based jobs within various projects that do not inherit from the devstack-platform-bionic base job and that are also not covered by devstack's gate. This change simply forces CINDER_ISCSI_HELPER to tgtadm on Bionic based hosts to ensure it is always used. Closes-Bug: #1926411 Change-Id: Ib4b38b45f25575c92fb09b8e97fa1b24af0cc06a --- lib/cinder | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9c8d85cf59..34d618674e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -91,8 +91,13 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # Default to lioadm CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} -# EL and SUSE should only use lioadm, we continue to allow Ubuntu based -# deployments to use tgtadm for specific jobs in the cinder-tempest-plugin +# Bionic needs to default to tgtadm until support is dropped within devstack +# as the rtslib-fb-targetctl service doesn't start after installing lioadm. +if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then + CINDER_ISCSI_HELPER=tgtadm +fi + +# EL and SUSE should only use lioadm if is_fedora || is_suse; then if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then die "lioadm is the only valid Cinder target_helper config on this platform" From b02a43291cc5e2d5677ecbb80c0fd608d67a1374 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 27 Nov 2018 12:59:04 +1100 Subject: [PATCH 1412/1936] Handle disappearing pids in mlock_report.py If a pid disappears on us while we're reading, we should just continue on. EnvironmentError is just an alias for OSError since Python 3.3, so use the latter name. 
[0] [0] https://docs.python.org/3/library/exceptions.html#OSError Change-Id: I3a25cca328e1469f72c84a118a9691c1c0258bc4 Closes-Bug: #1926434 --- tools/mlock_report.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tools/mlock_report.py b/tools/mlock_report.py index b15a0bf80b..1b081bbe6f 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -24,17 +24,19 @@ def _get_report(): # iterate over the /proc/%pid/status files manually try: s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r') - except EnvironmentError: + with s: + for line in s: + result = LCK_SUMMARY_REGEX.search(line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': proc.pid, + 'locked': locked}) + except OSError: + # pids can disappear, we're ok with that continue - with s: - for line in s: - result = LCK_SUMMARY_REGEX.search(line) - if result: - locked = int(result.group('locked')) - if locked: - mlock_users.append({'name': proc.name(), - 'pid': proc.pid, - 'locked': locked}) + # produce a single line log message with per process mlock stats if mlock_users: From 7ad4cd07c8bf4f302acc4fc6684e362309332c9d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 29 Apr 2021 09:24:38 -0500 Subject: [PATCH 1413/1936] Drop Bionic support Since victoria cycle, we have moved upstream testing to Ubuntu Focal (20.04) and so does no Bionic distro in Xena cycle testing runtime[1]. Grenade jobs also started running on Focal since victoria was released. Only thing left was legacy jobs which were not migrated to Ubuntu Focal in Victoria and as per another community-wide goal[2], all the lgeacy jobs were suppsoed to be migrated to zuulv3 native jobs in victoria cycle itself. One of the pending job was in nova (nova-grenade-multinode) which is also migrated to zuulv3 native now - https://review.opendev.org/c/openstack/nova/+/778885 If there is any job running on bionic, we strongly recommend to migrate it to Ubuntu Focal. 
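
Anyone experimenting on a platform outside the supported list can still force
an attempt, at their own risk, with the existing override, for example:

    $ FORCE=yes ./stack.sh
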
[1] https://governance.openstack.org/tc/reference/runtimes/xena.html [2] https://governance.openstack.org/tc/goals/selected/victoria/native-zuulv3-jobs.html Change-Id: I39e38e4a6c2e52dd3822c9fdea354258359a9f53 --- .zuul.yaml | 11 ----------- lib/apache | 3 --- lib/cinder | 6 ------ stack.sh | 2 +- tools/fixup_stuff.sh | 38 -------------------------------------- 5 files changed, 1 insertion(+), 59 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..b65aeec4dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -584,16 +584,6 @@ voting: false timeout: 9000 -- job: - name: devstack-platform-bionic - parent: tempest-full-py3 - description: Ubuntu Bionic platform test - nodeset: openstack-single-node-bionic - voting: false - vars: - devstack_localrc: - CINDER_ISCSI_HELPER: tgtadm - - job: name: devstack-async parent: tempest-full-py3 @@ -695,7 +685,6 @@ - devstack-ipv6 - devstack-platform-fedora-latest - devstack-platform-centos-8 - - devstack-platform-bionic - devstack-async - devstack-multinode - devstack-unit-tests diff --git a/lib/apache b/lib/apache index 870a65a9d2..04259ba31f 100644 --- a/lib/apache +++ b/lib/apache @@ -93,9 +93,6 @@ function install_apache_uwsgi { if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" - if [[ "$DISTRO" == 'bionic' ]]; then - pkg_list="${pkg_list} uwsgi-plugin-python" - fi install_package ${pkg_list} elif is_fedora; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by diff --git a/lib/cinder b/lib/cinder index 34d618674e..7d6e843a3d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -91,12 +91,6 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # Default to lioadm CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} -# Bionic needs to default to tgtadm until support is dropped within devstack -# as the rtslib-fb-targetctl service doesn't start after installing lioadm. -if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then - CINDER_ISCSI_HELPER=tgtadm -fi - # EL and SUSE should only use lioadm if is_fedora || is_suse; then if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then diff --git a/stack.sh b/stack.sh index ca9ecfa213..9d854da581 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 25f726892f..19219435ad 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -59,43 +59,6 @@ function fixup_keystone { fi } -# Ubuntu Repositories -#-------------------- -# Enable universe for bionic since it is missing when installing from ISO. -function fixup_ubuntu { - if [[ "$DISTRO" != "bionic" ]]; then - return - fi - - # This pulls in apt-add-repository - install_package "software-properties-common" - - # Enable universe - sudo add-apt-repository -y universe - - if [[ -f /etc/ci/mirror_info.sh ]] ; then - # If we are on a nodepool provided host and it has told us about - # where we can find local mirrors then use that mirror. 
- source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main" - else - # Enable UCA:ussuri for updated versions of QEMU and libvirt - sudo add-apt-repository -y cloud-archive:ussuri - fi - REPOS_UPDATED=False - apt_get_update - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info -} - # Python Packages # --------------- @@ -194,7 +157,6 @@ function fixup_ovn_centos { function fixup_all { fixup_keystone - fixup_ubuntu fixup_fedora fixup_suse } From 06b7352478170521a07875154eef317bde0c5321 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 29 Apr 2021 11:46:35 -0700 Subject: [PATCH 1414/1936] Fix async race updating nova configs The configure_neutron_nova function updates nova configs. While that is still running we separately update nova configs in stack.sh. This can result in unexpected configs (that don't work). Fix this by waiting for configure_neutron_nova to complete its work before we do nova config updates directly in stack.sh. For specifics we say that: [neutron] project_domain_name = Default was missing from both nova.conf and nova-cpu.conf and instances could not be created because keystone complained about not finding domain in project. The strong suspicion here is that on some systems configure_neutron_nova would write out project_domain_name while the stack.sh inisets were running resulting in stack.sh overwriting the project_domain_name content. One theory is that disabling swift makes this problem more likely as there is swift work in the middle of the async period. This is supported by the fact that our job that hits this problem does indeed disable swift. Change-Id: I0961d882d555a21233c6b4fbfc077cfe33b88499 --- stack.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index ca9ecfa213..163fc5b370 100755 --- a/stack.sh +++ b/stack.sh @@ -1238,17 +1238,21 @@ fi # deployments. This ensures the keys match across nova and cinder across all # hosts. FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} -if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" - iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" -fi - if is_service_enabled cinder; then iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi async_wait configure_neutron_nova +# NOTE(clarkb): This must come after async_wait configure_neutron_nova because +# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If +# we don't wait then these two ini updates race either other and can result +# in unexpected configs. 
+if is_service_enabled nova; then + iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" + iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" +fi + # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" From 5c304d817682d6c807b532b50a2f105479ac3fa2 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 11 Jun 2020 11:00:56 +0100 Subject: [PATCH 1415/1936] Change Neutron's default ML2 driver to OVN As part of the Victoria PTG the Neutron community [0] agreed on changing the default backend driver from ML2/OVS to ML2/OVN in DevStack. A lot of changes have been submitted towards this goal including but not limted to: * Moving the OVN module to DevStack: https://review.opendev.org/c/openstack/devstack/+/734621 * Updating the OVN module to use distro packages instead of compiling OVN from source: https://review.opendev.org/c/openstack/devstack/+/763402o And now this patch is changing the the actual Q_AGENT, Q_ML2_TENANT_NETWORK_TYPE and Q_ML2_PLUGIN_MECHANISM_DRIVERS values in devstack to what is expected by OVN as well as updating the Zuul templates to enable the OVN services. [0] https://etherpad.opendev.org/p/neutron-victoria-ptg Change-Id: I92054ce9d2ab7a42746ed5dececef583b0f8a833 Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 35 +++++++++++++++++++++++++++-------- lib/neutron_plugins/ml2 | 6 +++--- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..e133bfacd7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,6 +468,10 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true + OVN_L3_CREATE_PUBLIC_NETWORK: true + OVN_DBS_LOG_LEVEL: dbg + ENABLE_CHASSIS_AS_GW: true + Q_USE_PROVIDERNET_FOR_PUBLIC: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -477,7 +481,11 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + # + # NOTE(lucasagomes): disable dstat until bug + # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is + # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 + dstat: false etcd3: true memory_tracker: true mysql: true @@ -496,13 +504,14 @@ n-sch: true # Placement service placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true q-svc: true + q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -527,15 +536,24 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + # + # NOTE(lucasagomes): disable dstat until bug + # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is + # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 + dstat: false memory_tracker: true tls-proxy: true # Nova services n-cpu: true # Placement services placement-client: true + # OVN services + ovn-controller: true + ovn-northd: false + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true + q-ovn-metadata-agent: true # Cinder services c-bak: true c-vol: true @@ -549,6 +567,7 @@ # integrated gate, so specifying the services has not effect. 
# ceilometer-*: false devstack_localrc: + ENABLE_CHASSIS_AS_GW: false # Subnode specific settings GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ae4b251d83..a58ba5cbcd 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -8,14 +8,14 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace # Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} +Q_AGENT=${Q_AGENT:-ovn} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. # Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +24,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options From 69a66fb62bcb77145b6eec21fc2d56d40a861d0d Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 11 May 2021 11:04:32 +0100 Subject: [PATCH 1416/1936] Fix docs job Sphinx 4.0.0 added a new dependency [0] which is causing the job to fail at the moment. This patch fix the problem by adding UC to the docs jobs. 
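
Since only the docs tox environment is touched, the fix can be verified
locally by rebuilding the documentation against the constrained requirements,
for example:

    $ tox -e docs
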
[0] https://www.sphinx-doc.org/en/master/changes.html (LaTeX: add tex-gyre font dependency) Change-Id: I28019331017405c06577ada88f8e9f6d9a2afc23 Signed-off-by: Lucas Alvares Gomes --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ed28636d3a..5bb2268c4f 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,9 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] -deps = -r{toxinidir}/doc/requirements.txt +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt whitelist_externals = bash setenv = TOP_DIR={toxinidir} From 230f5c434c800c94935d5cd0dcfc1bd18329759f Mon Sep 17 00:00:00 2001 From: Anand Bhat Date: Wed, 12 May 2021 16:53:15 +0530 Subject: [PATCH 1417/1936] Changed minversion in tox to 3.18.0 The patch bumps min version of tox to 3.18.0 python in order to replace tox's whitelist_externals by allowlist_externals option: https://github.com/tox-dev/tox/blob/master/docs/changelog.rst#v3180-2020-07-23 Change-Id: Id8bdda703afc39d352e3a53877318dc30d91a5f7 --- tox.ini | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 5bb2268c4f..ec764abc87 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -minversion = 1.6 +minversion = 3.18.0 skipsdist = True envlist = bashate @@ -13,7 +13,7 @@ basepython = python3 # modified bashate tree deps = {env:BASHATE_INSTALL_PATH:bashate==2.0.0} -whitelist_externals = bash +allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ -not \( -type d -name doc -prune \) \ @@ -37,7 +37,7 @@ commands = bash -c "find {toxinidir} \ deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt -whitelist_externals = bash +allowlist_externals = bash setenv = TOP_DIR={toxinidir} commands = @@ -45,7 +45,7 @@ commands = [testenv:pdf-docs] deps = {[testenv:docs]deps} -whitelist_externals = +allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf From ff073a5643f01dda3200d2ce426f23dc24e28b8f Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 13 May 2021 16:25:17 +0000 Subject: [PATCH 1418/1936] Revert "Change Neutron's default ML2 driver to OVN" This reverts commit 5c304d817682d6c807b532b50a2f105479ac3fa2. Reason for revert: There are more things to fix/move like done in 791085 and 791282 Also let's change all required default in devstack scripts instead of devstack's zuul job side. Basically do this change without any change in .zuul.yaml Change-Id: Ie0f59d1b9a4b97ad9fd8131819054dfb616f31fd --- .zuul.yaml | 35 ++++++++--------------------------- lib/neutron_plugins/ml2 | 6 +++--- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e133bfacd7..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,10 +468,6 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true - OVN_L3_CREATE_PUBLIC_NETWORK: true - OVN_DBS_LOG_LEVEL: dbg - ENABLE_CHASSIS_AS_GW: true - Q_USE_PROVIDERNET_FOR_PUBLIC: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -481,11 +477,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - # - # NOTE(lucasagomes): disable dstat until bug - # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is - # fixed. 
Also see: https://bugs.launchpad.net/neutron/+bug/1898863 - dstat: false + dstat: true etcd3: true memory_tracker: true mysql: true @@ -504,14 +496,13 @@ n-sch: true # Placement service placement-api: true - # OVN services - ovn-controller: true - ovn-northd: true - ovs-vswitchd: true - ovsdb-server: true # Neutron services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true q-svc: true - q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -536,24 +527,15 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - # - # NOTE(lucasagomes): disable dstat until bug - # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is - # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 - dstat: false + dstat: true memory_tracker: true tls-proxy: true # Nova services n-cpu: true # Placement services placement-client: true - # OVN services - ovn-controller: true - ovn-northd: false - ovs-vswitchd: true - ovsdb-server: true # Neutron services - q-ovn-metadata-agent: true + q-agt: true # Cinder services c-bak: true c-vol: true @@ -567,7 +549,6 @@ # integrated gate, so specifying the services has not effect. # ceilometer-*: false devstack_localrc: - ENABLE_CHASSIS_AS_GW: false # Subnode specific settings GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index a58ba5cbcd..ae4b251d83 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -8,14 +8,14 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace # Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-ovn} +Q_AGENT=${Q_AGENT:-openvswitch} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. # Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +24,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options From c19c1262c8b81f1cc543eafb9e5c3a34c4b632fe Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Mon, 17 May 2021 13:54:10 +0100 Subject: [PATCH 1419/1936] Replace dstat with pcp-dstat The dstat project is no longer maintained. The pcp-dstat package installs a dstat command so no further updates to the scripts should be needed. 
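
Because pcp provides a drop-in dstat command, existing dstat invocations in
DevStack should keep working unchanged; a quick sanity check after installing
the new dependency is a plain run such as:

    $ dstat 1 5
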
Change-Id: Ied8c9d29bed4f887c364db7080a0f2a0c02328af Signed-off-by: Lucas Alvares Gomes --- files/debs/dstat | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/debs/dstat b/files/debs/dstat index 2b643b8b1b..40d00f4aa4 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1 +1,2 @@ -dstat +dstat # dist:bionic +pcp From 6e9f7c25704afb5bcc33c6f17a01f62068664e40 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Tue, 13 Oct 2020 14:20:38 -0400 Subject: [PATCH 1420/1936] Support optimized cinder backend for glance When Glance is configured with a cinder glance_store, Cinder can be configured to allow cloning of image data directly in the backend instead of transferring data through Glance. Expose these configuration options in devstack to facilitate testing this feature. Adds: - CINDER_ALLOWED_DIRECT_URL_SCHEMES - GLANCE_SHOW_DIRECT_URL - GLANCE_SHOW_MULTIPLE_LOCATIONS Change-Id: Iee619b443088fd77cf7b1a48563203bdf4a93a39 --- lib/cinder | 19 +++++++++++++++++++ lib/glance | 15 +++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/lib/cinder b/lib/cinder index cfa3693f03..fca01a2140 100644 --- a/lib/cinder +++ b/lib/cinder @@ -104,6 +104,22 @@ if is_fedora || is_suse; then fi fi +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi + # For backward compatibility # Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured # along with ceph backend driver. @@ -266,6 +282,9 @@ function configure_cinder { fi iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi # Avoid RPC timeouts in slow CI and test environments by doubling the # default response timeout set by RPC clients. See bug #1873234 for more diff --git a/lib/glance b/lib/glance index e789affaf1..118fa7c863 100644 --- a/lib/glance +++ b/lib/glance @@ -51,6 +51,18 @@ GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance if is_opensuse; then GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. 
The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + # Glance multi-store configuration # Boolean flag to enable multiple store configuration for glance GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) @@ -283,6 +295,9 @@ function configure_glance { if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi + # Only use these if you know what you are doing! See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES From 35cec0d7c0857d76d3ea0b52b97f2a166c04c13e Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Mon, 17 May 2021 18:58:59 -0400 Subject: [PATCH 1421/1936] Remove Block Storage API v2 support The Block Storage API v2 was deprecated in Pike by change I913c44799cddc37c3342729ec0ef34068db5b2d4 and is (finally) being removed in Xena [0]. So remove v2 support from devstack. [0] https://wiki.openstack.org/wiki/CinderXenaPTGSummary#Removing_the_Block_Storage_API_v2 Depends-on: https://review.opendev.org/c/openstack/devstack/+/792048 Change-Id: I856d78648d28ac4cad0fb212bef1ae6ad32fca90 --- lib/cinder | 12 ------------ lib/tempest | 13 ------------- 2 files changed, 25 deletions(-) diff --git a/lib/cinder b/lib/cinder index cfa3693f03..dab2aea1c6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -376,12 +376,6 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" get_or_create_endpoint \ "volumev3" \ @@ -393,12 +387,6 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s" - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" get_or_create_endpoint \ "volumev3" \ diff --git a/lib/tempest b/lib/tempest index 29a62290ce..d835c68d4a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -459,13 +459,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - # Set the service catalog entry for Tempest to run on. Typically - # used to try different Volume API version targets. The tempest - # default it to 'volumev3'(v3 APIs endpoint) , so only set this - # if you want to change it. 
- if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then - iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE - fi # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} @@ -489,12 +482,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} - # Reset microversions to None where v2 is running which does not support microversion. - # Both "None" means no microversion testing. - if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then - tempest_volume_min_microversion=None - tempest_volume_max_microversion=None - fi if [ "$tempest_volume_min_microversion" == "None" ]; then inicomment $TEMPEST_CONFIG volume min_microversion else From 2a9673f0278699d03931d69b4da22e9709300026 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 19 May 2021 10:22:18 -0400 Subject: [PATCH 1422/1936] docs: recommend Ubuntu 20.04 instead of Bionic Bionic support was dropped in I39e38e4a6c2e52dd3822c9fdea354258359a9f53. Change-Id: I765aac352590fd2f74d3fd90676d6d098548e6b8 --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 8b8acde38c..9f477ab911 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. -If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the +If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. Add Stack User (optional) From f0736406f5ce055072a62a62fe9fdc7cead49671 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 18 May 2021 17:15:30 -0500 Subject: [PATCH 1423/1936] Fix unit test to use python3 command unit test jobs staretd to run on ubuntu-focal now and failing for using 'python' command. Change-Id: Ie002faf4c96ac7f207207a481c057b8df0289e6c --- tests/test_write_devstack_local_conf_role.sh | 2 +- tests/unittest.sh | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh index b2bc0a2c46..71d8d51614 100755 --- a/tests/test_write_devstack_local_conf_role.sh +++ b/tests/test_write_devstack_local_conf_role.sh @@ -6,4 +6,4 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/functions source $TOP/tests/unittest.sh -python ./roles/write-devstack-local-conf/library/test.py +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh index 3703ece91d..fced2abe65 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -17,6 +17,8 @@ ERROR=0 PASS=0 FAILED_FUNCS="" +export PYTHON=$(which python3 2>/dev/null) + # pass a test, printing out MSG # usage: passed message function passed { From e38a39ad404637ca1649cea072883aa0a4592c4f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 14 May 2021 09:14:24 +0100 Subject: [PATCH 1424/1936] Change default network backend driver to ML2/OVN This patch is changing the default network backend driver in DevStack to OVN. 
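Deployments that still want the ML2/OVS layout are not locked in: the previous
defaults can be restored from local.conf. A rough, illustrative sketch using
the variable and service names touched by this series (the exact service list
depends on the topology):

    [[local|localrc]]
    Q_AGENT=openvswitch
    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge
    Q_ML2_TENANT_NETWORK_TYPE=vxlan
    disable_service ovn-controller ovn-northd ovs-vswitchd ovsdb-server q-ovn-metadata-agent
    enable_service q-agt q-dhcp q-l3 q-meta q-metering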
This is a long effort from the Neutron community that has been discussed on previous PTGs and agreed upon. A similar patch to this one [0] have been merged in the past but got reverted [1] because it did break some zuul jobs. This one also include fixes for such jobs and was verified at [2] [0] https://review.opendev.org/c/openstack/devstack/+/735097 [1] https://review.opendev.org/c/openstack/neutron/+/775632 [2] https://review.opendev.org/c/zuul/zuul-jobs/+/791117 Change-Id: I8c2be82f33ed9f6c36f5509b3b210ee1a38e87ca Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 20 ++++++++++++++------ lib/neutron_plugins/ml2 | 11 ++++++----- lib/neutron_plugins/ovn_agent | 18 +++++++++++++----- stackrc | 4 +++- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b65aeec4dd..4ca0257cc6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,6 +468,8 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true + OVN_L3_CREATE_PUBLIC_NETWORK: true + OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: $NEUTRON_CONF: @@ -496,13 +498,14 @@ n-sch: true # Placement service placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true q-svc: true + q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -534,8 +537,12 @@ n-cpu: true # Placement services placement-client: true + # OVN services + ovn-controller: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true + q-ovn-metadata-agent: true # Cinder services c-bak: true c-vol: true @@ -553,6 +560,7 @@ GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true + ENABLE_CHASSIS_AS_GW: false - job: name: devstack-ipv6 diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ae4b251d83..e1f868f0a7 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -7,15 +7,16 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. -# Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. 
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +25,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e4d0d75230..4af1340a26 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -21,10 +21,6 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_base source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent -# Load devstack ovs base functions -source $NEUTRON_DIR/devstack/lib/ovs - - # Defaults # -------- @@ -88,12 +84,18 @@ OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE) # configure the MTU DHCP option. OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} -# The log level of the OVN databases (north and south) +# The log level of the OVN databases (north and south). +# Supported log levels are: off, emer, err, warn, info or dbg. +# More information about log levels can be found at +# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# If True (default) the node will be considered a gateway node. +ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) + export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] @@ -171,6 +173,9 @@ function wait_for_sock_file { } function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi if [ -z "$is_new_ovn" ]; then local ovs_repo_dir=$DEST/$OVS_REPO_NAME if [ ! -d $ovs_repo_dir ]; then @@ -390,6 +395,9 @@ function install_ovn { sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + # Load devstack ovs base functions + source $NEUTRON_DIR/devstack/lib/ovs + # If OVS is already installed, remove it, because we're about to # re-install it from source. for package in openvswitch openvswitch-switch openvswitch-common; do diff --git a/stackrc b/stackrc index 196f61fa3c..05016594eb 100644 --- a/stackrc +++ b/stackrc @@ -72,8 +72,10 @@ if ! isset ENABLED_SERVICES ; then ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 + ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services From ddb66f2344b933f278b0b52be3ca59a4c511ab14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Sat, 18 Jul 2020 12:18:39 +0200 Subject: [PATCH 1425/1936] [CI] Add no-tls-proxy job Some gates run devstack like this and it follows different code paths. Let's ensure we don't break it now and then. 
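For a standalone DevStack the equivalent is a single line in local.conf:

    disable_service tls-proxy

The job below expresses the same thing through its devstack_services entry.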
Change-Id: I6ee1bfc30bced53f6d7fb841e01714069919fd88 Reference: http://lists.openstack.org/pipermail/openstack-discuss/2020-July/015997.html Reference: http://eavesdrop.openstack.org/meetings/qa/2021/qa.2021-05-11-14.00.log.html --- .zuul.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index b65aeec4dd..1ede4448a0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -595,6 +595,17 @@ zuul_copy_output: /opt/stack/async: logs +- job: + name: devstack-no-tls-proxy + parent: tempest-full-py3 + description: | + Tempest job with tls-proxy off. + + Some gates run devstack like this and it follows different code paths. + vars: + devstack_services: + tls-proxy: false + - job: name: devstack-platform-fedora-latest parent: tempest-full-py3 @@ -814,3 +825,7 @@ - ^.*\.rst$ - ^doc/.*$ - devstack-platform-fedora-latest-virt-preview + - devstack-no-tls-proxy + periodic: + jobs: + - devstack-no-tls-proxy From 7604e085b4542c50aefc9c23aa339958757a5770 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 25 May 2021 13:06:14 -0500 Subject: [PATCH 1426/1936] Pin nodeset for unit test job devstack unit test job does not set any nodeset and so does use default nodeset defined in base jobs in opendev. When opendev switches the default nodeset to the latest distro version, devstack unit test job can start failing. Example: - https://review.opendev.org/q/I01408f2f2959b0788fe712ac268a526502226ee9 - https://review.opendev.org/q/Ib1ea47bc7384e1f579cb08c779a32151fccd6845 To avoid such a situation in future, let's set the working nodeset for this job also so that when we cut the stable branch we can run it on the working distro version. Change-Id: I302140778fedf08bc8ba72c453837fa7b8f8f9ae --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 4ca0257cc6..9a559ef776 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -676,6 +676,7 @@ - job: name: devstack-unit-tests + nodeset: ubuntu-focal description: | Runs unit tests on devstack project. From 22038a9a8c4418d6d49bed83024a3cd97e627860 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 27 May 2021 13:44:20 +0100 Subject: [PATCH 1427/1936] [OVN] Configure public bridge enabled by default This patch makes the OVN_L3_CREATE_PUBLIC_NETWORK configuration True by default. This option makes the OVN lib in DevStack create & configure the external bridge, matching the same behavior from the OVS driver in DevStack. Change-Id: Icda53b95fdc3c169ac48a6ec4343c87ba404baa4 Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 1 - lib/neutron_plugins/ovn_agent | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6484b2a6ce..5bc6a8b424 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,7 +468,6 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true - OVN_L3_CREATE_PUBLIC_NETWORK: true OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 4af1340a26..f12e6a491d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -95,6 +95,7 @@ OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # If True (default) the node will be considered a gateway node. 
ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST if [[ "$SERVICE_IP_VERSION" == 6 ]]; then From 77835633c5d1daba37f453c5bf7c84fad0f2d68e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 13 May 2021 13:14:42 +1000 Subject: [PATCH 1428/1936] OVN : include source compliation functions This patch moves the OVS compilation module from Neutron into DevStack. It also renamed it to "ovs_source" to highlight its function, and the include has been moved to where the rest of the includes are located. Although this module is not required since by default DevStack installs OVS/OVN from the host OS packages instead of compiling from source, this is a nice to have as it avoids having bits and pieces of the code scattered around multiple repositories. Co-Authored-By: Lucas Alvares Gomes Change-Id: I39ec9ce0a91bea05cf8c446a9767ab879ac8e8f3 --- lib/neutron_plugins/ovn_agent | 6 +- lib/neutron_plugins/ovs_source | 215 +++++++++++++++++++++++++++++++++ 2 files changed, 218 insertions(+), 3 deletions(-) create mode 100644 lib/neutron_plugins/ovs_source diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 4af1340a26..71a4c60129 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -21,6 +21,9 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_base source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + # Defaults # -------- @@ -395,9 +398,6 @@ function install_ovn { sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then - # Load devstack ovs base functions - source $NEUTRON_DIR/devstack/lib/ovs - # If OVS is already installed, remove it, because we're about to # re-install it from source. for package in openvswitch openvswitch-switch openvswitch-common; do diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source new file mode 100644 index 0000000000..294171f18b --- /dev/null +++ b/lib/neutron_plugins/ovs_source @@ -0,0 +1,215 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Defaults +# -------- + +# Set variables for building OVS from source +OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} +OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') +OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} +OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5} + +# Functions + +# load_module() - Load module using modprobe module given by argument and dies +# on failure +# - fatal argument is optional and says whether function should +# exit if module can't be loaded +function load_module { + local module=$1 + local fatal=$2 + + if [ "$(trueorfalse True fatal)" == "True" ]; then + sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") + else + sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) + fi +} + +# prepare_for_compilation() - Fetch ovs git repository and install packages needed for +# compilation. +function prepare_for_ovs_compilation { + local build_modules=${1:-False} + OVS_DIR=$DEST/$OVS_REPO_NAME + + if [ ! -d $OVS_DIR ] ; then + # We can't use git_clone here because we want to ignore ERROR_ON_CLONE + git_timed clone $OVS_REPO $OVS_DIR + cd $OVS_DIR + git checkout $OVS_BRANCH + else + # Even though the directory already exists, call git_clone to update it + # if needed based on the RECLONE option + git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH + cd $OVS_DIR + fi + + # TODO: Can you create package list files like you can inside devstack? + install_package autoconf automake libtool gcc patch make + + # If build_modules is False, we don't need to install the kernel-* + # packages. Just return. + if [[ "$build_modules" == "False" ]]; then + return + fi + + KERNEL_VERSION=`uname -r` + if is_fedora ; then + # is_fedora covers Fedora, RHEL, CentOS, etc... + if [[ "$os_VENDOR" == "Fedora" ]]; then + install_package elfutils-libelf-devel + KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` + elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then + # dash is illegal character in rpm version so replace + # them with underscore like it is done in the kernel + # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 + # but only for latest series of the kernel, not 3.x + + KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` + fi + + echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation + echo failed, please, provide a repository with the package, or yum update / reboot + echo your machine to get the latest kernel. + + install_package kernel-devel-$KERNEL_VERSION + install_package kernel-headers-$KERNEL_VERSION + + elif is_ubuntu ; then + install_package linux-headers-$KERNEL_VERSION + fi +} + +# load_ovs_kernel_modules() - load openvswitch kernel module +function load_ovs_kernel_modules { + load_module openvswitch + load_module vport-geneve False + dmesg | tail +} + +# reload_ovs_kernel_modules() - reload openvswitch kernel module +function reload_ovs_kernel_modules { + set +e + ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) + if [ -n "$ovs_system" ]; then + sudo ovs-dpctl del-dp ovs-system + fi + set -e + sudo modprobe -r vport_geneve + sudo modprobe -r openvswitch + load_ovs_kernel_modules +} + +# compile_ovs() - Compile OVS from source and load needed modules. +# Accepts two parameters: +# - first one is False by default and means that modules are not built and installed. 
+# - second optional parameter defines prefix for ovs compilation +# - third optional parameter defines localstatedir for ovs single machine runtime +# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set +function compile_ovs { + local _pwd=$PWD + local build_modules=${1:-False} + local prefix=$2 + local localstatedir=$3 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + prepare_for_ovs_compilation $build_modules + + KERNEL_VERSION=$(uname -r) + major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1) + patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2) + if [ "${major_version}" -gt 5 ] || [ "${major_version}" == 5 ] && [ "${patch_level}" -gt 5 ]; then + echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling " + echo "Kernel module for version higher than 5.5. Skipping module compilation..." + build_modules="False" + fi + + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + if [[ "$build_modules" == "True" ]]; then + ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build + else + ./configure $prefix $localstatedir + fi + fi + make -j$(($(nproc) + 1)) + sudo make install + if [[ "$build_modules" == "True" ]]; then + sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install + reload_ovs_kernel_modules + else + load_ovs_kernel_modules + fi + + cd $_pwd +} + +# action_service - call an action over openvswitch service +# Accepts one parameter that can be either +# 'start', 'restart' and 'stop'. +function action_openvswitch { + local action=$1 + + if is_ubuntu; then + ${action}_service openvswitch-switch + elif is_fedora; then + ${action}_service openvswitch + elif is_suse; then + if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then + ${action}_service openvswitch-switch + else + ${action}_service openvswitch + fi + fi +} + +# start_new_ovs() - removes old ovs database, creates a new one and starts ovs +function start_new_ovs { + sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ + sudo /usr/share/openvswitch/scripts/ovs-ctl start +} + +# stop_new_ovs() - stops ovs +function stop_new_ovs { + local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl' + + if [ -x $ovs_ctl ] ; then + sudo $ovs_ctl stop + fi +} + +# remove_ovs_packages() - removes old ovs packages from the system +function remove_ovs_packages { + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package; then + uninstall_package $package + fi + done +} + + +# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module +function load_conntrack_gre_module { + sudo modprobe nf_conntrack_proto_gre +} From 58f6048dd488664aab6ae42efeb9deb90b051acf Mon Sep 17 00:00:00 2001 From: Anand Bhat Date: Fri, 28 May 2021 11:36:23 +0530 Subject: [PATCH 1429/1936] setup.cfg: Replace dashes with underscores Setuptools v54.1.0 introduces a warning that the use of dash-separated options in 'setup.cfg' will not be supported in a future version [1]. Get ahead of the issue by replacing the dashes with underscores. Without this, we see 'UserWarning' messages like the following on new enough versions of setuptools: UserWarning: Usage of dash-separated 'description-file' will not be supported in future versions. 
Please use the underscore name 'description_file' instead [1] https://github.com/pypa/setuptools/commit/a2e9ae4cb Change-Id: I6b8e791c06319fa5fa0935337520c36800b1abd6 --- setup.cfg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 146f010243..a4e621f6df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,11 @@ [metadata] name = DevStack summary = OpenStack DevStack -description-file = +description_file = README.rst author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/devstack/latest +author_email = openstack-discuss@lists.openstack.org +home_page = https://docs.openstack.org/devstack/latest classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License From 8ea11c2947753f988979330ecc5fab2a6362881c Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 31 May 2021 15:04:29 -0500 Subject: [PATCH 1430/1936] Update IRC network to OFTC Change-Id: I260d9e65782add011f00d9087e0a5ac71e2be324 --- doc/source/contributor/contributing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 5e0df569f7..4de238fbf8 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -13,7 +13,7 @@ with Devstack. Communication ~~~~~~~~~~~~~ -* IRC channel ``#openstack-qa`` at FreeNode +* IRC channel ``#openstack-qa`` at OFTC. * Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss From 949f5ada608961fd05435e01bd9d06757b6c62c5 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Mon, 15 Mar 2021 18:25:04 +0100 Subject: [PATCH 1431/1936] Add route to IPv6 private subnets in ML2/OVN Enable IPv6 private subnet routing in ML2/OVN, it uses the behavior that already exists in ML2/OVS: add a route from the devstack node to the CIDRs of the default IPv6 subnet pool. Any IPv6 subnet created using the default subnet pool and plugged into the default router is reachable from the host (ex: ipv6-private-subnet). Change-Id: I02ca1d94e9f4d5ad4a06182f5ac9a2434941cf08 --- lib/neutron_plugins/services/l3 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 75a3567096..5d339a00aa 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -392,8 +392,8 @@ function _neutron_configure_router_v6 { openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. 
@@ -420,6 +420,11 @@ function _neutron_configure_router_v6 { # Configure interface for public bridge sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + # Any IPv6 private subnet that uses the default IPV6 subnet pool + # and that is plugged into the default router (Q_ROUTER_NAME) will + # be reachable from the devstack node (ex: ipv6-private-subnet). + # Some scenario tests (such as octavia-tempest-plugin) rely heavily + # on this feature. local replace_range=${SUBNETPOOL_PREFIX_V6} if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then replace_range=${FIXED_RANGE_V6} From cbc0b64a343e31506b43a9395e6a77fcfd6eb70b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 3 Jun 2021 06:14:05 +0000 Subject: [PATCH 1432/1936] Updated from generate-devstack-plugins-list Change-Id: I2d5b0c59d5dd33f639ec685b16768325d67e9dbf --- doc/source/plugin-registry.rst | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4e7c2d7b2f..691fffa846 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,6 +24,8 @@ official OpenStack projects. ======================================== === Plugin Name URL ======================================== === +inspur/venus `https://opendev.org/inspur/venus `__ +inspur/venus-dashboard `https://opendev.org/inspur/venus-dashboard `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ @@ -39,21 +41,18 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ -openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/glance `https://opendev.org/openstack/glance `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ -openstack/karbor `https://opendev.org/openstack/karbor `__ -openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard `__ openstack/keystone `https://opendev.org/openstack/keystone `__ openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ @@ -65,21 +64,15 @@ openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-t openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ openstack/masakari 
`https://opendev.org/openstack/masakari `__ openstack/mistral `https://opendev.org/openstack/mistral `__ -openstack/monasca-analytics `https://opendev.org/openstack/monasca-analytics `__ openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ -openstack/monasca-ceilometer `https://opendev.org/openstack/monasca-ceilometer `__ openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ -openstack/monasca-log-api `https://opendev.org/openstack/monasca-log-api `__ openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/monasca-transform `https://opendev.org/openstack/monasca-transform `__ openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw `__ -openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ @@ -93,19 +86,13 @@ openstack/octavia `https://opendev.org/openstack/octavia openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ -openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/panko `https://opendev.org/openstack/panko `__ openstack/patrole `https://opendev.org/openstack/patrole `__ -openstack/qinling `https://opendev.org/openstack/qinling `__ -openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/searchlight `https://opendev.org/openstack/searchlight `__ -openstack/searchlight-ui `https://opendev.org/openstack/searchlight-ui `__ openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ openstack/solum `https://opendev.org/openstack/solum `__ @@ -143,6 +130,7 @@ x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin- x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ x/fenix `https://opendev.org/x/fenix 
`__ @@ -169,6 +157,7 @@ x/networking-fortinet `https://opendev.org/x/networking-forti x/networking-hpe `https://opendev.org/x/networking-hpe `__ x/networking-huawei `https://opendev.org/x/networking-huawei `__ x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ x/networking-nec `https://opendev.org/x/networking-nec `__ @@ -193,7 +182,6 @@ x/stackube `https://opendev.org/x/stackube `__ x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ -x/tobiko `https://opendev.org/x/tobiko `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ From 4e916aeb060ecf99203f297a0ff726a65d27f50e Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 26 Apr 2021 08:52:23 -0700 Subject: [PATCH 1433/1936] Configure glance unified limit quotas This makes us configure limits for glance and enable enforcement. Depends-On: https://review.opendev.org/c/openstack/glance/+/794247 Change-Id: If58d8474cae95b1be3044bd52010b8288a7f5fcc --- lib/glance | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/lib/glance b/lib/glance index e789affaf1..fd2e0afcc1 100644 --- a/lib/glance +++ b/lib/glance @@ -84,6 +84,7 @@ GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_stagi GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -263,6 +264,45 @@ function configure_glance_store { fi } +function configure_glance_quotas { + + # NOTE(danms): We need to have some of the OS_ things unset in + # order to use system scope, which is required for creating these + # limits. This is a hack, but I dunno how else to get osc to use + # system scope. 
+ + bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 1000 --region $REGION_NAME \ + image_size_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 1000 --region $REGION_NAME \ + image_stage_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 100 --region $REGION_NAME \ + image_count_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 100 --region $REGION_NAME \ + image_count_uploading" + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack role add --user glance --user-domain Default --system all \ + reader +} + # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR @@ -403,6 +443,11 @@ function create_glance_accounts { service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id + + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas + fi + fi } From 6843bc798c3fe5f17286e1c07ede95171bb49a25 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 14 May 2021 14:51:51 -0500 Subject: [PATCH 1434/1936] Temporary add feature pragma OpenStackSDK has a feature branch "feature/r1" as a preparation for the R1.0 release. Due to different branch naming functional tests with devstack are not running. Add temporarily (for the duration of the feature branch) pragma to the zuul.yaml to allow Zuul run functional tests. It will be dropped once SDK gets next major release. Change-Id: I671b589150fe731125e16316a994a5942219920b --- .zuul.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..9a675c64ce 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,3 +1,10 @@ +- pragma: + # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to + # be using devstack + # TODO(gtema): delete this once r1 branch is merged into master + implied-branches: + - feature/r1 + - nodeset: name: openstack-single-node nodes: From 3ad1661384636eee78a4bf5196812b6223e1b0a6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 4 Jun 2021 21:32:17 +0000 Subject: [PATCH 1435/1936] Revert "Temporary add feature pragma" This reverts commit 6843bc798c3fe5f17286e1c07ede95171bb49a25. 
Reason for revert: not sure why but this end up disabling the integration job on check pipeline, Change-Id: Icfaf8ea17b3ce2e405414c23f8075b18d297bf8b example: latest recheck on PS12 check pipeline job for neutron - https://review.opendev.org/c/openstack/neutron/+/790060 --- .zuul.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9a675c64ce..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,10 +1,3 @@ -- pragma: - # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to - # be using devstack - # TODO(gtema): delete this once r1 branch is merged into master - implied-branches: - - feature/r1 - - nodeset: name: openstack-single-node nodes: From 96509ea025459ac077d2c85289da7725c53235cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 21 Oct 2020 20:33:08 +0200 Subject: [PATCH 1436/1936] Check centos-8-stream CentOS Stream 8 (aka CentOS 8 Stream) is the currently supported runtime platform. [0] Some background history: The Manila team has asked QA to test centos-8-stream in the common gate. A bit later it turned out the point releases of CentOS 8 (aka CentOS Linux 8) will stop happening entirely by the end of 2021. [1] Includes a workaround to the edk2-ovmf issue on CentOS Stream 8 x86_64. [0] https://governance.openstack.org/tc/reference/runtimes/xena.html [1] https://lists.centos.org/pipermail/centos-devel/2020-December/075451.html Change-Id: Iee5a262af757f27f79ba1d6f790e949427dca190 --- .zuul.yaml | 19 +++++++++++++++++++ lib/nova_plugins/functions-libvirt | 12 +++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5bc6a8b424..e45ff8febc 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -58,6 +58,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-8-stream + nodes: + - name: controller + label: centos-8-stream + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-opensuse-15 nodes: @@ -591,6 +601,14 @@ voting: false timeout: 9000 +- job: + name: devstack-platform-centos-8-stream + parent: tempest-full-py3 + description: CentOS 8 Stream platform test + nodeset: devstack-single-node-centos-8-stream + voting: false + timeout: 9000 + - job: name: devstack-async parent: tempest-full-py3 @@ -704,6 +722,7 @@ - devstack-ipv6 - devstack-platform-fedora-latest - devstack-platform-centos-8 + - devstack-platform-centos-8-stream - devstack-async - devstack-multinode - devstack-unit-tests diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d3827c30dd..58adde7cd4 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -81,7 +81,17 @@ function install_libvirt { install_package qemu-kvm install_package libvirt libvirt-devel - if is_arch "aarch64"; then + if is_arch "x86_64"; then + # NOTE(yoctozepto): recent edk2-ovmf on CentOS Stream 8 x86_64 started failing with + # "libvirt.libvirtError: internal error: unknown feature amd-sev-es", + # so reinstall a known working version until the relevant bugs get fixed: + # * https://bugzilla.redhat.com/show_bug.cgi?id=1961558 + # * https://bugzilla.redhat.com/show_bug.cgi?id=1961562 + # TODO(yoctozepto): Remove this code when the time is right. 
+ if [ "$os_VENDOR" = "CentOSStream" ]; then + install_package edk2-ovmf-20200602gitca407c7246bf-4.el8 + fi + elif is_arch "aarch64"; then install_package edk2.git-aarch64 fi From 81937a230a8adb5c028db5a9ba9abf59b122a2ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 7 Jun 2021 17:28:38 +0000 Subject: [PATCH 1437/1936] [CI] Drop CentOS Linux 8 job and nodeset CentOS Stream 8 (aka CentOS 8 Stream) is the currently supported runtime platform. [0] DevStack works with CentOS Stream only now. [1] The only usage of the nodeset being dropped is handled by the Depends-On. [0] https://governance.openstack.org/tc/reference/runtimes/xena.html [1] https://review.opendev.org/c/openstack/devstack/+/759122 Depends-On: https://review.opendev.org/c/openstack/cinder-tempest-plugin/+/795159 Change-Id: Ic0f696b46dce3dba529b53a8f9de8cda6b913c7b --- .zuul.yaml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e45ff8febc..74223d44f5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,16 +48,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-8 - nodes: - - name: controller - label: centos-8 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-8-stream nodes: @@ -593,14 +583,6 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. -- job: - name: devstack-platform-centos-8 - parent: tempest-full-py3 - description: Centos 8 platform test - nodeset: devstack-single-node-centos-8 - voting: false - timeout: 9000 - - job: name: devstack-platform-centos-8-stream parent: tempest-full-py3 @@ -721,7 +703,6 @@ - devstack - devstack-ipv6 - devstack-platform-fedora-latest - - devstack-platform-centos-8 - devstack-platform-centos-8-stream - devstack-async - devstack-multinode From 2fb8c7a5eea6f9321eef36fb9b8fd8e55465c91e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 7 Jun 2021 17:44:54 +0000 Subject: [PATCH 1438/1936] Move verify-ipv6-only-deployments from Tempest to DevStack as it tests DevStack side of things and is useful for projects not using Tempest. Verbatim copy except for the devstack- prefix and the /devstack/ path. Change-Id: Ie166730843f874b9c99e37244e460d7ad33b7eeb --- .../README.rst | 16 ++++ .../defaults/main.yaml | 1 + .../tasks/main.yaml | 4 + tools/verify-ipv6-only-deployments.sh | 92 +++++++++++++++++++ 4 files changed, 113 insertions(+) create mode 100644 roles/devstack-ipv6-only-deployments-verification/README.rst create mode 100644 roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml create mode 100644 roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml create mode 100755 tools/verify-ipv6-only-deployments.sh diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst new file mode 100644 index 0000000000..400a8da222 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -0,0 +1,16 @@ +Verify the IPv6-only deployments + +This role needs to be invoked from a playbook that +run tests. This role verifies the IPv6 setting on +devstack side and devstack deploy services on IPv6. +This role is invoked before tests are run so that +if any missing IPv6 setting or deployments can fail +the job early. + + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml new file mode 100644 index 0000000000..59d3b79bc1 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml @@ -0,0 +1,4 @@ +- name: Verify the ipv6-only deployments + become: true + become_user: stack + shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh" diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh new file mode 100755 index 0000000000..2596395165 --- /dev/null +++ b/tools/verify-ipv6-only-deployments.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# +# +# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that +# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack +# plugins are missing the required setting to listen on IPv6 address. This is run as part of +# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6' +# can expand the IPv6 verification specific to project by defining the new post-run script which +# will run along with this base script. +# If there are more common verification for IPv6 then we can always extent this script. + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd) +source $TOP_DIR/stackrc +source $TOP_DIR/openrc admin admin + +function verify_devstack_ipv6_setting { + local _service_host='' + _service_host=$(echo $SERVICE_HOST | tr -d []) + local _host_ipv6='' + _host_ipv6=$(echo $HOST_IPV6 | tr -d []) + local _service_listen_address='' + _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) + local _service_local_host='' + _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + if [[ "$SERVICE_IP_VERSION" != 6 ]]; then + echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." + exit 1 + fi + is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') + if [[ "$is_service_host_ipv6" != "True" ]]; then + echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') + if [[ "$is_host_ipv6" != "True" ]]; then + echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') + if [[ "$is_service_listen_address" != "True" ]]; then + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address." 
+ exit 1 + fi + is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') + if [[ "$is_service_local_host" != "True" ]]; then + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + echo "Devstack is properly configured with IPv6" + echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST +} + +function sanity_check_system_ipv6_enabled { + system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())') + if [[ $system_ipv6_enabled != "True" ]]; then + echo "IPv6 is disabled in system" + exit 1 + fi + echo "IPv6 is enabled in system" +} + +function verify_service_listen_address_is_ipv6 { + local endpoints_verified=False + local all_ipv6=True + endpoints=$(openstack endpoint list -f value -c URL) + for endpoint in ${endpoints}; do + local endpoint_address='' + endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') + endpoint_address=$(echo $endpoint_address | tr -d []) + local is_endpoint_ipv6='' + is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') + if [[ "$is_endpoint_ipv6" != "True" ]]; then + all_ipv6=False + echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address." + continue + fi + endpoints_verified=True + done + if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then + exit 1 + fi + echo "All services deployed by devstack is on IPv6 endpoints" + echo $endpoints +} + +#First thing to verify if system has IPv6 enabled or not +sanity_check_system_ipv6_enabled +#Verify whether devstack is configured properly with IPv6 setting +verify_devstack_ipv6_setting +#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6. +verify_service_listen_address_is_ipv6 From 95298788085de38342e789bf10c35849c7117dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Tue, 8 Jun 2021 16:19:40 +0000 Subject: [PATCH 1439/1936] [OVN] Set up routing on public bridge like OVS did This fixes various reported and unreported issues with the new behaviour. Removes code repetition as well to pay off some technical debt. 
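An illustrative way to sanity-check the result on a single node after stacking
(br-ex being the usual PUBLIC_BRIDGE default):

    sudo ip addr show br-ex      # PUBLIC_NETWORK_GATEWAY should be back on the bridge
    ip -6 route                  # with IPv6 enabled, a route towards the IPv6 private subnets
    ping -c 1 <floating IP>      # floating IPs reachable from the host again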
Closes-Bug: #1930360 Change-Id: I726c532e96ca434520838ae8a35d5b88b6dd337b --- lib/neutron_plugins/ovn_agent | 42 ++------------------------------- lib/neutron_plugins/services/l3 | 6 ++--- 2 files changed, 5 insertions(+), 43 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 948fede3b9..cfcb01ee91 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -262,48 +262,10 @@ function clone_repository { ERROR_ON_CLONE=false git_clone $repo $dir $branch } -function get_ext_gw_interface { - # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH - # This function is copied directly from the devstack neutron-legacy script - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - echo $Q_PUBLIC_VETH_EX - else - # Disable in-band as we are going to use local port - # to communicate with VMs - sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ - other_config:disable-in-band=true - echo $PUBLIC_BRIDGE - fi -} - function create_public_bridge { # Create the public bridge that OVN will use - # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6 - local ext_gw_ifc - ext_gw_ifc=$(get_ext_gw_interface) - - sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc - if [ -n "$FLOATING_RANGE" ]; then - local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc - fi - - # Ensure IPv6 RAs are accepted on the interface with the default route. - # This is needed for neutron-based devstack clouds to work in - # IPv6-only clouds in the gate. Please do not remove this without - # talking to folks in Infra. This fix is based on a devstack fix for - # neutron L3 agent: https://review.openstack.org/#/c/359490/. - default_route_dev=$(ip route | grep ^default | awk '{print $5}') - sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 - - sudo sysctl -w net.ipv6.conf.all.forwarding=1 - if [ -n "$IPV6_PUBLIC_RANGE" ]; then - local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc - fi - - sudo ip link set $ext_gw_ifc up + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE } function _disable_libvirt_apparmor { diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 5d339a00aa..b6bc02818c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -343,8 +343,8 @@ function _neutron_configure_router_v4 { # Configure the external network as the default router gateway openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then @@ -393,7 +393,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. From 5a684eb51b4c18aee2051c5a7c703f50bbcc41ca Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 9 Jun 2021 09:37:34 +0200 Subject: [PATCH 1440/1936] Drop broute from ebtables_dump This table is no longer present on most installations, drop it from the list to avoid error messages during log collection that people mistake to be the real error why devstack is failing. This may lose some debugging information in edge cases, but I think the improvement of the general user experience is more important. Change-Id: Ibb9b247a018a788c8c4b40487762319fe470bf0f Closes-Bug: 1885198 --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 22770f15b6..e2921737db 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -134,7 +134,7 @@ def disk_space(): def ebtables_dump(): - tables = ['filter', 'nat', 'broute'] + tables = ['filter', 'nat'] _header("EB Tables Dump") if not _find_cmd('ebtables'): return From 20d6a21e8a1daed9eee0a8413217b2f8e4e863dd Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 9 Jun 2021 14:20:01 -0400 Subject: [PATCH 1441/1936] Switch fedora-latest to use fedora-34 Fedora 32 is now EOL, we should test against the newly released version of Fedora which is 34. 
Depends-On: https://review.opendev.org/c/openstack/project-config/+/795604 Change-Id: I10d868aca20d1a10d3e7fcfeb78f6fda4c896ee8 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 74223d44f5..968d79f76a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -72,7 +72,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-32 + label: fedora-34 groups: - name: tempest nodes: From 89baa314c1408251abd9f4d61d9cf5e5c945bc4e Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 11 Jun 2021 16:59:52 +0200 Subject: [PATCH 1442/1936] Temporarily add feature pragma OpenStackSDK has a feature branch "feature/r1" as a preparation for the R1.0 release. Due to different branch naming, functional tests with devstack are not running. Add temporarily (for the duration of the feature branch) a pragma to the zuul.yaml to allow Zuul to run functional tests. It will be dropped once SDK gets next major release. Previous attempt didn't work well for other projects, therefore explicitly include master as well. Change-Id: I3a5722873f395bc52cc55a0fd6bcea0ebe3b74fc --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 74223d44f5..3c490ff180 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,3 +1,11 @@ +- pragma: + # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to + # be using devstack + # TODO(gtema): delete this once r1 branch is merged into master + implied-branches: + - master + - feature/r1 + - nodeset: name: openstack-single-node nodes: From 6af3cb9eb273c127c20bc07f65c9a5d7f8ba95cd Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Thu, 11 Mar 2021 11:28:47 -0800 Subject: [PATCH 1443/1936] nova ironic-hypervisor - support scoped auth config The Secure RBAC effort has updated Ironic such that it can support a mode where it is scope enforcing for all interactions with the API. Due to the design, and operating nature of Ironic's API, services speaking with it must authenticate with a system scope to have a full picture of the universe. In this case, we need to update the nova configuration accordingly such that the compute service understands how to talk to ironic so that it can see the nodes under management. Ironic will likely update this again at a later point in time to enable a "hybrid" mixed-mode as the operating model and related permissions *should* allow nova to use a project scoped "owner" account with Ironic, in order to access and command nodes to deploy. But at this time, we're focusing on the exclusive operating mode.
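For illustration, a rough sketch (not part of this change itself) of the [ironic] section these calls produce in nova.conf; angle-bracket values stand for the devstack variables substituted at deploy time, and only the options touched or shown in the hunk below are listed:

    [ironic]
    username = admin
    password = <ADMIN_PASSWORD>
    auth_url = <KEYSTONE_SERVICE_URI>
    user_domain_id = default
    region_name = <REGION_NAME>
    # when IRONIC_ENFORCE_SCOPE is True
    system_scope = all
    # otherwise the existing project-scoped settings are kept:
    # project_domain_id = default
    # project_name = demo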
Change-Id: I1946725ce08c495178c419eaf38829f921c91bbe Needed-By: https://review.opendev.org/c/openstack/ironic/+/778957 --- functions-common | 5 +++++ lib/nova_plugins/hypervisor-ironic | 9 ++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 340da754a2..11679e4aa3 100644 --- a/functions-common +++ b/functions-common @@ -1037,6 +1037,11 @@ function is_ironic_hardware { return 1 } +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + # Package Functions # ================= diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index bda6ef6998..f058e9bb53 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -47,9 +47,13 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic username admin iniset $NOVA_CONF ironic password $ADMIN_PASSWORD iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF ironic project_domain_id default + if is_ironic_enforce_scope; then + iniset $NOVA_CONF ironic system_scope all + else + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic project_name demo + fi iniset $NOVA_CONF ironic user_domain_id default - iniset $NOVA_CONF ironic project_name demo iniset $NOVA_CONF ironic region_name $REGION_NAME # These are used with crufty legacy ironicclient @@ -82,7 +86,6 @@ function stop_nova_hypervisor { : } - # Restore xtrace $_XTRACE_HYP_IRONIC From 808331488dc16afdf9bd4c2c3103a4a8fc9a6209 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Fri, 18 Jun 2021 12:06:02 +0000 Subject: [PATCH 1444/1936] Revert edk2 workaround It is not needed anymore. Change-Id: I706a33b0a7c737a23b9a7270af1e53e5de83c66f --- lib/nova_plugins/functions-libvirt | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 58adde7cd4..d3827c30dd 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -81,17 +81,7 @@ function install_libvirt { install_package qemu-kvm install_package libvirt libvirt-devel - if is_arch "x86_64"; then - # NOTE(yoctozepto): recent edk2-ovmf on CentOS Stream 8 x86_64 started failing with - # "libvirt.libvirtError: internal error: unknown feature amd-sev-es", - # so reinstall a known working version until the relevant bugs get fixed: - # * https://bugzilla.redhat.com/show_bug.cgi?id=1961558 - # * https://bugzilla.redhat.com/show_bug.cgi?id=1961562 - # TODO(yoctozepto): Remove this code when the time is right. - if [ "$os_VENDOR" = "CentOSStream" ]; then - install_package edk2-ovmf-20200602gitca407c7246bf-4.el8 - fi - elif is_arch "aarch64"; then + if is_arch "aarch64"; then install_package edk2.git-aarch64 fi From 5344885a61fe39565692014c15e0b4fb1055c835 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 17 Jun 2021 12:37:35 +0100 Subject: [PATCH 1445/1936] os_vif: Add support for configuring os_vif_ovs plugin This change adds an os-vif lib that declares two new variables, OS_VIF_OVS_OVSDB_INTERFACE and OS_VIF_OVS_ISOLATE_VIF. The former is introduced to work around bug #1929446, which causes the nova and neutron agents to periodically block waiting for ovs to respond. OS_VIF_OVS_ISOLATE_VIF is added to address bug #1734320: when using ml2/ovs, vif isolation should always be used to prevent cross-tenant traffic during a live migration.
This makes devstack more closely mirror reality by enabling it when ml2/ovs is used and disabling it otherwise. Related-Bug: #1734320 Related-Bug: #1929446 Related-Bug: #1912310 Change-Id: I88254c6e22b52585506ee4907c1c03b8d4f2dac7 --- lib/os-vif | 29 +++++++++++++++++++++++++++++ stack.sh | 6 ++++++ 2 files changed, 35 insertions(+) create mode 100644 lib/os-vif diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..865645c0d5 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,29 @@ +#!/bin/bash + +# support vsctl or native. +# until bug #1929446 is resolved we override the os-vif default +# and fall back to the legacy "vsctl" driver. +OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"} + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + fi + echo "False" +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +} diff --git a/stack.sh b/stack.sh index 6858ab8c42..44f1c8fa01 100755 --- a/stack.sh +++ b/stack.sh @@ -597,6 +597,7 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif # Extras Source # -------------- @@ -1159,6 +1160,11 @@ if is_service_enabled q-dhcp; then sudo sysctl -w net.ipv4.ip_forward=1 fi +# os-vif +# ------ +if is_service_enabled nova neutron; then + configure_os_vif +fi # Storage Service # --------------- From 7befae663c6aa99343cb2c90e74ee2e3bc676559 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Sat, 19 Jun 2021 13:24:00 +0200 Subject: [PATCH 1446/1936] Delay horizon startup Move the 'Starting Horizon' task after the end of the wait for create_flavors. The start_horizon function restarts the httpd server, the openstack services are unavailable during a short period of time, so the "openstack flavor create" calls might fail randomly. Closes-Bug: #1932580 Change-Id: I32ee7457586e3de8ba4dfce3b1a12025f9776542 --- stack.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 6858ab8c42..ef1ad3d26a 100755 --- a/stack.sh +++ b/stack.sh @@ -1341,6 +1341,7 @@ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then done fi +async_wait create_flavors if is_service_enabled horizon; then echo_summary "Starting Horizon" @@ -1348,8 +1349,6 @@ if is_service_enabled horizon; then start_horizon fi -async_wait create_flavors - # Create account rc files # ======================= From a5d52831dc4d357906f5514943e17ab535e9e578 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Fri, 18 Jun 2021 13:53:21 +0200 Subject: [PATCH 1447/1936] Fix updating setuptools in Centos In RHEL-based distributions, updating setuptools using pip removes the files from the python3-setuptools RPM. It breaks some tools such as semanage (which is used by diskimage-builder) that use the -s flag of the python interpreter (don't import modules from /usr/local). 
This commit reinstalls python3-setuptools to fix those applications. Change-Id: Ib44857e83f75acf37823fae912960a801c83cf7f --- tools/fixup_stuff.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 19219435ad..060abb1605 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -106,6 +106,16 @@ function fixup_fedora { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info + + # After updating setuptools based on the requirements, the files from the + # python3-setuptools RPM are deleted, it breaks some tools such as semanage + # (used in diskimage-builder) that use the -s flag of the python + # interpreter, enforcing the use of the packages from /usr/lib. + # Importing setuptools/pkg_resources in a such environment fails. + # Enforce the package re-installation to fix those applications. + if is_package_installed python3-setuptools; then + sudo dnf reinstall -y python3-setuptools + fi } function fixup_suse { From 5c9affdd9a2baff2166146f4743fe75009b32eab Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Fri, 12 Mar 2021 11:19:52 -0800 Subject: [PATCH 1448/1936] Use specific credentials for tempest plugin setup The tempest plugin expects the classic environment variables to be present for credentials to access the cloud, but this is wrong in cases where we're trying to setup system scoped services and need to remove the environment variables that was being used. Instead, change the plugin to use the os-cloud entry definitions, and specifically in this case devstack-admin which makes sense until we begin to start to make tempest itself scope aware. We likely will want to change the environment variables from being registered in devstack at some point and completely shift towards passing an-os-cloud parameter, but that is outside the scope of this change as doing so will likely break all plugins. 
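For illustration, a minimal sketch of what the switch means in practice (assumed commands, not taken from the patch): credentials move from exported environment variables to a named clouds.yaml entry, e.g.

    # before: relies on OS_USERNAME/OS_PASSWORD etc. exported in the environment
    openstack flavor list
    # after: uses the devstack-admin cloud entry that devstack writes to clouds.yaml
    openstack --os-cloud devstack-admin flavor list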
Change-Id: I8d4ec68f116eea07bc7346f939e134fa2e655eac --- lib/tempest | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/tempest b/lib/tempest index d835c68d4a..545018b4a4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -107,7 +107,7 @@ function remove_disabled_extensions { # Takes an image ID parameter as input function image_size_in_gib { local size - size=$(openstack image show $1 -c size -f value) + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } @@ -173,7 +173,7 @@ function configure_tempest { image_uuid_alt="$IMAGE_UUID" fi images+=($IMAGE_UUID) - done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') case "${#images[*]}" in 0) @@ -209,23 +209,23 @@ function configure_tempest { local alt_username=${ALT_USERNAME:-alt_demo} local alt_project_name=${ALT_TENANT_NAME:-alt_demo} local admin_project_id - admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }") + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior # Tempest creates its own instance types - available_flavors=$(nova flavor-list) + available_flavors=$(openstack --os-cloud devstack-admin flavor list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. disk=$(image_size_in_gib $image_uuid_alt) - openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else @@ -251,7 +251,7 @@ function configure_tempest { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}") + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. @@ -264,7 +264,7 @@ function configure_tempest { # flavor selected as default, e.g. m1.small, # we need to perform additional check. # - flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}") + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then continue fi @@ -285,10 +285,10 @@ function configure_tempest { # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created # and the public_network_id should not be set. 
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then - public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack network create --share shared - openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + openstack --os-cloud devstack-admin network create --share shared + openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG From bf13075632b076d19b22f347aaa52cba7dcb7169 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 23 Jun 2021 13:02:57 +0000 Subject: [PATCH 1449/1936] Make explicit the network backend used in the CI jobs All Neutron CI jobs (except for unit, functional and fullstack jobs), have explicitly defined the network backend used: - linuxbridge - ovs - ovn That was discussed and approved during the Neutron CI meetings [1]. [1]https://meetings.opendev.org/meetings/neutron_ci/2021/neutron_ci.2021-06-15-15.00.log.html Depends-On: https://review.opendev.org/c/openstack/neutron/+/797051 Change-Id: Ib14542311e9b1d49829bef54f433b8a04709a9fd --- .zuul.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3c490ff180..f8435a55e4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -730,11 +730,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-grenade-multinode: + - neutron-ovs-grenade-multinode: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-linuxbridge: + - neutron-linuxbridge-tempest: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -767,11 +767,11 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - neutron-grenade-multinode: + - neutron-ovs-grenade-multinode: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-linuxbridge: + - neutron-linuxbridge-tempest: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -824,11 +824,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-dvr: + - neutron-ovs-tempest-dvr: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-dvr-ha-multinode-full: + - neutron-ovs-tempest-dvr-ha-multinode-full: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 2175ff31085972911d155144e02fb178cafaa638 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 25 Jun 2021 10:59:29 +0100 Subject: [PATCH 1450/1936] zuul: Add /etc/libvirt to log collection Useful when debugging libvirtd issues such as bug #1912310. 
Related-Bug: #1912310 Change-Id: Ic8504bd61316e44215672cc44436a3b9a19e114d --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 3c490ff180..b189849655 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -336,6 +336,7 @@ /var/log/postgresql: logs /var/log/mysql: logs /var/log/libvirt: logs + /etc/libvirt: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs From f0bf2bdff12b66eefbb2eae83e919611eb7cc76d Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 29 Jun 2021 09:18:47 +0100 Subject: [PATCH 1451/1936] libvirt: Stop installing python bindings from pip As set out in bug #1933096 these bindings are dynamically built against the version of libvirt present in the environment at build time. As a result using a pre-built wheel can cause AttributeError's when the bindings have previously been built elsewhere against an older version of libvirt installed on the host. This is currently the case in CentOS 8 stream based CI jobs where we try to use 7.4.0 bindings that appear to be built against libvirt <= 6.10 leading to bug #1933096. This change seeks to avoid this by installing the bindings from packages that will always be built against the correct corresponding version of libvirt. Change-Id: I76184c17a776c4e1ecaab9549d9d36c8c07c60fa Closes-Bug: #1933096 --- lib/nova_plugins/functions-libvirt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d3827c30dd..e9ceae4dea 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -58,13 +58,10 @@ EOF function install_libvirt { if is_ubuntu; then - install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt if is_arch "aarch64"; then install_package qemu-efi fi - # uninstall in case the libvirt version changed - pip_uninstall libvirt-python - pip_install_gr libvirt-python #pip_install_gr elif is_fedora || is_suse; then @@ -79,14 +76,11 @@ function install_libvirt { # as the base system version is too old. We should have # pre-installed these install_package qemu-kvm + install_package libvirt libvirt-devel python3-libvirt - install_package libvirt libvirt-devel if is_arch "aarch64"; then install_package edk2.git-aarch64 fi - - pip_uninstall libvirt-python - pip_install_gr libvirt-python fi if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then From 1ab63132df6831bdf7ce180cf7923540305dcd02 Mon Sep 17 00:00:00 2001 From: Przemyslaw Szczerbik Date: Tue, 6 Jul 2021 14:26:40 +0200 Subject: [PATCH 1452/1936] Allow to install os-resource-classes from git repo Example local.conf config snippet: LIBS_FROM_GIT="os-resource-classes" OS_RESOURCE_CLASSES_REPO="${LOCAL_GIT_BASE}/os-resource-classes" OS_RESOURCE_CLASSES_BRANCH="dev_branch" Closes-Bug: #1934784 Change-Id: I972a2a49aa816433152e5cfac4f672c0465d083f --- lib/libraries | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) mode change 100644 => 100755 lib/libraries mode change 100644 => 100755 stackrc diff --git a/lib/libraries b/lib/libraries old mode 100644 new mode 100755 index c7aa8151ae..67ff21f41a --- a/lib/libraries +++ b/lib/libraries @@ -59,6 +59,7 @@ GITDIR["tooz"]=$DEST/tooz # Non oslo libraries are welcomed below as well, this prevents # duplication of this code. 
GITDIR["os-brick"]=$DEST/os-brick +GITDIR["os-resource-classes"]=$DEST/os-resource-classes GITDIR["os-traits"]=$DEST/os-traits # Support entry points installation of console scripts @@ -122,6 +123,7 @@ function install_libs { # # os-traits for nova _install_lib_from_source "os-brick" + _install_lib_from_source "os-resource-classes" _install_lib_from_source "os-traits" # # python client libraries we might need from git can go here diff --git a/stackrc b/stackrc old mode 100644 new mode 100755 index 05016594eb..620b1fc04d --- a/stackrc +++ b/stackrc @@ -548,6 +548,10 @@ GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO=:-${GIT_BASE}/openstack/os-resource-classes.git} +GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} + # os-traits library for resource provider traits in the placement service GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 5b53389073..ce1b34461c 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" # Generate the above list with # echo ${!GITREPO[@]} From 6f4eafb823e498a8d5eb344376c41f5bec8a1b04 Mon Sep 17 00:00:00 2001 From: zenkuro Date: Thu, 15 Jul 2021 19:24:28 +0300 Subject: [PATCH 1453/1936] Added AlmaLinux to CentOS 8 family Change-Id: I9fb6f010842a495c838d468b47dc5081596f41a2 --- functions-common | 2 ++ 1 file changed, 2 insertions(+) diff --git a/functions-common b/functions-common index 11679e4aa3..40567f8e1d 100644 --- a/functions-common +++ b/functions-common @@ -391,6 +391,7 @@ function GetDistro { DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then @@ -451,6 +452,7 @@ function is_fedora { [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } From 5a642450d6ac94ff1ea2bea3e7ce3887ca79dcc4 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Mon, 19 Jul 2021 07:01:29 -0700 Subject: [PATCH 1454/1936] Provide override for glance image size limit The glance image size limitation was added and unfortuantely does prevent larger images from being uploaded to glance. In the case of all baremetal testing, this value is realistically smaller than stock "cloud" images which support booting to baremetal with often requisite firmware blobs, which forces some images over 1GB in size. 
Adds GLANCE_LIMIT_IMAGE_SIZE_TOTAL which allows users who need larger images to be able to override the default while still enabling limits enforcement in their deployment. The default value is 1000. Change-Id: Id425aa546f1a5973bae8be9c017782d18f0b4a47 --- lib/glance | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index fd2e0afcc1..b132f37834 100644 --- a/lib/glance +++ b/lib/glance @@ -108,6 +108,10 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini + +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000} + # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet # TODO(mtreinish): Remove the eventlet path here and in all the similar # conditionals below after the Pike release @@ -273,11 +277,11 @@ function configure_glance_quotas { bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 1000 --region $REGION_NAME \ - image_size_total; \ + --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ + --region $REGION_NAME image_size_total; \ openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 1000 --region $REGION_NAME \ - image_stage_total; \ + --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ + --region $REGION_NAME image_stage_total; \ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 100 --region $REGION_NAME \ image_count_total; \ From c8b66ff33e14c8339c8146d3116b9ff672f912ec Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 19 Jul 2021 11:14:18 -0700 Subject: [PATCH 1455/1936] Add configuration notes about glance limits Change-Id: I21a43584116f4b719cf99d3942044cbf13fefb9a --- doc/source/configuration.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 2d0c894530..67456142de 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -689,6 +689,24 @@ use the v3 API. It is possible to setup keystone without v2 API, by doing: ENABLE_IDENTITY_V2=False + +Glance +++++++ + +The default image size quota of 1GiB may be too small if larger images +are to be used. Change the default at setup time with: + +:: + + GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 + +or at runtime via: + +:: + + openstack --os-cloud devstack-system-admin registered limit update \ + --service glance --default-limit 5000 --region RegionOne image_size_total + .. _arch-configuration: Architectures From 71bd10e45197a405cd497c8923db7442bde14a95 Mon Sep 17 00:00:00 2001 From: Pavan Kesava Rao Date: Mon, 19 Jul 2021 13:33:42 -0400 Subject: [PATCH 1456/1936] Enable tempest tests for hostname sanitization Starting Wallaby release, nova sanitizes instance hostnames having freeform characters with dashes. It should be tested in Devstack. 
Depends-On: https://review.opendev.org/c/openstack/tempest/+/795699 Change-Id: I54794e58b67620c36e8f2966ec3b62dd24da745b --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 4eacfa09ca..095361d4f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -419,6 +419,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + # Starting Wallaby, nova sanitizes instance hostnames having freeform characters with dashes + iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True + if [[ -n "$NOVA_FILTERS" ]]; then iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} fi From 524487728e85388c73ececae3f2eb272321cffc7 Mon Sep 17 00:00:00 2001 From: Marek Tamaskovic Date: Wed, 28 Jul 2021 16:54:50 +0200 Subject: [PATCH 1457/1936] Fix opening shell as user 'stack' The usage of sudo with su is not recommended. It results in inconsistent environment variables. Instead, use just sudo with appropriate arguments. The argument '-u stack' specifies that sudo will execute as user 'stack'. The last argument '-i' will launch an interactive shell. Closes-Bug: #1938148 Change-Id: I42387660480377cdf9a0b04f190e7e1f21fb354f --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 9f477ab911..08ce4cb061 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -63,7 +63,7 @@ have sudo privileges: .. code-block:: console $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack - $ sudo su - stack + $ sudo -u stack -i Download DevStack ----------------- From 0456baaee5309431cd1f88ba4a0ceaa7f050b743 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 30 Jul 2021 19:29:57 +0530 Subject: [PATCH 1458/1936] Fix Usage of rdo-release rpm The rdo-release.el8.rpm package points to the latest RDO release, so use it for master; for stable releases use the corresponding release rpm. Change-Id: I508eceb00d7501ffcfac73d7bc2272badb241494 --- stack.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index c439a7217f..e3d67f571f 100755 --- a/stack.sh +++ b/stack.sh @@ -300,10 +300,14 @@ function _install_epel { } function _install_rdo { - # NOTE(ianw) 2020-04-30 : when we have future branches, we - # probably want to install the relevant branch RDO release as - # well. But for now it's all master.
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el8.rpm points to latest RDO release, use that for master + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + fi sudo dnf -y update } From ba68a49598309c5f6c5e4a0ec9d2b13b8229eee0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 31 Jul 2021 06:13:46 +0000 Subject: [PATCH 1459/1936] Updated from generate-devstack-plugins-list Change-Id: I062b9a121c79650973c8d8d975e1c723d5798777 --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 691fffa846..490132e0d7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -98,6 +98,7 @@ openstack/shade `https://opendev.org/openstack/shade `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ @@ -179,7 +180,6 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service `https://opendev.org/x/tap-as-a-service `__ x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ x/trio2o `https://opendev.org/x/trio2o `__ From f44aa0c55a81e24a8ad321c0c741939e86705e09 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Wed, 4 Aug 2021 18:27:48 -0400 Subject: [PATCH 1460/1936] Allow cinder default quotas configuration The default cinder quotas for volumes, backups, or snapshots may be too low for highly concurrent testing, so make these configurable in devstack. Change-Id: Ie3cf3239b48f9905f5760ad0166eea954ecf5eed --- doc/source/configuration.rst | 6 ++++++ lib/cinder | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 67456142de..8244525075 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -642,6 +642,12 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_NAME_PREFIX="volume-" VOLUME_BACKING_FILE_SIZE=24G +When running highly concurrent tests, the default per-project quotas +for volumes, backups, or snapshots may be too small. These can be +adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, +or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for +each is 10.) 
+ Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index 7f2f29f892..9235428335 100644 --- a/lib/cinder +++ b/lib/cinder @@ -267,6 +267,11 @@ function configure_cinder { iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} + # Avoid RPC timeouts in slow CI and test environments by doubling the # default response timeout set by RPC clients. See bug #1873234 for more # details and example failures. From ac1b723c20fb67aaecd43cd08c6eee88c5f339f2 Mon Sep 17 00:00:00 2001 From: Roman Dobosz Date: Fri, 6 Aug 2021 12:52:01 +0200 Subject: [PATCH 1461/1936] Fix displaying usage for make_cert.sh Now, if no arguments are passed to make_cert.sh script, it will fail on: tools/make_cert.sh: line 30: [: missing `]' and might go on with generating certs depending on the bash settings. It is fixed within this patch. Change-Id: I62bf9c972ebd1644da622439e05114f245f20809 --- tools/make_cert.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/make_cert.sh b/tools/make_cert.sh index e91464fc0f..0212d0033a 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -27,7 +27,7 @@ function usage { } CN=$1 -if [ -z "$CN" ]]; then +if [ -z "$CN" ]; then usage fi ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} @@ -52,5 +52,5 @@ init_CA make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME # Create a cert bundle -cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT - +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT From 6b9a5646225a766f6240e2a1a93a92b82e088aa0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 28 Jul 2021 11:19:57 +1000 Subject: [PATCH 1462/1936] Revert "Workaround for new pip 20.3 behavior" This reverts commit 7a3a7ce876a37376fe0dca7278e41a4f46867daa and bcd0acf6c0b5d6501e91133c3a937b3fc40f7122 and part of f1ed7c77c50ac28cb58c9f7ed885c6a3e0a75403 which all cap our pip installs. Given the pip ecosystem can often incorporate major changes, tracking upstream at least generally gives us one problem at a time to solve rather than trying to handle version jumps when LTS distros update. The new dependency resolver included some changes that disallow setting URL's like "file:///path/to/project#egg=project" in constraints. Apparently the fact it used to work was an accident of the requires/constraints mechanism; it does make some sense as the URL doesn't really have a version-number that the resolver can put in an ordering graph. The _setup_package_with_constraints_edit function comment highlights what this is trying to do # Updates the constraints from REQUIREMENTS_DIR to reflect the # future installed state of this package. This ensures when we # install this package we get the from source version. In other words; if constraints has "foo==1.2.3" and Zuul has checked out "foo" for testing, we have to make sure pip doesn't choose version 1.2.3 from pypi. It seems like removing the entry from upper-requirements.txt is the important part; adding the URL path to the on-disk version was just something that seemed to work at the time, but isn't really necessary. 
We will install the package in question which will be the latest version (from Zuul checkout) and without the package in upper-requirements.txt nothing will try and downgrade it. Therefore the solution proposed here is to remove the adding of the URL parts. This allows us to uncap pip and restore testing with the new dependency resolver. Closes-Bug: #1906322 Change-Id: Ib9ba52147199a9d6d0293182d5db50c4a567d677 --- inc/python | 7 ++++--- lib/tempest | 3 --- tools/cap-pip.txt | 1 - tools/fixup_stuff.sh | 15 +++++++++++++++ tools/install_pip.sh | 9 +-------- 5 files changed, 20 insertions(+), 15 deletions(-) delete mode 100644 tools/cap-pip.txt diff --git a/inc/python b/inc/python index 8941fd038d..9382d352dc 100644 --- a/inc/python +++ b/inc/python @@ -378,12 +378,13 @@ function _setup_package_with_constraints_edit { project_dir=$(cd $project_dir && pwd) if [ -n "$REQUIREMENTS_DIR" ]; then - # Constrain this package to this project directory from here on out. + # Remove this package from constraints before we install it. + # That way, later installs won't "downgrade" the install from + # source we are about to do. local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ - $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ - "$flags file://$project_dir#egg=$name" + $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi setup_package $bindep $project_dir "$flags" $extras diff --git a/lib/tempest b/lib/tempest index 3fa7ce0fb2..a1c02ef183 100644 --- a/lib/tempest +++ b/lib/tempest @@ -718,9 +718,6 @@ function install_tempest { set_tempest_venv_constraints $tmp_u_c_m tox -r --notest -efull - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt deleted file mode 100644 index 8ee551b261..0000000000 --- a/tools/cap-pip.txt +++ /dev/null @@ -1 +0,0 @@ -pip<20.3 diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 19219435ad..8a2c337fc4 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -155,8 +155,23 @@ function fixup_ovn_centos { yum_install centos-release-openstack-victoria } +function fixup_ubuntu { + if ! is_ubuntu; then + return + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. 
So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info +} + function fixup_all { fixup_keystone + fixup_ubuntu fixup_fedora fixup_suse } diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 9afd2e53c2..0082e9f73c 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -91,9 +91,7 @@ function install_get_pip { die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded fi - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt + sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP } @@ -130,11 +128,6 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -# Just use system pkgs on Focal -if [[ "$DISTRO" == focal ]]; then - exit 0 -fi - # Eradicate any and all system packages # Python in fedora/suse depends on the python-pip package so removing it From 2df2aa01584fb2a26112c60f0a16869e233cb7ee Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 13:50:08 +1000 Subject: [PATCH 1463/1936] install_pip: don't fail when not installed On some platforms, "python -m pip" isn't available. Currently this is run undconditionally from the "get_versions" function; remove the call. Change-Id: I91d6c66d055f02fa7b4368593b629933f82d8117 --- tools/install_pip.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0082e9f73c..eb0f6eba48 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -46,15 +46,13 @@ echo "Distro: $DISTRO" function get_versions { # FIXME(dhellmann): Deal with multiple python versions here? This # is just used for reporting, so maybe not? - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" else echo "pip: Not Installed" fi - # Show python3 module version - python${PYTHON3_VERSION} -m pip --version } From 26bd94b45efb63683072006e4281dd34a313d881 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 10 Aug 2021 14:49:54 +0000 Subject: [PATCH 1464/1936] Revert "Add enforce_scope setting support for keystone" This reverts commit 9dc2b88eb42a5f98f43bc8ad3dfa3962a4d44d74. Reason for revert: Devstack creation/setup the things are not yet moved to scope tokens so we need to wait for that first and then do the scope check enable globally. Change-Id: If0368aca39c1325bf90abd23831118b89e746222 --- lib/keystone | 11 ----------- lib/tempest | 9 --------- 2 files changed, 20 deletions(-) diff --git a/lib/keystone b/lib/keystone index e282db0bfa..66e867ca68 100644 --- a/lib/keystone +++ b/lib/keystone @@ -134,12 +134,6 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} -# Flag to set the oslo_policy.enforce_scope. This is used to switch -# the Identity API policies to start checking the scope of token. By Default, -# this flag is False. 
-# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) - # Functions # --------- @@ -287,11 +281,6 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then - iniset $KEYSTONE_CONF oslo_policy enforce_scope true - iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true - iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml - fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index 3fa7ce0fb2..d39fa1c52b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -600,15 +600,6 @@ function configure_tempest { fi done - # ``enforce_scope`` - # If services enable the enforce_scope for their policy - # we need to enable the same on Tempest side so that - # test can be run with scoped token. - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then - iniset $TEMPEST_CONFIG enforce_scope keystone true - iniset $TEMPEST_CONFIG auth admin_system 'all' - iniset $TEMPEST_CONFIG auth admin_project_name '' - fi iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" From 00ac547acacc9fef86f9045a979adf523ab7617b Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 9 Aug 2021 05:54:32 +0000 Subject: [PATCH 1465/1936] Glance remote worker should use own cache directory Earlier glance remote worker was using same cache directory used by glance worker. Ideally both should use their own cache directory. This patch makes provision for the same by setting different path for image_cache_dir config option. Change-Id: If2627e9c212fd765b96d925046c04e9cb1001c3d --- lib/glance | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/glance b/lib/glance index cd26d97dc4..6848aa3c1b 100644 --- a/lib/glance +++ b/lib/glance @@ -552,6 +552,11 @@ function start_glance_remote_clone { iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + # Point this worker to use different cache dir + mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + # Change our uwsgi to our new port sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ "$glance_remote_uwsgi" From a20971850afb555ee4b04068a39a67a533b69901 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 14:11:12 +1000 Subject: [PATCH 1466/1936] install_pip: Use packaged pip on Fedora This uses the python3-pip package for Fedora but maintains the status quo for existing distributions (i.e. for Suse we run get-pip.py but don't uninstall, and for everything else we uninstall python3-pip and run get-pip.py to be running the latest pip). As noted inline, installing get-pip.py over Fedora 34's package no longer works, and likely won't ever work again. Unlike the LTS distributions, the Fedora pip should be more up-to-date, so I think it's best we just avoid any package overwrites. 
Change-Id: I84129aadfcf585bb150a3daa39616246d3d84bbd --- tools/install_pip.sh | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index eb0f6eba48..7ecea4e821 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -126,19 +126,31 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -# Eradicate any and all system packages - -# Python in fedora/suse depends on the python-pip package so removing it -# results in a nonfunctional system. pip on fedora installs to /usr so pip -# can safely override the system pip for all versions of fedora -if ! is_fedora && ! is_suse; then +if is_fedora && [[ ${DISTRO} == f* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. + # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. + install_package python3-pip +elif is_fedora || is_suse; then + # Python in suse/centos depends on the python-pip package; because + # of the split "system-python" uninstalling python3-pip also + # uninstalls the user python3 package which is bad and leaves us + # without a python to use. Just install over. + install_get_pip +else + # Remove packaged pip, and install the latest upstream. if is_package_installed python3-pip ; then uninstall_package python3-pip fi + install_get_pip fi -install_get_pip - set -x # Note setuptools is part of requirements.txt and we want to make sure From 156ccbad85f30cd4c9c33d30b612b3151d542f8e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 14:19:28 +1000 Subject: [PATCH 1467/1936] Add Fedora 34 support * update the support distro filter * don't install xinetd which doesn't exist in F34 any more. I think there is probably a bit more to do with swift ring-server but that can be a problem for another time. * remove old F31 workaround Change-Id:If2f74f146a166b9721540aaf3f1f9fce3030525c --- files/rpms/swift | 2 +- lib/nova | 6 ------ stack.sh | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/files/rpms/swift b/files/rpms/swift index 376c6f3df7..18c957c08a 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd +xinetd # not:f34 diff --git a/lib/nova b/lib/nova index 930529a433..de91517add 100644 --- a/lib/nova +++ b/lib/nova @@ -298,12 +298,6 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f31] ]]; then - # For f31 use the rebased 2.1.0 version of the package. - sudo dnf copr enable -y lyarwood/iscsi-initiator-utils - sudo dnf update -y - fi - if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging # enabled. diff --git a/stack.sh b/stack.sh index c439a7217f..47d6dc718a 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 8dac135cb89fa5d27d7af344703db0a20cf550e1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 11 Aug 2021 14:56:05 +1000 Subject: [PATCH 1468/1936] Simplify pip install The uninstall here has been around since Ibb4b42119dc2e51577c77bbbbffb110863e5324d. At the time, there might have been conflicts between packaged and installed pip. We don't need it today; get-pip.py keeps itself separate enough in /usr/local on all platforms. Thus we can also remove the suse/centos special-casing. python3-pip is in the RPM list so we don't need to re-install for Fedora. Add a note on why we are over-installing pip. Remove some old setuptools workarounds that are commented out. Change-Id: Ie3cb81a8ff71cf4b81e23831c380f83b0381de71 --- stack.sh | 14 +++++++++++++- tools/install_pip.sh | 22 ++-------------------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/stack.sh b/stack.sh index 47d6dc718a..1aa96c9d49 100755 --- a/stack.sh +++ b/stack.sh @@ -752,7 +752,19 @@ git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate Python environment +# Configure an appropriate Python environment. +# +# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip +# is very active and changes are not generally reflected in the LTS +# distros. This often involves important things like dependency or +# conflict resolution, and has often been required because the +# complicated constraints etc. used by openstack have tickled bugs in +# distro versions of pip. We want to find these problems as they +# happen, rather than years later when we try to update our LTS +# distro. Whilst it is clear that global installations of upstream +# pip are less and less common, with virtualenv's being the general +# approach now; there are a lot of devstack plugins that assume a +# global install environment. if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 7ecea4e821..a80c178f2a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -111,14 +111,6 @@ function configure_pypi_alternative_url { } -# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time -# pkg_resources inspects the list of installed Python packages if there are -# non-compliant version numbers in the egg-info (for example, from distro -# system packaged Python libraries). This is off by default after 8.2 but can -# be enabled by uncommenting the lines below. -#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources -#export PYTHONWARNINGS - # Show starting versions get_versions @@ -135,19 +127,9 @@ if is_fedora && [[ ${DISTRO} == f* ]]; then # installed; this *should* remain separate under /usr/local and not break # if python3-pip is later installed. # For general sanity, we just use the packaged pip. It should be - # recent enough anyway. - install_package python3-pip -elif is_fedora || is_suse; then - # Python in suse/centos depends on the python-pip package; because - # of the split "system-python" uninstalling python3-pip also - # uninstalls the user python3 package which is bad and leaves us - # without a python to use. Just install over. - install_get_pip + # recent enough anyway. 
This is included via rpms/general + continue else - # Remove packaged pip, and install the latest upstream. - if is_package_installed python3-pip ; then - uninstall_package python3-pip - fi install_get_pip fi From 6fecfd4fff79620596db45c9d22f8ec63a0d5522 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 11 Aug 2021 10:32:42 -0500 Subject: [PATCH 1469/1936] Add devstack-enforce-scope job to enable the rbac scope checks keystone has system scope feature implemented since queens release. Now Devstack also started moving towards the new RBAC. This commit adds a new job 'devstack-enforce-scope' which enable the scope checks on service side and see if devstack setting are fine or not. This job will be expanded to enable the scope checks for the other service also once they start supporting the system scope. This will help us to test the scope check setting. Change-Id: Ie9cd9c7e7cd8fdf8c8930e59ae9d297f86eb9a95 --- .zuul.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8c275d84dc..517e12bc1c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -580,6 +580,17 @@ SERVICE_IP_VERSION: 6 SERVICE_HOST: "" +- job: + name: devstack-enforce-scope + parent: devstack + description: | + This job runs the devstack with scope checks enabled. + vars: + devstack_localrc: + # Keep enabeling the services here to run with system scope + CINDER_ENFORCE_SCOPE: true + GLANCE_ENFORCE_SCOPE: true + - job: name: devstack-multinode parent: devstack @@ -711,6 +722,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream - devstack-async @@ -765,6 +777,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-enforce-scope - devstack-multinode - devstack-unit-tests - openstack-tox-bashate From 60b5538c337dfa3c8f60fecdc64e671acd1f1cbe Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 6 Aug 2021 12:49:33 +0530 Subject: [PATCH 1470/1936] Set swap size to 4G for c8 jobs Tempest is failing randomly with different reasons as mentioned in the bug, updating swap size those issues are not seen. Before [1] default swap size used to be 8GB but was dropped to 1G so need to configure it in required job itself. Did couple of tests in [2] and with 4GB+ swap jobs are running green. On investigation found that with qemu-5 both Ubuntu and CentOS jobs have memory crunch, currently Ubuntu jobs are not impacted as they are running with qemu-4. [1] https://review.opendev.org/c/openstack/openstack-zuul-jobs/+/750941 [2] https://review.opendev.org/c/openstack/devstack/+/803144 Closes-Bug: #1938914 Change-Id: I57910b5fde5ddf2bd37d93e06c1aff77c6e231e9 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8c275d84dc..1295fed457 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -599,6 +599,8 @@ nodeset: devstack-single-node-centos-8-stream voting: false timeout: 9000 + vars: + configure_swap_size: 4096 - job: name: devstack-async From 26f814921898390eb263f1060fb99cddae1accdc Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 16 Aug 2021 10:36:03 +0200 Subject: [PATCH 1471/1936] Use MDB backend in Ubuntu The MDB backend is the default in Ubuntu and specifying HDB in debconf doesn't change it to HDB. 
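For illustration only, with the Ubuntu defaults set in lib/ldap below (LDAP_OLCDB_NUMBER=1, LDAP_OLCDB_TYPE=mdb), the templated entry in files/ldap/manager.ldif.in now renders as

    dn: olcDatabase={1}mdb,cn=config

instead of the previously hard-coded {1}hdb variant.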
Closes-Bug: #1939700 Change-Id: If98f7fc8395678365fb73f0c5cd926cef083e470 --- files/ldap/manager.ldif.in | 2 +- lib/ldap | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index 2f1f1395ee..d3b9be8b6e 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,4 +1,4 @@ -dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config changetype: modify replace: olcSuffix olcSuffix: ${BASE_DN} diff --git a/lib/ldap b/lib/ldap index 5a53d0eaee..ea5faa1fe9 100644 --- a/lib/ldap +++ b/lib/ldap @@ -33,14 +33,17 @@ LDAP_SERVICE_NAME=slapd if is_ubuntu; then LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb LDAP_ROOTPW_COMMAND=replace elif is_fedora; then LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add elif is_suse; then # SUSE has slappasswd in /usr/sbin/ PATH=$PATH:/usr/sbin/ LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add LDAP_SERVICE_NAME=ldap fi @@ -56,6 +59,7 @@ function _ldap_varsubst { local slappass=$2 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| s|\${SLAPPASS}|$slappass| s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| s|\${BASE_DC}|$LDAP_BASE_DC| @@ -157,7 +161,7 @@ function configure_ldap { slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION slapd slapd/domain string Users slapd shared/organization string $LDAP_DOMAIN - slapd slapd/backend string HDB + slapd slapd/backend string ${LDAP_OLCDB_TYPE^^} slapd slapd/purge_database boolean true slapd slapd/move_old_database boolean true slapd slapd/allow_ldap_v2 boolean false From ed323805f26e51438fd08d6d51882d758551924c Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 17 Aug 2021 08:45:02 -0500 Subject: [PATCH 1472/1936] make swift-dsvm-functional job as voting swift-dsvm-functional job test swift under python3 and voting on swift gate whihc means this is a stable job now, let's make this voting to devstack gate too. Removing swift-dsvm-functional-py3 job as it does not exist anymore after- https://review.opendev.org/c/openstack/swift/+/731318 swift-dsvm-functional itself is py3 job now. Change-Id: I58847f74306194eaad132680815101a134fb4022 --- .zuul.yaml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 517e12bc1c..772cc7b2d7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -731,14 +731,7 @@ - openstack-tox-bashate - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false - - swift-dsvm-functional: - voting: false - irrelevant-files: &dsvm-irrelevant-files - - ^.*\.rst$ - - ^doc/.*$ - - swift-dsvm-functional-py3: - voting: false - irrelevant-files: *dsvm-irrelevant-files + - swift-dsvm-functional - grenade: irrelevant-files: - ^.*\.rst$ @@ -789,6 +782,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - swift-dsvm-functional - grenade: irrelevant-files: - ^.*\.rst$ From 31f0418bad5a9542a38e8200131adf4ace08e01a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 17 Aug 2021 08:49:28 -0500 Subject: [PATCH 1473/1936] Make Ironic job as voting ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa job is voting on Ironic and neutron gate which mean it is stable enough and make sense to make it voting on devstack gate too. 
ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa is alias job of ironic-tempest-bios-ipmi-direct-tinyipa so using the original job instead of alias - https://opendev.org/openstack/ironic/src/branch/master/zuul.d/ironic-jobs.yaml#L784 Change-Id: I95c67ad69e6eae6a72d25a851a71b7de85e56fd2 --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 772cc7b2d7..f98107cb3f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -729,8 +729,7 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: - voting: false + - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: irrelevant-files: @@ -782,6 +781,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: irrelevant-files: From 8b8a4c75b72eb44dd5c11950951cf6e6d9a1d778 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 18 Aug 2021 15:00:09 +0000 Subject: [PATCH 1474/1936] Remove libvirt-python from upper-constraints ... when installed from distribution. This is mostly to fix Ironic's gate as their ecosystem is too broad and complex to quickly remove libvirt-python from all possible requirements.txt More details inline. See also: https://review.opendev.org/c/openstack/devstack/+/798514 aka f0bf2bdff12b66eefbb2eae83e919611eb7cc76d Change-Id: Ic44daf15e952bbe3c424984ffb2474261e68008f --- lib/nova_plugins/functions-libvirt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index e9ceae4dea..63882e05fe 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -56,6 +56,17 @@ EOF # Installs required distro-specific libvirt packages. function install_libvirt { + # NOTE(yoctozepto): The common consensus [1] is that libvirt-python should + # be installed from distro packages. However, various projects might be + # trying to ensure it is installed using pip AND use upper-constraints + # with that, causing pip to try to upgrade it and to fail. + # The following line removes libvirt-python from upper-constraints and + # avoids the situation described above. Now only if installed packages + # explicitly depend on a newer (or, in general, incompatible) libvirt-python + # version, will pip try to reinstall it. + # [1] https://review.opendev.org/c/openstack/devstack/+/798514 + $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ + $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python if is_ubuntu; then install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt From 1e86a25cc28e34d7f73a4c6ccbbc3fc667598d50 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 19 Aug 2021 14:24:28 +0100 Subject: [PATCH 1475/1936] nova: Enable apic removal workaround for bug #1939108 when using QEMU This change enables [workarounds]libvirt_disable_apic when devstack is deployed using the libvirt virt driver and qemu virt type in an effort to avoid issues outlined in bug #1939108 caused by the older kernel currently used in Cirros 0.5.2. 
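In effect the change below adds a single option to the generated compute configuration (shown here only as an illustration of the iniset call in the diff):

    [workarounds]
    libvirt_disable_apic = True

and it is applied only when VIRT_DRIVER is libvirt and LIBVIRT_TYPE is qemu.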
Depends-On: https://review.opendev.org/c/openstack/nova/+/766043 Closes-Bug: #1939108 Change-Id: Ibb6c34133bb1c95ef11cc59d9b12a0f65502c61b --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index de91517add..f4f4797b86 100644 --- a/lib/nova +++ b/lib/nova @@ -926,6 +926,11 @@ function start_nova_compute { iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" fi + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the From 14b7fc500367a24ed995d6598738c6f42dfe49ad Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 20 Aug 2021 06:13:36 +0000 Subject: [PATCH 1476/1936] Updated from generate-devstack-plugins-list Change-Id: I924cdf727818b33d71fe71ac110f224164c6b453 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 490132e0d7..4364dd9c72 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -46,7 +46,6 @@ openstack/freezer `https://opendev.org/openstack/freezer openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ -openstack/glance `https://opendev.org/openstack/glance `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ From 25f84277eab5291aa0fa8c12ac39a69594611e08 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 21 Aug 2021 21:38:43 +0900 Subject: [PATCH 1477/1936] swift: Fix the empty gid option in rsyncd.conf This change fixes the empty value set to the gid option in rsyncd.conf, which was caused by reference to the invalid USER_GROUP variable, and ensures the option is set to the group which STACK_USER belongs to. This also fixes duplicate declaration of the local user_group variable. Closes-Bug: #1940742 Change-Id: Ifd0a5ef0bc5f3647f43b169df1f7176393971853 --- lib/swift | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 790fb99442..98852415da 100644 --- a/lib/swift +++ b/lib/swift @@ -335,7 +335,6 @@ function configure_swift { local node_number local swift_node_config local swift_log_dir - local user_group # Make sure to kill all swift processes first $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -353,7 +352,7 @@ function configure_swift { # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. sed -e " - s/%GROUP%/${USER_GROUP}/; + s/%GROUP%/$(id -g -n ${STACK_USER})/; s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf From b1a89eb80be83fe8c47eeb0431d85a8452e3c70b Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 26 Aug 2021 21:42:32 +0200 Subject: [PATCH 1478/1936] Configure access to physical network also with ML2/OVN backend Neutron L3 module in Devstack has way to conigure access to physical network on the node. 
It can put physical interface to the physical bridge or, in case when such physical device isn't set, it creates NAT rule in iptables. There was missing the same operation for ML2/OVN backend as L3 agent is not used there at all. This patch adds the same to be done in both L3 agent and ovn_agent modules. Closes-Bug: #1939627 Change-Id: I9e558d1d5d3edbce9e7a025ba3c11267f1579820 --- lib/neutron-legacy | 21 +++++++++++++++++++++ lib/neutron_plugins/ovn_agent | 1 + lib/neutron_plugins/services/l3 | 16 +--------------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 791ff18b10..31968498de 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -663,6 +663,27 @@ function _move_neutron_addresses_route { fi } +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi +} + # cleanup_mutnauq() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_mutnauq { diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index cfcb01ee91..1f737fb58b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -266,6 +266,7 @@ function create_public_bridge { # Create the public bridge that OVN will use sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE + _configure_public_network_connectivity } function _disable_libvirt_apparmor { diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index b6bc02818c..98b96ac06c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -123,21 +123,7 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. 
- if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi + _configure_public_network_connectivity } # Explicitly set router id in l3 agent configuration From a38d41ed9222c32eb7058f91b004b2ec16cd19e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 8 Sep 2021 07:51:47 +0000 Subject: [PATCH 1479/1936] Drop dep on libmysqlclient-dev It was required to build MySQL-python bindings but, for some time, we test and rely solely on PyMySQL which is pure Python and hence does not require this dep. This package is going away as distros move towards MariaDB. Change-Id: I6004ccf28a23009a0fc07bfc9458b59a927b969a --- files/debs/general | 1 - files/debs/neutron-common | 1 - files/debs/nova | 1 - 3 files changed, 3 deletions(-) diff --git a/files/debs/general b/files/debs/general index 7e481b4072..364f3cc6e2 100644 --- a/files/debs/general +++ b/files/debs/general @@ -14,7 +14,6 @@ iputils-ping libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libmysqlclient-dev # MySQL-python libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL diff --git a/files/debs/neutron-common b/files/debs/neutron-common index e548396cd7..f6afc5bf55 100644 --- a/files/debs/neutron-common +++ b/files/debs/neutron-common @@ -6,7 +6,6 @@ haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils-arping iputils-ping -libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all python3-mysqldb diff --git a/files/debs/nova b/files/debs/nova index e19441453b..0194f00f2c 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -8,7 +8,6 @@ iptables iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports -libmysqlclient-dev libvirt-clients # NOPRIME libvirt-daemon-system # NOPRIME libvirt-dev # NOPRIME From 6c8bd96f72eb913be5b1de5758b15f828fca5912 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 9 Sep 2021 06:10:23 +0000 Subject: [PATCH 1480/1936] Updated from generate-devstack-plugins-list Change-Id: If2ea45a2cc7993a9a80187092f2750496e7c8ab7 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4364dd9c72..7c8d2b8aac 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -112,6 +112,7 @@ openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui openstack/zun `https://opendev.org/openstack/zun `__ openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ performa/os-faults `https://opendev.org/performa/os-faults `__ +skyline/skyline-apiserver `https://opendev.org/skyline/skyline-apiserver `__ starlingx/config `https://opendev.org/starlingx/config `__ starlingx/fault `https://opendev.org/starlingx/fault `__ starlingx/ha `https://opendev.org/starlingx/ha `__ From c9f3e5bdd76430a7c14b9f02c3f8ba47214037ae Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 16 Sep 2021 11:43:03 -0400 Subject: [PATCH 1481/1936] Fix stackrc os-resource-classes typo Attempting to use LIBS_FROM_GIT="ALL" results in a failure due to a typo in stackrc for os-resource-classes repo. 
Cloning into '/opt/stack/os-resource-classes'... fatal: protocol ':-https' is not supported [ERROR] /opt/stack/devstack/functions-common:629 git call failed: [git clone :-https://opendev.org/openstack/os-resource-classes.git /opt/stack/os-resource-classes --branch master] Remove the extraneous '='. Change-Id: I21f86324dc15fe808b38e366f7af18c96fd3890c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 620b1fc04d..16aa93ac94 100755 --- a/stackrc +++ b/stackrc @@ -549,7 +549,7 @@ GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib # os-resource-classes library containing a list of standardized resource classes for OpenStack -GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO=:-${GIT_BASE}/openstack/os-resource-classes.git} +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} # os-traits library for resource provider traits in the placement service From 8d1bfcacf8ffc73f0aa8c8a8a9e0fee447a1c116 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 24 Sep 2021 18:01:09 -0500 Subject: [PATCH 1482/1936] Update DEVSTACK_SERIES to yoga stable/xena branch has been created now and current master is for yoga. Change-Id: I0c7809bdac6482bb947f394b0c2535fabb4cf067 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 16aa93ac94..3dc800af2d 100755 --- a/stackrc +++ b/stackrc @@ -247,7 +247,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="xena" +DEVSTACK_SERIES="yoga" ############## # From 65b46a503a720f2438a6dc73c6f4670cdf89442f Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 28 Sep 2021 03:08:10 -0400 Subject: [PATCH 1483/1936] Remove cinder from service names In devstack job, cinder is not a valid service name and logs error in gate[1] so remove it. 2021-09-28 05:44:47.791807 | controller | + functions-common:service_check:1603 : for service in ${ENABLED_SERVICES//,/ } 2021-09-28 05:44:47.795506 | controller | + functions-common:service_check:1605 : sudo systemctl is-enabled devstack@cinder.service 2021-09-28 05:44:47.809647 | controller | Failed to get unit file state for devstack@cinder.service: No such file or directory [1] https://e978bdcfc0235dcd9417-6560bc3b6382c1d289b358872777ca09.ssl.cf1.rackcdn.com/801989/7/check/tempest-integrated-storage/779d1e7/job-output.txt Change-Id: I7ca105201d82b72c7e56778425d3bce7c76047db --- .zuul.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index d1e356f4ea..25cbf923f8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -524,7 +524,6 @@ c-bak: true c-sch: true c-vol: true - cinder: true # Services we don't need. # This section is not really needed, it's for readability. horizon: false From 56e75e4aef3ea42d13b192e805c48357b0071239 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 28 Sep 2021 20:02:34 +0200 Subject: [PATCH 1484/1936] Fix uwsgi config for trailing slashes The apache mod_proxy documentation[0] says that trailing slashes need to match for the ProxyPass statement. Since adding a slash to the redirected url would break things that need to access endpoints like /identity without anything added, we need to drop the trailing slash for the target URL. See [1] for the discussion of the CVE fix that changed the previous behavior. 
[0] https://httpd.apache.org/docs/trunk/mod/mod_proxy.html#proxypass [1] https://bugs.launchpad.net/ubuntu/+source/apache2/+bug/1945274 Depends-On: https://review.opendev.org/c/openstack/devstack/+/811389 Change-Id: Ia6b1a41957833fba87a2e6f048d2483267632385 --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 04259ba31f..4bea07dc55 100644 --- a/lib/apache +++ b/lib/apache @@ -303,7 +303,7 @@ function write_uwsgi_config { apache_conf=$(apache_site_config_for $name) iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi From 982b03c605bc06e0bf9a03ff576995816155c64e Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 30 Sep 2021 13:08:35 +0100 Subject: [PATCH 1485/1936] zuul: Remove dedicated devstack-async job I83d56c9363d481bb6d5921f5e1f9b024f136044b switched the default of DEVSTACK_PARALLEL over to True so this dedicated job is no longer required as *all* jobs should now be using it. Change-Id: I0f475ab177c2cd49eeb6be861cdd11581e8e0b97 --- .zuul.yaml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index d1e356f4ea..ab2f80ae1b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -613,17 +613,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-async - parent: tempest-full-py3 - description: Async mode enabled - voting: false - vars: - devstack_localrc: - DEVSTACK_PARALLEL: True - zuul_copy_output: - /opt/stack/async: logs - - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -727,7 +716,6 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream - - devstack-async - devstack-multinode - devstack-unit-tests - openstack-tox-bashate From c7791301be5e19ec7a84dffbf62c40e805483f43 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Fri, 1 Oct 2021 17:24:10 +0000 Subject: [PATCH 1486/1936] Enable oslo.limit to be installed from git repo oslo.limit isn't currently in the list of libraries that can be installed from a git repo via LIBS_FROM_GIT. This adds oslo.limit to enable integrated testing against unmerged oslo.limit changes. 
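A minimal usage sketch, assuming the usual localrc mechanism (nothing in this patch changes how LIBS_FROM_GIT itself works):

    LIBS_FROM_GIT=oslo.limit

which, together with this change, makes devstack install oslo.limit from the git checkout in $DEST/oslo.limit instead of from PyPI.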
Change-Id: I26cc567fdf4c84014040ae586bbb029b8de7a236 --- lib/libraries | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/libraries b/lib/libraries index 67ff21f41a..9ea32304fc 100755 --- a/lib/libraries +++ b/lib/libraries @@ -38,6 +38,7 @@ GITDIR["oslo.config"]=$DEST/oslo.config GITDIR["oslo.context"]=$DEST/oslo.context GITDIR["oslo.db"]=$DEST/oslo.db GITDIR["oslo.i18n"]=$DEST/oslo.i18n +GITDIR["oslo.limit"]=$DEST/oslo.limit GITDIR["oslo.log"]=$DEST/oslo.log GITDIR["oslo.messaging"]=$DEST/oslo.messaging GITDIR["oslo.middleware"]=$DEST/oslo.middleware @@ -102,6 +103,7 @@ function install_libs { _install_lib_from_source "oslo.context" _install_lib_from_source "oslo.db" _install_lib_from_source "oslo.i18n" + _install_lib_from_source "oslo.limit" _install_lib_from_source "oslo.log" _install_lib_from_source "oslo.messaging" _install_lib_from_source "oslo.middleware" diff --git a/stackrc b/stackrc index 3dc800af2d..e0d71dfa49 100755 --- a/stackrc +++ b/stackrc @@ -415,6 +415,10 @@ GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} + # oslo.log GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index ce1b34461c..839e3a1328 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -45,6 +45,7 @@ ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" +ALL_LIBS+=" oslo.limit" # Generate the above list with # echo ${!GITREPO[@]} From 959a7c262a65296a404252f8ec2014237196710d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sun, 2 May 2021 09:29:15 +0200 Subject: [PATCH 1487/1936] Enable running on Debian Bullseye MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some adaption in database handling is all that is missing. Also add a platform job that tests this. Co-Authored-By: Radosław Piliszek Change-Id: I6dd3e48444dd415d84df5e7f5c74540847cdd6db --- .zuul.yaml | 54 +++++++++++++++++++++++++++++++++++++++++++++ lib/databases/mysql | 4 +++- stack.sh | 2 +- 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index ab2f80ae1b..721897e9e5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -86,6 +86,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-bullseye + nodes: + - name: controller + label: debian-bullseye + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -613,6 +623,49 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-debian-bullseye + parent: tempest-full-py3 + description: Debian Bullseye platform test + nodeset: devstack-single-node-debian-bullseye + voting: false + timeout: 9000 + vars: + # NOTE(yoctozepto): With concurrency equal 2, there is a random event + # that this job will run out of memory at some point. + tempest_concurrency: 1 + # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. 
Switch to OVS + # for the time being. + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -716,6 +769,7 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream + - devstack-platform-debian-bullseye - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/lib/databases/mysql b/lib/databases/mysql index d4969d713c..d0fa1199a7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,6 +25,8 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then # provide a mysql.service symlink for backwards-compatibility, but # let's not rely on that. MYSQL_SERVICE_NAME=mariadb + elif [[ "$DISTRO" == "bullseye" ]]; then + MYSQL_SERVICE_NAME=mariadb fi fi @@ -105,7 +107,7 @@ function configure_database_mysql { # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" fi diff --git a/stack.sh b/stack.sh index 48f61fb3af..8a8e3ea6be 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 714826d1a27085ba2384ca495c876588d77f0d27 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 4 Oct 2021 18:07:17 +0100 Subject: [PATCH 1488/1936] nova: Ensure each compute uses a unique iSCSI initiator The current initiator name embedded in our CI images is not unique at present and can often cause failures during live migrations with attached volumes. This change ensures the name is unique by running iscsi-iname again and overwriting the existing name. We could potentially do this during the image build process itself but given that devstack systems are not supposed to be multi-purpose this should be safe to do during the devstack run. 
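The effect of the added one-liner is roughly the following; the IQN shown is a made-up example of iscsi-iname output, not a value taken from the patch:

    $ cat /etc/iscsi/initiatorname.iscsi
    InitiatorName=iqn.1994-05.com.redhat:2af9d35a8cd1

so each compute host ends up with its own randomly generated initiator name rather than the one baked into the CI image.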
Closes-Bug: #1945983 Change-Id: I9ed26a17858df96c04be9ae52bf2e33e023869a5 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index f4f4797b86..bbb1039199 100644 --- a/lib/nova +++ b/lib/nova @@ -298,6 +298,9 @@ function configure_nova { fi fi + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi + if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging # enabled. From bfc79dc98bfe68e22d3a58099bf550eda1a90a67 Mon Sep 17 00:00:00 2001 From: Michal Berger Date: Tue, 5 Oct 2021 15:40:20 +0200 Subject: [PATCH 1489/1936] tools: Fix use of continue continue is not used in a proper context here (outside of loop). Use null cmd instead to simply fall through the pip installation. Signed-off-by: Michal Berger Change-Id: Iaea2e5c0177b475edf19d08d71933a74debbb5d9 --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index a80c178f2a..c72dc89a55 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -128,7 +128,7 @@ if is_fedora && [[ ${DISTRO} == f* ]]; then # if python3-pip is later installed. # For general sanity, we just use the packaged pip. It should be # recent enough anyway. This is included via rpms/general - continue + : # Simply fall through else install_get_pip fi From f758b60a4b70261393242b932063d2b6d705594c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 6 Oct 2021 12:02:22 +0200 Subject: [PATCH 1490/1936] Rehome functions to enable Neutron's Trunk service plugin Those functions were part of the neutron devstack plugin but we discussed on the neutron team meeting [1] to move it to the Devstack repo as it's mature enough now. [1] https://meetings.opendev.org/meetings/networking/2021/networking.2021-10-05-14.00.log.html#l-156 Change-Id: I35446adad1d8a7fed142d834de20c48b611015a5 --- lib/neutron-legacy | 9 +++++++++ lib/neutron_plugins/services/trunk | 5 +++++ 2 files changed, 14 insertions(+) create mode 100644 lib/neutron_plugins/services/trunk diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 31968498de..7b20a96ed7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -275,6 +275,10 @@ source $TOP_DIR/lib/neutron_plugins/services/metering # L3 Service functions source $TOP_DIR/lib/neutron_plugins/services/l3 + +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/trunk + # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -369,6 +373,11 @@ function configure_mutnauq { configure_ovn_plugin fi + # Configure Neutron's advanced services + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" # devstack is not a tool for running uber scale OpenStack # clouds, therefore running without a dedicated RPC worker diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- /dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} From 61a37bff9a35337f5924ca7cc61c76e42e55d787 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Fri, 8 Oct 2021 10:59:09 +0200 Subject: [PATCH 1491/1936] Further fixup for Ubuntu cloud images The official Ubuntu cloud images have some further python pkgs preinstalled that conflict with our requirements. Allow to overwrite them. Signed-off-by: Dr. Jens Harbott Closes-Bug: 1871485 Change-Id: I793c250cae5e7b9bc835b7016d790d1f9ae8a7f3 --- tools/fixup_stuff.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 8a2c337fc4..197a12d8f5 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -167,6 +167,8 @@ function fixup_ubuntu { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } function fixup_all { From b4e683e6b9a442ed9c37407cbc85288fe92aa85c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 5 Oct 2021 20:44:57 +0200 Subject: [PATCH 1492/1936] Don't fail if there is no nf_conntrack_proto_gre module available It may be that it is already compiled in the kernel so there is no need to load kernel module in such case. Change-Id: Ie1d32e3fd529e13958857cb3ced6710eebde1e4d --- lib/neutron_plugins/ovs_source | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 294171f18b..08951d175d 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -211,5 +211,5 @@ function remove_ovs_packages { # load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module function load_conntrack_gre_module { - sudo modprobe nf_conntrack_proto_gre + load_module nf_conntrack_proto_gre False } From 84901f563ef6b5d93a00a89f9cdb41a0380d493e Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 15 Oct 2021 15:04:49 +0200 Subject: [PATCH 1493/1936] Create clouds.yaml early enough When using glance limits, the create_glance_accounts call needs access to the devstack-system-admin cloud definition, so we need to create the clouds.yaml file before that step. Change-Id: Ie6d807c46b88b16b316aa166870a6a13f2bb346d Signed-off-by: Dr. Jens Harbott --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 48f61fb3af..979867895f 100755 --- a/stack.sh +++ b/stack.sh @@ -1090,6 +1090,9 @@ fi source $TOP_DIR/userrc_early +# Write a clouds.yaml file +write_clouds_yaml + if is_service_enabled keystone; then echo_summary "Starting Keystone" @@ -1118,9 +1121,6 @@ if is_service_enabled keystone; then fi -# Write a clouds.yaml file -write_clouds_yaml - # Horizon # ------- From c027ddd3f895802f5cab37d2cb04162686a3a3cb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 16 Oct 2021 06:26:49 +0000 Subject: [PATCH 1494/1936] Updated from generate-devstack-plugins-list Change-Id: I1abc356970a7f2427bc9683a7e64e54ab52a7651 --- doc/source/plugin-registry.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 7c8d2b8aac..3edd708d8b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,8 +24,6 @@ official OpenStack projects. 
======================================== === Plugin Name URL ======================================== === -inspur/venus `https://opendev.org/inspur/venus `__ -inspur/venus-dashboard `https://opendev.org/inspur/venus-dashboard `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ @@ -101,6 +99,8 @@ openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ openstack/vitrage `https://opendev.org/openstack/vitrage `__ openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ From ee1c614eda833b38ad0d526b4b1e493dfe5968be Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sat, 16 Oct 2021 17:33:12 +0200 Subject: [PATCH 1495/1936] Fix use of yaml.load() The use of this function has been deprecated for a long time[0]. With PyYAML==6.0 the call is now failing, so replace it with the safe version. [0] https://msg.pyyaml.org/load Signed-off-by: Jens Harbott Change-Id: I7a170262b50a5c80a516095b872d52e1bea5479d --- tools/update_clouds_yaml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 7be995e8f3..74dcdb2a07 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -65,7 +65,7 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._clouds = yaml.load(clouds_file) + self._clouds = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") From c2491bac9d3b3f0446e67b4ea960cb88da9aec0e Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sun, 14 Jun 2020 18:06:23 +0200 Subject: [PATCH 1496/1936] Stop creating a keystone admin site Keystone no longer has any special functionality hidden behind the admin site. KEYSTONE_AUTH_URI which used to point to the admin site has long ago been changed to be a copy of KEYSTONE_SERVICE_URI, which points to the public site. Drop all KEYSTONE_AUTH_* variables except KEYSTONE_AUTH_URI which may still be in use in some plugins. This also allows to finally drop the fixup_keystone() function. 
Change-Id: I549f3cadc27d137e014241cdd47e90267859c848 --- doc/source/configuration.rst | 1 - files/apache-keystone.template | 25 ------------------------- lib/keystone | 32 ++++---------------------------- lib/swift | 4 ++-- stack.sh | 4 ++-- tools/fixup_stuff.sh | 34 ---------------------------------- 6 files changed, 8 insertions(+), 92 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 67456142de..d1144ae9ed 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -666,7 +666,6 @@ In RegionTwo: disable_service horizon KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= REGION_NAME=RegionTwo KEYSTONE_REGION_NAME=RegionOne diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..1a353e5f4a 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,5 +1,4 @@ Listen %PUBLICPORT% -Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined @@ -20,20 +19,6 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLKEYFILE% - - WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - %SSLLISTEN% %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% @@ -49,13 +34,3 @@ Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - -Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin - - SetHandler wsgi-script - Options +ExecCGI - - WSGIProcessGroup keystone-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/lib/keystone b/lib/keystone index 66e867ca68..096bafb41f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -50,9 +50,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public -KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin # KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: # - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi @@ -81,21 +79,12 @@ KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') -# Set Keystone interface configuration -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} - # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Bind hosts -KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} - # Set the project for service accounts in Keystone 
SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} @@ -106,7 +95,6 @@ SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} # if we are running with SSL use https protocols if is_service_enabled tls-proxy; then - KEYSTONE_AUTH_PROTOCOL="https" KEYSTONE_SERVICE_PROTOCOL="https" fi @@ -154,11 +142,8 @@ function cleanup_keystone { sudo rm -f $(apache_site_config_for keystone) else stop_process "keystone" - # TODO: remove admin at pike-2 remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" sudo rm -f $(apache_site_config_for keystone-wsgi-public) - sudo rm -f $(apache_site_config_for keystone-wsgi-admin) fi } @@ -171,12 +156,10 @@ function _config_keystone_apache_wsgi { local keystone_certfile="" local keystone_keyfile="" local keystone_service_port=$KEYSTONE_SERVICE_PORT - local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" if is_service_enabled tls-proxy; then keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" @@ -185,7 +168,6 @@ function _config_keystone_apache_wsgi { sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " s|%PUBLICPORT%|$keystone_service_port|g; - s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; @@ -223,12 +205,10 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT - local auth_port=$KEYSTONE_AUTH_PORT if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals service_port=$KEYSTONE_SERVICE_PORT_INT - auth_port=$KEYSTONE_AUTH_PORT_INT fi # Override the endpoints advertised by keystone (the public_endpoint and @@ -238,7 +218,7 @@ function configure_keystone { # don't want the port (in the case of putting keystone on a path in # apache). iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI + iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_SERVICE_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT @@ -261,7 +241,6 @@ function configure_keystone { _config_keystone_apache_wsgi else # uwsgi write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" fi iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -518,7 +497,7 @@ function install_keystone { function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_AUTH_PROTOCOL + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$KEYSTONE_SERVICE_PORT_INT auth_protocol="http" @@ -546,7 +525,6 @@ function start_keystone { # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT - start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT fi # (re)start memcached to make sure we have a clean memcache. 
@@ -569,9 +547,7 @@ function stop_keystone { # - ``ADMIN_PASSWORD`` # - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_PROTOCOL`` -# - ``KEYSTONE_SERVICE_HOST`` -# - ``KEYSTONE_SERVICE_PORT`` +# - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ @@ -580,7 +556,7 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \ + --bootstrap-admin-url "$KEYSTONE_SERVICE_URI" \ --bootstrap-public-url "$KEYSTONE_SERVICE_URI" } diff --git a/lib/swift b/lib/swift index 98852415da..b376993150 100644 --- a/lib/swift +++ b/lib/swift @@ -430,7 +430,7 @@ function configure_swift { swift_pipeline+=" authtoken" if is_service_enabled s3api;then swift_pipeline+=" s3token" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true fi swift_pipeline+=" keystoneauth" @@ -521,7 +521,7 @@ function configure_swift { local auth_vers auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then iniset ${testfile} func_test auth_port 443 else iniset ${testfile} func_test auth_port 80 diff --git a/stack.sh b/stack.sh index 48f61fb3af..fc26e21627 100755 --- a/stack.sh +++ b/stack.sh @@ -876,7 +876,7 @@ fi install_keystonemiddleware if is_service_enabled keystone; then - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone configure_keystone fi @@ -1093,7 +1093,7 @@ source $TOP_DIR/userrc_early if is_service_enabled keystone; then echo_summary "Starting Keystone" - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then init_keystone start_keystone bootstrap_keystone diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 71fba2e2a6..fe5dafa994 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -26,39 +26,6 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi -# Keystone Port Reservation -# ------------------------- -# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from -# being used as ephemeral ports by the system. The default(s) are 35357 and -# 35358 which are in the Linux defined ephemeral port range (in disagreement -# with the IANA ephemeral port range). This is a workaround for bug #1253482 -# where Keystone will try and bind to the port and the port will already be -# in use as an ephemeral port by another process. This places an explicit -# exception into the Kernel for the Keystone AUTH ports. -function fixup_keystone { - keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - - # Only do the reserved ports when available, on some system (like containers) - # where it's not exposed we are almost pretty sure these ports would be - # exclusive for our DevStack. 
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} - else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} - fi - else - echo_summary "WARNING: unable to reserve keystone ports" - fi -} - # Python Packages # --------------- @@ -182,7 +149,6 @@ function fixup_ubuntu { } function fixup_all { - fixup_keystone fixup_ubuntu fixup_fedora fixup_suse From eb37657d8e34e1d0f8ee639c3953a752ba615d8d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 24 Feb 2021 10:04:31 +0100 Subject: [PATCH 1497/1936] Make creation of keystone admin endpoint optional The keystone admin endpoint technically isn't different any longer from the other keystone endpoints in v3 of the API. However, some applications like heat are still relying on it to exist. So we make the creation of the admin endpoint during bootstrap optional here, with the intention to change the default to False once all jobs that still need this are modified to explicitly require it. Change-Id: I7ab12141c558186e397c174c248a613d1810011b --- lib/keystone | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 096bafb41f..f8b5ccb009 100644 --- a/lib/keystone +++ b/lib/keystone @@ -122,6 +122,9 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} +# Whether to create a keystone admin endpoint for legacy applications +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse True KEYSTONE_ADMIN_ENDPOINT) + # Functions # --------- @@ -556,8 +559,16 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_SERVICE_URI" \ --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi } # create_ldap_domain() - Create domain file and initialize domain with a user From b538b3267cec11f0b345ce101607bbd36ada8f32 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 24 Feb 2021 10:24:03 +0100 Subject: [PATCH 1498/1936] Switch off creating a keystone admin endpoint by default With the depending patch, the endpoint will still be created for heat tests, so we can turn it off for everyone else. 
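Deployments that still rely on the legacy admin endpoint (for example heat, as noted above) can opt back in explicitly; a minimal localrc sketch:

    KEYSTONE_ADMIN_ENDPOINT=True

which overrides the trueorfalse default flipped by this patch.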
Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/777343 Change-Id: I0dc7d6cedd07e942b9f23b26a785b386aff41fbc --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index f8b5ccb009..0609abd289 100644 --- a/lib/keystone +++ b/lib/keystone @@ -123,7 +123,7 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} # Whether to create a keystone admin endpoint for legacy applications -KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse True KEYSTONE_ADMIN_ENDPOINT) +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) # Functions # --------- From 4aa27976ebb2e4a4dc95a20f96e5d8f25b1ac10d Mon Sep 17 00:00:00 2001 From: Tristan Cacqueray Date: Tue, 19 Oct 2021 21:47:27 +0000 Subject: [PATCH 1499/1936] [ci] Remove the implied-branches pragma This change enables using devstack job with custom branche names. Change-Id: I95c368f05042a6f8f208988af9a6d89a522a5526 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b756db3b22..d114053d26 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,11 +1,3 @@ -- pragma: - # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to - # be using devstack - # TODO(gtema): delete this once r1 branch is merged into master - implied-branches: - - master - - feature/r1 - - nodeset: name: openstack-single-node nodes: From e06d954229fc4fca827105f5bb0809a19075d590 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 21 Oct 2021 08:15:12 -0700 Subject: [PATCH 1500/1936] Use Nehalem CPU model by default CentOS/RHEL 9 are being compiled for the x86_64-v2 architecture which is newer than the qemu default of qemu64. This means that for devstack to boot these instances we need a newer CPU model. Nehalem is apparently the oldest model that works for x86_64-v2 and is expected to work on Intel and AMD cpus with kvm or qemu. Switch devstack to this model by default. Note that we cannot use host-passthrough or host-model because we want to support live migration between devstack deployed nova-compute instances and even within the CI instances that we get the host CPUs can differ. Also, we should run this change against as many clouds as possible to ensure that the newer model works across all of our clouds. There is some fear that the virtual CPUs presented to us in some clouds may not be able to run these newer CPU models. Change-Id: Ibd6e11b59f3c8655bc60ace7383a08458b2177f2 --- lib/nova | 3 ++- lib/nova_plugins/hypervisor-libvirt | 3 +++ stackrc | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index bbb1039199..9aae2c4a9c 100644 --- a/lib/nova +++ b/lib/nova @@ -260,7 +260,8 @@ function configure_nova { if [ ! 
-e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu - LIBVIRT_CPU_MODE=none + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 321775d324..c1cd132548 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -40,6 +40,9 @@ function configure_nova_hypervisor { configure_libvirt iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" + if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then + iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + fi # Do not enable USB tablet input devices to avoid QEMU CPU overhead. iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" diff --git a/stackrc b/stackrc index 3dc800af2d..ebe472c033 100755 --- a/stackrc +++ b/stackrc @@ -623,7 +623,8 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none} + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then # The groups change with newer libvirt. Older Ubuntu used # 'libvirtd', but now uses libvirt like Debian. Do a quick check From 7f6d9283b85bb743d3224981c031d331f5e9608a Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 27 Oct 2021 16:40:30 +0200 Subject: [PATCH 1501/1936] Rehome functions to enable Neutron's placement integration Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo as plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. Placement integration is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. 
[1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: Ib86071881f16de1b69c0f9b1b19b6df8b7e66a07 --- lib/neutron-legacy | 4 ++++ lib/neutron_plugins/services/placement | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 lib/neutron_plugins/services/placement diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 7b20a96ed7..b41dfcae42 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -277,6 +277,7 @@ source $TOP_DIR/lib/neutron_plugins/services/metering source $TOP_DIR/lib/neutron_plugins/services/l3 # Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk # Use security group or not @@ -374,6 +375,9 @@ function configure_mutnauq { fi # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi if is_service_enabled q-trunk neutron-trunk; then configure_trunk_extension fi diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement new file mode 100644 index 0000000000..3ec185bae6 --- /dev/null +++ b/lib/neutron_plugins/services/placement @@ -0,0 +1,21 @@ +#!/bin/bash + +function configure_placement_service_plugin { + neutron_service_plugin_class_add "placement" +} + +function configure_placement_neutron { + iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" + iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" + iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" + iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement region_name "$REGION_NAME" +} + +function configure_placement_extension { + configure_placement_service_plugin + configure_placement_neutron +} From 325792d9b9238f9a8b8b493ba50572add99b9d82 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 15 Oct 2021 15:55:54 -0500 Subject: [PATCH 1502/1936] Clarify error message for ERROR_ON_CLONE=True If ERROR_ON_CLONE is set to True which is case for all the devstack based job, devstack does not clone the repo instead raise error. From current error message, it is difficult to know that ERROR_ON_CLONE is True until we traceback the code or check devstack-base job set ERROR_ON_CLONE to True. Current error message is like: ------- + functions-common:git_clone:560 : echo 'The /opt/stack/oslo.limit project was not found; if this is a gate job, add' The /opt/stack/oslo.limit project was not found; if this is a gate job, add + functions-common:git_clone:561 : echo 'the project to the $PROJECTS variable in the job definition.' the project to the $PROJECTS variable in the job definition. + functions-common:git_clone:562 : die 562 'Cloning not allowed in this configuration' -------- Adding ERROR_ON_CLONE info in error message will help to know the reason of devstack not cloning the repo. 
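The behaviour itself is unchanged; it is still gated by the ERROR_ON_CLONE variable, only the message becomes clearer. A minimal sketch of reproducing the gate behaviour in a local run, assuming nothing beyond a plain local.conf:

    [[local|localrc]]
    # Match the gate jobs: refuse to clone missing repositories and fail
    # with the clearer error message instead of fetching them silently.
    ERROR_ON_CLONE=True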
Change-Id: I9e9852f046fefb299b4ef4446323e9c86437212f --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 11679e4aa3..7a628db737 100644 --- a/functions-common +++ b/functions-common @@ -547,7 +547,7 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to 'required-projects' in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi git_timed clone $git_clone_flags $git_remote $git_dest fi @@ -559,7 +559,7 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to the \$PROJECTS variable in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi # '--branch' can also take tags git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref From f8e00b86aee9a8f9646bf5aed2c618843307b963 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 29 Oct 2021 14:39:41 +0200 Subject: [PATCH 1503/1936] Run Bullseye with more swap Since Bullseye like Centos 8 Stream needs more memory due to changed default settings in newer qemu versions, set the swap size to 4G, which is the same setting already being used for the CS8 jobs successfully. Change-Id: I83ea34d6aa647d2ab9d4d78ed354904fce836e68 Signed-off-by: Dr. Jens Harbott --- .zuul.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cbcb8638c4..7a85266eaa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -630,9 +630,7 @@ voting: false timeout: 9000 vars: - # NOTE(yoctozepto): With concurrency equal 2, there is a random event - # that this job will run out of memory at some point. - tempest_concurrency: 1 + configure_swap_size: 4096 # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS # for the time being. devstack_localrc: From 021ae0bcc8f67b6fd307aaf3c8ac59ba6cbe23b6 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 15:47:50 +0000 Subject: [PATCH 1504/1936] Update lib/keystone to add more system users Keystone has supported system-scope since Queens and we already make sure we create a cloud profile for system-admin in /etc/openstack/clouds.yaml. This commit ensures keystone creates a couple of new users to model system-member and system-reader personas. Doing this by default in devstack makes it easier for people to use. We've already taken a similar approach in tempest by setting up the various system personas for tempest clients to use. 
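Once stacked, the new personas can be exercised directly through the generated clouds.yaml entries. A rough sketch follows; the commands are only examples of read-only calls, and whether a particular API accepts a system reader or member still depends on each service's policy defaults:

    # System-scoped reader persona added by this change.
    openstack --os-cloud devstack-system-reader endpoint list
    # System-scoped member persona added by this change.
    openstack --os-cloud devstack-system-member service list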
Change-Id: Iceb7c5f517db20072e121dc7538abaa888423c67 --- functions-common | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/keystone | 23 ++++++++++++++++++++- 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 11679e4aa3..111d339372 100644 --- a/functions-common +++ b/functions-common @@ -129,6 +129,28 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all + # system member + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_member \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # system reader + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_reader \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + cat >> $CLOUDS_YAML < [] +function get_or_add_user_system_role { + local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4) + + # Gets user role id + user_role_id=$(openstack role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + if [[ -z "$user_role_id" ]]; then + # Adds role to user and get it + openstack role add $1 \ + --user $2 \ + --system $3 \ + $domain_args + user_role_id=$(openstack role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + fi + echo $user_role_id +} + # Gets or adds group role to project # Usage: get_or_add_group_project_role function get_or_add_group_project_role { diff --git a/lib/keystone b/lib/keystone index 0609abd289..065ca70ec3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -285,20 +285,28 @@ function configure_keystone { # admins admin admin admin # nonadmins demo, alt_demo member, anotherrole demo, alt_demo +# System User Roles +# ------------------------------------------------------------------ +# all admin admin +# all system_reader reader +# all system_member member + # Migrated from keystone_data.sh function create_keystone_accounts { # The keystone bootstrapping process (performed via keystone-manage - # bootstrap) creates an admin user, admin role, member role, and admin + # bootstrap) creates an admin user and an admin # project. As a sanity check we exercise the CLI to retrieve the IDs for # these values. local admin_project admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) + # These roles are also created during bootstrap but we don't need their IDs local admin_role="admin" local member_role="member" + local reader_role="reader" async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default @@ -349,6 +357,18 @@ function create_keystone_accounts { async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + # Create two users, give one the member role on the system and the other + # the reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. 
+ system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" + # groups local admin_group admin_group=$(get_or_create_group "admins" \ @@ -365,6 +385,7 @@ function create_keystone_accounts { async_wait ks-demo-{member,admin,another,invis} async_wait ks-alt-{member,admin,another} + async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} if is_service_enabled ldap; then From 95555ba39827e2f3648eb89bde91b1342c493306 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 10 Nov 2021 06:22:52 +0100 Subject: [PATCH 1505/1936] Cleanup keystone library IDENTITY_API_VERSION is hardcoded to 3 in most locations already, drop the remaining occurrences, but keep the variable definition since some plugins still depend on it. Drop ENABLE_IDENTITY_V2 which no longer has any effect. Amend variable list for bootstrap_keystone(). Signed-off-by: Dr. Jens Harbott Change-Id: I06f476d2105bc6ec2b511fc5effcfcc3973eaf97 --- doc/source/configuration.rst | 11 ----------- lib/keystone | 4 +--- openrc | 2 +- stackrc | 18 +++--------------- 4 files changed, 5 insertions(+), 30 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index b4fff4f208..dd8f21faaf 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -684,17 +684,6 @@ KEYSTONE_REGION_NAME to specify the region of Keystone service. KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit it in the configuration of RegionOne. -Disabling Identity API v2 -+++++++++++++++++++++++++ - -The Identity API v2 is deprecated as of Mitaka and it is recommended to only -use the v3 API. It is possible to setup keystone without v2 API, by doing: - -:: - - ENABLE_IDENTITY_V2=False - - Glance ++++++ diff --git a/lib/keystone b/lib/keystone index 065ca70ec3..349d2573b6 100644 --- a/lib/keystone +++ b/lib/keystone @@ -9,7 +9,6 @@ # - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``FILES`` -# - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # - ``S3_SERVICE_PORT`` (template backend only) @@ -540,7 +539,7 @@ function start_keystone { # unencryted traffic at this point. # If running in Apache, use the path rather than port. - local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ if ! 
wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -569,7 +568,6 @@ function stop_keystone { # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` # - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { diff --git a/openrc b/openrc index beeaebea42..6d488bb0ba 100644 --- a/openrc +++ b/openrc @@ -74,7 +74,7 @@ else fi # Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} +export OS_IDENTITY_API_VERSION=3 # Ask keystoneauth1 to use keystone export OS_AUTH_TYPE=password diff --git a/stackrc b/stackrc index ebe472c033..62749a7005 100755 --- a/stackrc +++ b/stackrc @@ -175,21 +175,9 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} - -# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack -# deployment will be deploying the Identity v2 pipelines. If this option is set -# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to -# skip Identity v2 specific tests; and iii) configure Horizon to use Identity -# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION -# will to be set to ``3`` in order to make DevStack register the Identity -# endpoint as v3. This flag is experimental and will be used as basis to -# identify the projects which still have issues to operate with Identity v3. -ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2) -if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - IDENTITY_API_VERSION=3 -fi +# Configure Identity API version +# TODO(frickler): Drop this when plugins no longer need it +IDENTITY_API_VERSION=3 # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with From 9c81321bfc694bd511dee8dd5d04273e368e5545 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 16:29:31 +0000 Subject: [PATCH 1506/1936] Add additional project personas for secure RBAC This commit formalizes some additional users to act as different project users and updates clouds.yaml file so they're easy to use. It creates: - a reader on the demo project - a reader on the alt_demo project - a member on the alt_demo project With the adoption of secure RBAC personas, these are useful for using OpenStack APIs as that work continues. 
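These project-scoped personas are consumed the same way as the existing devstack and devstack-alt clouds. An illustrative sketch, with the listed resources chosen arbitrarily:

    # Reader on the demo project, intended for read-only project calls.
    openstack --os-cloud devstack-reader server list
    # Member on the alt_demo project, allowed to create resources there.
    openstack --os-cloud devstack-alt-member network list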
Change-Id: I3237a771275311377313b7d7d80ac059ac69d031 --- functions-common | 51 +++++++++++++++++++++++++++++++++++++++--------- lib/keystone | 32 ++++++++++++++++++++++++------ 2 files changed, 68 insertions(+), 15 deletions(-) diff --git a/functions-common b/functions-common index 111d339372..996827f292 100644 --- a/functions-common +++ b/functions-common @@ -85,7 +85,7 @@ function write_clouds_yaml { if [ -f "$SSL_BUNDLE_FILE" ]; then CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" fi - # demo -> devstack + # devstack: user with the member role on demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack \ @@ -96,7 +96,18 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name demo - # alt_demo -> devstack-alt + # devstack-admin: user with the admin role on the admin project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + + # devstack-alt: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-alt \ @@ -107,18 +118,40 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name alt_demo - # admin -> devstack-admin + # devstack-alt-member: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ + --os-cloud devstack-alt-member \ --os-region-name $REGION_NAME \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username admin \ + --os-username alt_demo_member \ --os-password $ADMIN_PASSWORD \ - --os-project-name admin + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo - # admin with a system-scoped token -> devstack-system + # devstack-system-admin: user with the admin role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-admin \ @@ -129,7 +162,7 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all - # system member + # devstack-system-member: user with the member role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-member \ @@ -140,7 +173,7 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all - # system reader + # devstack-system-reader: user with the reader role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-reader \ diff --git a/lib/keystone b/lib/keystone index 065ca70ec3..1ae950d0d1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -346,19 +346,39 @@ function 
create_keystone_accounts { async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project - # alt_demo + # Create a user to act as a reader on project demo + local demo_reader + demo_reader=$(get_or_create_user "demo_reader" \ + "$ADMIN_PASSWORD" "default" "demo_reader@example.com") + + async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project + + # Create a different project called alt_demo local alt_demo_project alt_demo_project=$(get_or_create_project "alt_demo" default) + # Create a user to act as member, admin and anotherrole on project alt_demo local alt_demo_user alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project - # Create two users, give one the member role on the system and the other - # the reader role on the system. These two users model system-member and + # Create another user to act as a member on project alt_demo + local alt_demo_member + alt_demo_member=$(get_or_create_user "alt_demo_member" \ + "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com") + async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project + + # Create another user to act as a reader on project alt_demo + local alt_demo_reader + alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project + + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and # system-reader personas. The admin user already has the admin role on the # system and we can re-use this user as a system-admin. system_member_user=$(get_or_create_user "system_member" \ @@ -383,8 +403,8 @@ function create_keystone_accounts { async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project - async_wait ks-demo-{member,admin,another,invis} - async_wait ks-alt-{member,admin,another} + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{member,admin,another,member-user,reader-user} async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} From f9a896c6e6afcf52e9a50613285940c26e353ba3 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 27 Oct 2021 16:50:11 +0200 Subject: [PATCH 1507/1936] Rehome functions to enable Neutron's QoS service Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. QoS service is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. 
[1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: I48f65d530db53fe2c94cad57a8072e1158d738b0 --- lib/neutron-legacy | 8 ++++++++ lib/neutron_plugins/services/qos | 30 ++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 lib/neutron_plugins/services/qos diff --git a/lib/neutron-legacy b/lib/neutron-legacy index be29f99024..a3f6f0788d 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -279,6 +279,7 @@ source $TOP_DIR/lib/neutron_plugins/services/l3 # Additional Neutron service plugins source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos # Use security group or not if has_neutron_plugin_security_group; then @@ -381,6 +382,13 @@ function configure_mutnauq { if is_service_enabled q-trunk neutron-trunk; then configure_trunk_extension fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos + fi + fi iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" # devstack is not a tool for running uber scale OpenStack diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos new file mode 100644 index 0000000000..af9eb3d5b4 --- /dev/null +++ b/lib/neutron_plugins/services/qos @@ -0,0 +1,30 @@ +#!/bin/bash + +function configure_qos_service_plugin { + neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$NEUTRON_CORE_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} + +function configure_l3_agent_extension_fip_qos { + plugin_agent_add_l3_agent_extension "fip_qos" +} + +function configure_l3_agent_extension_gateway_ip_qos { + plugin_agent_add_l3_agent_extension "gateway_ip_qos" +} From f56f7a557ac4941b5204852f4173db9cf82b4dae Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Nov 2021 10:57:25 +0100 Subject: [PATCH 1508/1936] Stop creating userrc_early We can use the devstack-admin cloud configuration everywhere now and don't need to set environment variables with cloud credentials any longer. Fix the swift setup, where some more options need to be explicitly specified now and the default OS_CLOUD setting overridden. Signed-off-by: Dr. Jens Harbott Change-Id: I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 --- lib/swift | 15 +++++++++------ stack.sh | 28 +++------------------------- 2 files changed, 12 insertions(+), 31 deletions(-) diff --git a/lib/swift b/lib/swift index b376993150..9c13701c6e 100644 --- a/lib/swift +++ b/lib/swift @@ -866,12 +866,15 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! 
- OS_USERNAME=swift \ - OS_PASSWORD=$SERVICE_PASSWORD \ - OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \ - OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - openstack object store account \ + openstack --os-cloud "" \ + --os-region-name $REGION_NAME \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username=swift \ + --os-password=$SERVICE_PASSWORD \ + --os-user-domain-name=$SERVICE_DOMAIN_NAME \ + --os-project-name=$SERVICE_PROJECT_NAME \ + --os-project-domain-name=$SERVICE_DOMAIN_NAME \ + object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } diff --git a/stack.sh b/stack.sh index b5ad81b081..ead56e68c0 100755 --- a/stack.sh +++ b/stack.sh @@ -1063,35 +1063,13 @@ fi # Keystone # -------- -# Rather than just export these, we write them out to a -# intermediate userrc file that can also be used to debug if -# something goes wrong between here and running -# tools/create_userrc.sh (this script relies on services other -# than keystone being available, so we can't call it right now) -cat > $TOP_DIR/userrc_early <> $TOP_DIR/userrc_early start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 fi -source $TOP_DIR/userrc_early - -# Write a clouds.yaml file +# Write a clouds.yaml file and use the devstack-admin cloud write_clouds_yaml +export OS_CLOUD=devstack-admin if is_service_enabled keystone; then echo_summary "Starting Keystone" @@ -1380,7 +1358,7 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD" if [ -f $SSL_BUNDLE_FILE ]; then USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" From faed11d2a1c9a9bb06ba855d5b551b231dd6bf82 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 18 Nov 2021 10:36:57 +0100 Subject: [PATCH 1509/1936] Add missing ml2, L2 and L3 agent functions to devstack Previously those functions were defined in the neutron's devstack plugin but with [1] we moved qos related code into devstack and we missed about moving them too. This is follow up patch to fix that issue. [1] https://review.opendev.org/c/openstack/devstack/+/815686 Change-Id: Icf459a2f8c6ae3c3cb29b16ba0b92766af41af30 --- lib/neutron-legacy | 9 +++++++++ lib/neutron_plugins/ml2 | 4 ++++ lib/neutron_plugins/services/l3 | 9 +++++++++ 3 files changed, 22 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index a3f6f0788d..a5a608df72 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1039,6 +1039,15 @@ function _ssh_check_neutron { test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec } +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" + fi +} + # Restore xtrace $_XTRACE_NEUTRON diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index e1f868f0a7..f00feac6b4 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -156,5 +156,9 @@ function has_neutron_plugin_security_group { return 0 } +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + # Restore xtrace $_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 98b96ac06c..72f7a32b26 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -427,3 +427,12 @@ function is_networking_extension_supported { EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } + +function plugin_agent_add_l3_agent_extension { + local l3_agent_extension=$1 + if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then + L3_AGENT_EXTENSIONS=$l3_agent_extension + elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then + L3_AGENT_EXTENSIONS+=",$l3_agent_extension" + fi +} From c96993d138ea9ba447fc3b3dbbbf4879fd8c20db Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Thu, 18 Nov 2021 10:39:36 -0800 Subject: [PATCH 1510/1936] Make OS_CLOUD be able to be configured OS_CLOUD is used to communiate to CLI tools what cloud credentials to utilize. The change I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 unfortunately set an explicit OS_CLOUD account which breaks any jobs which are expecting a previosuly set OS_CLOUD which may be different to work. For example, OS_CLOUD being set as devstack-system-admin to facilitate Secure RBAC testing. Change-Id: Iee900e552584dda622f57eea3508df48dff2e071 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ead56e68c0..b5dc0ee05e 100755 --- a/stack.sh +++ b/stack.sh @@ -1069,7 +1069,7 @@ fi # Write a clouds.yaml file and use the devstack-admin cloud write_clouds_yaml -export OS_CLOUD=devstack-admin +export OS_CLOUD=${OS_CLOUD:-devstack-admin} if is_service_enabled keystone; then echo_summary "Starting Keystone" From 1d8888dc24143d81c13557ffdfa615052e794ebe Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 16:36:28 +0000 Subject: [PATCH 1511/1936] Remove unnecessary member role assignments from alt_demo This user already has the admin role assignment on a project, which implies the member role, making explicit calls to add the member role redundant. 
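The redundancy comes from keystone's implied roles: bootstrap sets up admin implying member (and member implying reader), so effective assignment listings already report the lower roles. A quick way to confirm this on a running deployment, assuming the default users created above:

    # Show the role implication chain created by keystone-manage bootstrap.
    openstack --os-cloud devstack-system-admin implied role list
    # Effective assignments on alt_demo include member via the admin role.
    openstack --os-cloud devstack-system-admin role assignment list \
        --user alt_demo --project alt_demo --effective --names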
Change-Id: I398c5e2f098aeeb896de83872085cbce529a778a --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1ae950d0d1..2d48bb10bd 100644 --- a/lib/keystone +++ b/lib/keystone @@ -342,6 +342,7 @@ function create_keystone_accounts { async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project @@ -361,7 +362,6 @@ function create_keystone_accounts { alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project @@ -404,7 +404,7 @@ function create_keystone_accounts { async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project async_wait ks-demo-{member,admin,another,invis,reader} - async_wait ks-alt-{member,admin,another,member-user,reader-user} + async_wait ks-alt-{admin,another,member-user,reader-user} async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} From 65a5db8e3376fbeb6871629cbfe5d77fe848e039 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 3 May 2021 00:08:15 +0900 Subject: [PATCH 1512/1936] keystone: Dot not set the removed admin_endpoint parameter The admin_endpoint parameter has been removed from keystone[1], and setting the parameter is no longer effective. [1] 192cde56e57a06750641b319da8a72cdcaa554d0 Change-Id: I6ae6a3122668551acc018972624e914fcbb79a22 --- lib/keystone | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1ae950d0d1..dafe005664 100644 --- a/lib/keystone +++ b/lib/keystone @@ -214,14 +214,11 @@ function configure_keystone { service_port=$KEYSTONE_SERVICE_PORT_INT fi - # Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. By default, the - # keystone server uses the public_port and admin_port which isn't going to - # work when you want to use a different port (in the case of proxy), or you - # don't want the port (in the case of putting keystone on a path in - # apache). + # Override the endpoints advertised by keystone so that clients use the correct + # endpoint. By default, the keystone server uses the public_port which isn't + # going to work when you want to use a different port (in the case of proxy), + # or you don't want the port (in the case of putting keystone on a path in apache). 
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_SERVICE_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT From 6822ff39444f8ebe01084e0d92bc05ac40db8216 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Wed, 1 Sep 2021 09:36:31 +0200 Subject: [PATCH 1513/1936] Fix OVN DBs cleanup on startup When initializing OVN, clean up the correct database directory when using OVN from packages (/var/lib/ovn/ instead of /opt/stack/data/ovn/). The /opt/stack/data/ovn location is used only when building OVN from sources, so a fresh devstack deployment with OVN packages may already have hundreds of existing routers and ports, creating ARP collisions. Closes-Bug: #1942201 Change-Id: Ic90d4f2f9d8aaef825ea3325c0ad8fef2a1c5e39 --- lib/neutron_plugins/ovn_agent | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 1f737fb58b..999851e33d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -119,7 +119,13 @@ OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} -OVN_DATADIR=$DATA_DIR/ovn +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi OVN_SHAREDIR=$OVS_PREFIX/share/ovn OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts OVN_RUNDIR=$OVS_PREFIX/var/run/ovn @@ -562,13 +568,13 @@ function init_ovn { _disable_libvirt_apparmor - mkdir -p $OVN_DATADIR + sudo mkdir -p $OVN_DATADIR mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db rm -f $OVS_DATADIR/.*.db.~lock~ - rm -f $OVN_DATADIR/*.db - rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ } function _start_ovs { From c20cd8ed9d0294e99293cdcb5eea0885a5a12573 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 24 Nov 2021 01:35:29 +0900 Subject: [PATCH 1514/1936] cinder-backup: Ensure ca cert is defined when tls-proxy is enabled Change-Id: Id679eb7061d8e609ce76fbb5b720a041990e8e86 --- lib/cinder_backups/swift | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift index d7c977e1e3..c7ec306246 100644 --- a/lib/cinder_backups/swift +++ b/lib/cinder_backups/swift @@ -24,6 +24,9 @@ function configure_cinder_backup_swift { # to use it. iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi } # init_cinder_backup_swift: nothing to do From afd0f84eae75fd5a5a7611cb6e8368ef7b845211 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Mon, 8 Nov 2021 19:53:40 +0000 Subject: [PATCH 1515/1936] Remove unnecessary unset for project-scoped token in glance Before, we needed to unset a couple of parameters that would make the client return a project-scoped token instead of a system-scoped token, which we need when interacting with registered limits in keystone. This commit removes those unsets since we no longer source those variables by default. This commit also cleans up some of the redundant parameters in the registered limit calls, like region. 
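Registered limits are a keystone resource, so the system-scoped devstack-system-admin cloud is the natural way both to create and to inspect them. A short sketch for checking the defaults configured above:

    # Default (registered) limits created for the glance service.
    openstack --os-cloud devstack-system-admin registered limit list
    # Per-project overrides, if any were created, show up as plain limits.
    openstack --os-cloud devstack-system-admin limit list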
Change-Id: I1af8a168a29e895d57504d41e30efea271ea232d --- lib/glance | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/lib/glance b/lib/glance index f18bea9ccb..4c2755f76f 100644 --- a/lib/glance +++ b/lib/glance @@ -288,24 +288,17 @@ function configure_glance_store { function configure_glance_quotas { - # NOTE(danms): We need to have some of the OS_ things unset in - # order to use system scope, which is required for creating these - # limits. This is a hack, but I dunno how else to get osc to use - # system scope. - - bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ - --region $REGION_NAME image_size_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ - --region $REGION_NAME image_stage_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 100 --region $REGION_NAME \ - image_count_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 100 --region $REGION_NAME \ - image_count_uploading" + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading # Tell glance to use these limits iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True From b575af0cfe8aac71825b406394e9927a33ca51bd Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 26 Nov 2021 12:44:41 +0530 Subject: [PATCH 1516/1936] Do not use sudo with OVN_DATADIR when building from source Jobs with OVN_BUILD_FROMS_SOURCE=True are broken since [1] as ovn nortd not starting due to permission issues. Fix it by not using sudo for creating OVN_DATADIR when building from source. [1] https://review.opendev.org/c/openstack/devstack/+/806858 Closes-Bug: #1952393 Change-Id: I00f0c8c8173b4d8270fbb3e6079d0d8b332e9de5 --- lib/neutron_plugins/ovn_agent | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 999851e33d..56686f2741 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -568,7 +568,11 @@ function init_ovn { _disable_libvirt_apparmor - sudo mkdir -p $OVN_DATADIR + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + mkdir -p $OVN_DATADIR + else + sudo mkdir -p $OVN_DATADIR + fi mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db From bd68251463b2a86e07643387b56deca53a90f3c5 Mon Sep 17 00:00:00 2001 From: Roman Dobosz Date: Fri, 26 Nov 2021 15:34:50 +0100 Subject: [PATCH 1517/1936] Change a way for creating data dir in case of OVN. Calculate the sudo usage with local variable. 
Change-Id: I39dff770ff296dc06395acdb430a9cfe1722a30f --- lib/neutron_plugins/ovn_agent | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 56686f2741..3fc38288f1 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -567,12 +567,13 @@ function init_ovn { # create new ones on each devstack run. _disable_libvirt_apparmor + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" - if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then - mkdir -p $OVN_DATADIR - else - sudo mkdir -p $OVN_DATADIR + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" fi + + $mkdir_cmd mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db From 5ea4c3c18c558ec1fa37e0dd9b28ce66c3da1130 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Tue, 16 Nov 2021 15:13:03 +0100 Subject: [PATCH 1518/1936] Support CentOS Stream 9 This patch includes changes required to run devstack on CentOS Stream 9 which has been already published in official repos by CentOS team [1]: - Add RDO deps repository for CS9. - remove xinetd package from installation for swift. Note that rsync-daemon is installed which should work fine. - Replace genisoimage by xorriso in CS9. - Use /etc/os-release to identify the distro in CS9 as it doesn't provide lsb_release command. - Use pip from rpm package instead of from get-pip.py as done in Fedora. - Add non-voting job devstack-platform-centos-9-stream to the check pipeline. Change-Id: Ic67cddabd5069211dc0611994b8b8360bcd61bef --- .zuul.yaml | 21 +++++++++++++++++++++ files/rpms/ceph | 2 +- files/rpms/n-cpu | 3 ++- files/rpms/nova | 3 ++- files/rpms/swift | 2 +- functions-common | 17 ++++++++++++----- lib/nova | 3 ++- stack.sh | 24 ++++++++++++++++-------- tools/install_pip.sh | 2 +- 9 files changed, 58 insertions(+), 19 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 7a85266eaa..3945faf82e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -66,6 +66,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-opensuse-15 nodes: @@ -622,6 +632,16 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-centos-9-stream + parent: tempest-full-py3 + description: CentOS 9 Stream platform test + nodeset: devstack-single-node-centos-9-stream + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-debian-bullseye parent: tempest-full-py3 @@ -766,6 +786,7 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream + - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-multinode - devstack-unit-tests diff --git a/files/rpms/ceph b/files/rpms/ceph index 64befc5f00..33a55f80ea 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core +redhat-lsb-core # not:rhel9 xfsprogs diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 68e5472685..7ce5a72d6b 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,9 +1,10 @@ cryptsetup dosfstools -genisoimage +genisoimage # not:rhel9 iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils +xorriso # not:rhel8 diff --git a/files/rpms/nova b/files/rpms/nova index 8ea8ccc5ca..9522e5729d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -3,7 +3,7 @@ curl dnsmasq # for q-dhcp dnsmasq-utils # for 
dhcp_release ebtables -genisoimage # required for config_drive +genisoimage # not:rhel9 required for config_drive iptables iputils kernel-modules @@ -13,3 +13,4 @@ polkit rabbitmq-server # NOPRIME sqlite sudo +xorriso # not:rhel8 diff --git a/files/rpms/swift b/files/rpms/swift index 18c957c08a..b6009a321e 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f34 +xinetd # not:f34,rhel9 diff --git a/functions-common b/functions-common index b1ca6ad3c0..bd029dd700 100644 --- a/functions-common +++ b/functions-common @@ -368,12 +368,19 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # We only support distros that provide a sane lsb_release - _ensure_lsb_release + # CentOS Stream 9 does not provide lsb_release + source /etc/os-release + if [[ "${ID}${VERSION}" == "centos9" ]]; then + os_RELEASE=${VERSION_ID} + os_CODENAME="n/a" + os_VENDOR=$(echo $NAME | tr -d '[:space:]') + else + _ensure_lsb_release - os_RELEASE=$(lsb_release -r -s) - os_CODENAME=$(lsb_release -c -s) - os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_CODENAME=$(lsb_release -c -s) + os_VENDOR=$(lsb_release -i -s) + fi if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then os_PACKAGE="deb" diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..31b7642efc 100644 --- a/lib/nova +++ b/lib/nova @@ -479,7 +479,8 @@ function create_nova_conf { fi # nova defaults to genisoimage but only mkisofs is available for 15.0+ - if is_suse; then + # rhel provides mkisofs symlink to genisoimage or xorriso appropiately + if is_suse || is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi diff --git a/stack.sh b/stack.sh index b5ad81b081..fa4e7e9006 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -300,13 +300,17 @@ function _install_epel { } function _install_rdo { - if [[ "$TARGET_BRANCH" == "master" ]]; then - # rdo-release.el8.rpm points to latest RDO release, use that for master - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm - else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") - sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + if [[ $DISTRO == "rhel8" ]]; then + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el8.rpm points to latest RDO release, use that for master + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + fi + elif [[ $DISTRO == "rhel9" ]]; then + sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo fi sudo dnf -y update } @@ -385,6 +389,10 @@ if [[ $DISTRO == "rhel8" ]]; then # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 # Patch: https://github.com/rpm-software-management/dnf/pull/1448 echo "[]" | sudo tee /var/cache/dnf/expired_repos.json +elif [[ $DISTRO == "rhel9" ]]; then + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo fi # Ensure python is installed diff --git a/tools/install_pip.sh b/tools/install_pip.sh index c72dc89a55..259375a150 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -118,7 +118,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -if is_fedora && [[ ${DISTRO} == f* ]]; then +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # get-pip.py will not install over the python3-pip package in # Fedora 34 any more. # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 From 31334f9a9b2edbdc4a7b45e2e04aaec827639d62 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 4 Nov 2021 18:30:29 +0000 Subject: [PATCH 1519/1936] nova: Use noVNC 1.3.0 when installing from source Additionally make the repo name lowercase to match the project name in our zuul config so that jobs can check the repo out. 
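Both settings remain overridable, so a job or local deployment that needs a different console version only has to set them before stacking. A minimal local.conf sketch; the alternate tag shown is purely illustrative:

    [[local|localrc]]
    # Override the new defaults (lowercase novnc repository, tag v1.3.0).
    NOVNC_REPO=https://github.com/novnc/novnc.git
    NOVNC_BRANCH=v1.2.0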
Change-Id: Ic2d9c4fa837461bbc29e067a81912b5f72efd3ca --- lib/nova | 2 +- stackrc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..3460c2145a 100644 --- a/lib/nova +++ b/lib/nova @@ -831,7 +831,7 @@ function install_nova { NOVNC_WEB_DIR=/usr/share/novnc install_package novnc else - NOVNC_WEB_DIR=$DEST/noVNC + NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH fi fi diff --git a/stackrc b/stackrc index 62749a7005..4fc09af9a8 100755 --- a/stackrc +++ b/stackrc @@ -590,8 +590,8 @@ IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironi IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} From fc8ef86fbe09a467ee8bcffa79760d3f1e699450 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 9 Mar 2021 17:32:25 +0000 Subject: [PATCH 1520/1936] Only write out uwsgi configs when deploying API services Previously this would always happen for Nova and Cinder even if n-api and c-api were not enabled on the host respectively. This change stops this by placing both calls write_uwsgi_config behind is_service_enabled checks. Change-Id: I997685da771736dbad79bcfe4b00dbc63bd6d6b6 --- lib/cinder | 4 +++- lib/nova | 9 +++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index cefb609676..76314c1d1f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -353,7 +353,9 @@ function configure_cinder { # Format logging setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..8109446572 100644 --- a/lib/nova +++ b/lib/nova @@ -488,8 +488,13 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + if is_service_enabled n-api; then + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + fi + + if is_service_enabled n-api-meta; then + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + fi if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" From 418535883763cb31e54ab3882ee3eacc42afd4f3 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 6 Jul 2021 12:05:31 +0200 Subject: [PATCH 1521/1936] Install OVS from source when it was configured like that Function _neutron_ovs_base_install_agent_packages always tried to install openvswitch from packages and start it using systemd units. That was failing when ovs was expected to be installed from source. This patch fixes that. 
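With the fix, building Open vSwitch from git is driven entirely by the existing flag, which OVN_BUILD_FROM_SOURCE=True now also implies. A hedged local.conf sketch for exercising the source-build path:

    [[local|localrc]]
    # Build and run Open vSwitch from git instead of distro packages;
    # this flag defaults to False in lib/neutron_plugins/ovs_source.
    Q_BUILD_OVS_FROM_GIT=True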
Change-Id: Iae8625dd800d30061ea3dbed9eb0dfbe16f21572 --- lib/neutron_plugins/ovn_agent | 13 +++------ lib/neutron_plugins/ovs_base | 49 +++++++++++++++++++++------------- lib/neutron_plugins/ovs_source | 1 + 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 1f737fb58b..c0bba2c370 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -24,11 +24,6 @@ source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent # Load devstack ovs compliation and loading functions source ${TOP_DIR}/lib/neutron_plugins/ovs_source -# Defaults -# -------- - -Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT) - # Set variables for building OVN from source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') @@ -74,6 +69,9 @@ OVN_UUID=${OVN_UUID:-} # unless the distro kernel includes ovs+conntrack support. OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES) OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE) +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + Q_BUILD_OVS_FROM_GIT=True +fi # Whether or not to install the ovs python module from ovs source. This can be # used to test and validate new ovs python features. This should only be used @@ -341,11 +339,6 @@ function ovn_sanity_check { # install_ovn() - Collect source and prepare function install_ovn { - if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then - echo "Installation of OVS from source disabled." - return 0 - fi - echo "Installing OVN and dependent packages" # Check the OVN configuration diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2e63fe3c7b..8acf586189 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -7,6 +7,12 @@ _XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace) set +o xtrace +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + +# Defaults +# -------- + OVS_BRIDGE=${OVS_BRIDGE:-br-int} # OVS recognize default 'system' datapath or 'netdev' for userspace datapath OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system} @@ -60,26 +66,33 @@ function _neutron_ovs_base_install_ubuntu_dkms { } function _neutron_ovs_base_install_agent_packages { - # Install deps - install_package $(get_packages "openvswitch") - if is_ubuntu; then - _neutron_ovs_base_install_ubuntu_dkms - restart_service openvswitch-switch - elif is_fedora; then - restart_service openvswitch - sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then + if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then + remove_ovs_packages + compile_ovs False /usr /var + load_conntrack_gre_module + start_new_ovs + else + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch + elif is_suse; then + if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then + restart_service openvswitch-switch + else + # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 + if [[ $DISTRO =~ "tumbleweed" ]]; then + sudo sed -i -e 
"s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch + fi + restart_service openvswitch || { + journalctl -xe || : + systemctl status openvswitch + } fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 08951d175d..9c87dce551 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -14,6 +14,7 @@ # Defaults # -------- +Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) # Set variables for building OVS from source OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} From c3b7051387d4332f956148c5676383499fa31859 Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Fri, 6 Aug 2021 14:26:37 -0400 Subject: [PATCH 1522/1936] Add option to set chap algorithms for iscsid for FIPS The default CHAP algorithm for iscsid is md5, which is disallowed under fips. We will set the chap algorithm to "SHA3-256,SHA256", which should work under all configurations. Change-Id: Ide186fb53b3f9826ff602cb7fb797f245a15033a --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index 5fcccffec1..1420183a19 100644 --- a/lib/nova +++ b/lib/nova @@ -315,6 +315,10 @@ EOF sudo systemctl daemon-reload fi + # set chap algorithms. The default chap_algorithm is md5 which will + # not work under FIPS + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + # ensure that iscsid is started, even when disabled by default restart_service iscsid fi From 24b65adc9cedff9c7a8ab412fb39613ef5d4a627 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 22 Jun 2021 15:31:46 +0200 Subject: [PATCH 1523/1936] Deploy Neutron with enforced new RBAC rules This patch adds new config option NEUTRON_ENFORCE_NEW_DEFAULTS which if set to True will deploy Neutron with enforce new rbac defaults and scopes. It will also use SYSTEM_ADMIN user to interact with Neutron where it is needed. Depends-On: https://review.opendev.org/c/openstack/neutron/+/798821 Change-Id: I14d934f0deced34d74003b92824cad3c44ec4f5e --- .zuul.yaml | 1 + lib/neutron | 19 ++++++++++ lib/neutron-legacy | 18 ++++++++++ lib/neutron_plugins/services/l3 | 62 +++++++++++++++++++++------------ lib/tempest | 10 ++++-- 5 files changed, 85 insertions(+), 25 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3945faf82e..b5ab1277b6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -609,6 +609,7 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true + NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode diff --git a/lib/neutron b/lib/neutron index 885df97f7c..15d548e33d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -37,6 +37,11 @@ NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. 
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) + NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) # Distributed Virtual Router (DVR) configuration # Can be: @@ -232,6 +237,7 @@ function configure_neutron_new { if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then neutron_ml2_extension_driver_add port_security fi + configure_rbac_policies fi # Neutron OVS or LB agent @@ -612,6 +618,19 @@ function configure_neutron { fi } +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + + function configure_neutron_nova { if is_neutron_legacy_enabled; then # Call back to old function diff --git a/lib/neutron-legacy b/lib/neutron-legacy index a5a608df72..b906a1b2ff 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -90,6 +90,11 @@ NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) + # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -489,6 +494,19 @@ function configure_neutron_after_post_config { if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES fi + configure_rbac_policies +} + +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi } # Start running OVN processes diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 72f7a32b26..ccb5398f75 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -100,6 +100,11 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} +NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" +if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" +fi + default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') @@ -151,6 +156,10 @@ function create_neutron_initial_network { project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + # Allow drivers that need 
to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -159,10 +168,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + SUBNETPOOL_V4_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + SUBNETPOOL_V6_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) fi fi fi @@ -170,14 +179,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -187,7 +196,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then 
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -197,7 +206,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -215,11 +224,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create $Q_ROUTER_NAME --project $admin_project_id | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi @@ -229,9 +238,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. 
Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} --project $admin_project_id | grep ' id ' | get_field 2) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --project $admin_project_id | grep ' id ' | get_field 2) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -258,11 +267,12 @@ function _neutron_create_private_subnet_v4 { if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" echo $subnet_id } @@ -285,14 +295,17 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local subnet_params="--ip-version 4 " + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + local subnet_params="--ip-version 4 --project $admin_project_id" subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " @@ -300,26 +313,29 @@ function _neutron_create_public_subnet_v4 { subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(openstack 
--os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local subnet_params="--ip-version 6 " + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + local subnet_params="--ip-version 6 --project $admin_project_id " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -327,7 +343,7 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then @@ -354,7 +370,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" fi _neutron_set_router_id @@ -363,7 +379,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud 
"$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -375,7 +391,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using OVN or the l3-agent for layer 3 @@ -396,7 +412,7 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + IPV6_ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -424,7 +440,7 @@ function _neutron_configure_router_v6 { function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time - EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) + EXT_LIST=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } diff --git a/lib/tempest b/lib/tempest index 8fd54c5d5e..ab802171d1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -90,6 +90,10 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" +if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" +fi # Functions # --------- @@ -287,8 +291,8 @@ function configure_tempest { if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack --os-cloud devstack-admin network create --share shared - openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --share shared --project "$admin_project_id" + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet --project "$admin_project_id" fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG @@ -443,6 +447,8 @@ function configure_tempest { iniset 
$TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" + # Scenario SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME From 7880ba665e2d594b2eabb0533e6dca52e042ca50 Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Wed, 31 Mar 2021 04:58:28 +0000 Subject: [PATCH 1524/1936] openEuler 20.03 LTS SP2 support openEuler is an open-source Linux based operating system. The current openEuler kernel is based on Linux and supports multi arch, such as X86_64 and aarch64. It fully unleashes the potential of computing chips. As an efficient, stable, and secure open-source OS built by global open-source contributors, openEuler applies to database, big data, cloud computing, and AI scenarios. openEuler is using RPM for package management. Note: Currently there is no available package for uwsgi-plugin-python3 and ovn, so that openEuler needs manually install them from source. Website: https://www.openeuler.org/en/ Change-Id: I169a0017998054604a63ac6c177d0f43f8a32ba6 Co-Authored-By: wangxiyuan Signed-off-by: Kevin Zhao --- .zuul.yaml | 25 +++++++++++++++++++++++++ doc/source/index.rst | 2 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +++- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 14 ++++++++++++-- lib/apache | 16 +++------------- lib/nova_plugins/functions-libvirt | 2 +- roles/apache-logs-conf/tasks/main.yaml | 1 + stack.sh | 8 +++++++- tools/fixup_stuff.sh | 24 ++++++++++++++++++++++++ 12 files changed, 80 insertions(+), 22 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3945faf82e..a4385572bc 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,6 +106,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-openeuler-20.03-sp2 + nodes: + - name: controller + label: openEuler-20-03-LTS-SP2 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -683,6 +693,20 @@ # Enable Neutron ML2/OVS services q-agt: true +- job: + name: devstack-platform-openEuler-20.03-SP2 + parent: tempest-full-py3 + description: openEuler 20.03 SP2 platform test + nodeset: devstack-single-node-openeuler-20.03-sp2 + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_localrc: + # NOTE(wxy): OVN package is not supported by openEuler yet. Build it + # from source instead. + OVN_BUILD_FROM_SOURCE: True + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -788,6 +812,7 @@ - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-openEuler-20.03-SP2 - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/doc/source/index.rst b/doc/source/index.rst index 08ce4cb061..feb50ce4e9 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. 
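A brief sketch of how openEuler-specific steps can be guarded once this patch lands; is_openeuler and install_package are the DevStack helpers added or used in the hunks below, and the hostname package is only an example borrowed from the stack.sh hunk:

    # Guard a distro-specific workaround behind the new helper (sketch only).
    if is_openeuler; then
        # openEuler 20.03 images may lack some base packages by default.
        install_package hostname
    fi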
diff --git a/files/rpms/ceph b/files/rpms/ceph index 33a55f80ea..93b5746aa6 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9 +redhat-lsb-core # not:rhel9,openEuler-20.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 33da0a5385..163a7c8f24 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -16,6 +16,7 @@ libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel +make # dist:openEuler-20.03 net-tools openssh-server openssl @@ -27,7 +28,8 @@ psmisc python3-devel python3-pip python3-systemd -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 +redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376 +systemd-devel # dist:openEuler-20.03 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index 9522e5729d..9e8621c628 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -6,7 +6,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules +kernel-modules # not:openEuler-20.03 kpartx parted polkit diff --git a/files/rpms/swift b/files/rpms/swift index b6009a321e..faf0a3175a 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,5 +1,5 @@ curl -liberasurecode-devel +liberasurecode-devel # not:openEuler-20.03 memcached rsync-daemon sqlite diff --git a/functions-common b/functions-common index 80f43554d0..e593328f37 100644 --- a/functions-common +++ b/functions-common @@ -388,7 +388,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core + sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -459,6 +459,10 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + # The DISTRO here is `openEuler-20.03`. While, actually only openEuler + # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs. + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -510,6 +514,7 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ @@ -558,7 +563,12 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } - +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 4bea07dc55..cbe61adf34 100644 --- a/lib/apache +++ b/lib/apache @@ -82,19 +82,10 @@ function install_apache_uwsgi { apxs="apxs" fi - # This varies based on packaged/installed. If we've - # pip_installed, then the pip setup will only build a "python" - # module that will be either python2 or python3 depending on what - # it was built with. - # - # For package installs, the distro ships both plugins and you need - # to select the right one ... it will not be autodetected. 
- UWSGI_PYTHON_PLUGIN=python3 - if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" install_package ${pkg_list} - elif is_fedora; then + elif is_fedora && ! is_openeuler; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: @@ -122,7 +113,6 @@ function install_apache_uwsgi { popd # delete the temp directory sudo rm -rf $dir - UWSGI_PYTHON_PLUGIN=python fi if is_ubuntu || is_suse ; then @@ -283,7 +273,7 @@ function write_uwsgi_config { # configured after graceful shutdown iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} + iniset "$file" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM @@ -336,7 +326,7 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi die-on-term true iniset "$file" uwsgi exit-on-reload false iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} + iniset "$file" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 63882e05fe..3e7d2801d6 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -90,7 +90,7 @@ function install_libvirt { install_package libvirt libvirt-devel python3-libvirt if is_arch "aarch64"; then - install_package edk2.git-aarch64 + install_package edk2-aarch64 fi fi diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bd64574c9b..6b7ea37857 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,6 +64,7 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index a10e6eff25..cb16fda998 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -278,6 +278,12 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh +# TODO(wxy): Currently some base packages are not installed by default in +# openEuler. Remove the code below once the packaged are installed by default +# in the future. +if [[ $DISTRO == "openEuler-20.03" ]]; then + install_package hostname +fi # Configure Distro Repositories # ----------------------------- diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index fe5dafa994..750849db68 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -148,8 +148,32 @@ function fixup_ubuntu { sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } +function fixup_openeuler { + if ! 
is_openeuler; then + return + fi + + if is_arch "x86_64"; then + arch="x86_64" + elif is_arch "aarch64"; then + arch="aarch64" + fi + + # Some packages' version in openEuler are too old, use the newer ones we + # provide in oepkg. (oepkg is an openEuler third part yum repo which is + # endorsed by openEuler community) + (echo '[openstack-ci]' + echo 'name=openstack' + echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/' + echo 'enabled=1' + echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null + + yum_install liberasurecode-devel +} + function fixup_all { fixup_ubuntu fixup_fedora fixup_suse + fixup_openeuler } From 588894753971c32f6fff9b2158c3427b012cf9ec Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 22 Dec 2021 16:00:29 +0100 Subject: [PATCH 1525/1936] Clean up compile_ovn function's parameters That function was accepting 3 positional arguments and first of them was boolean value "build_modules" which isn't used anywhere in that function. So this patch cleans it a bit by removing that not used parameter. Change-Id: I5c57b9116338a63b7bfb170c02e33bb4eae725da --- lib/neutron_plugins/ovn_agent | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 3fc38288f1..099b639458 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -294,16 +294,13 @@ function _disable_libvirt_apparmor { # compile_ovn() - Compile OVN from source and load needed modules # Accepts three parameters: -# - first optional is False by default and means that -# modules are built and installed. -# - second optional parameter defines prefix for +# - first optional parameter defines prefix for # ovn compilation -# - third optional parameter defines localstatedir for +# - second optional parameter defines localstatedir for # ovn single machine runtime function compile_ovn { - local build_modules=${1:-False} - local prefix=$2 - local localstatedir=$3 + local prefix=$1 + local localstatedir=$2 if [ -n "$prefix" ]; then prefix="--prefix=$prefix" @@ -381,7 +378,7 @@ function install_ovn { compile_ovs $OVN_BUILD_MODULES if use_new_ovn_repository; then - compile_ovn $OVN_BUILD_MODULES + compile_ovn fi sudo mkdir -p $OVS_PREFIX/var/log/openvswitch From 353c3f9cb1e70929898116b0b6c0020c43d93aea Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 23 Dec 2021 12:01:44 +0100 Subject: [PATCH 1526/1936] Fix stacking without preconfigured DATABASE_PASSWORD When we need to read a DATABASE_PASSWORD from the user, make sure we actually use it in our database URLs. Signed-off-by: Dr. Jens Harbott Change-Id: I5ebf6b0280e82f2c87a63cbee7a9957c6bd26898 --- lib/database | 6 ++++-- stack.sh | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/database b/lib/database index 7940cf2208..78563f6f6d 100644 --- a/lib/database +++ b/lib/database @@ -89,6 +89,10 @@ function initialize_database_backends { DATABASE_PASSWORD=$MYSQL_PASSWORD fi + return 0 +} + +function define_database_baseurl { # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own # database and tables. 
@@ -100,8 +104,6 @@ function initialize_database_backends { # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} - - return 0 } # Recreate a given database diff --git a/stack.sh b/stack.sh index a10e6eff25..6b5625e922 100755 --- a/stack.sh +++ b/stack.sh @@ -691,6 +691,8 @@ if initialize_database_backends; then # Last chance for the database password. This must be handled here # because read_password is not a library function. read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + + define_database_baseurl else echo "No database enabled" fi From 134205c1388ac69169698ff2fe36cba23044ff62 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 23 Dec 2021 12:26:36 +0100 Subject: [PATCH 1527/1936] Don't enable the dstat service in CI jobs We still are seeing regular job failures because the pcp package fails to install. Assume that we can still enable it on demand when someone needs to debug specific job issues, let us just disable it by default. Related-Bug: 1943184 Signed-off-by: Dr. Jens Harbott Change-Id: I32ef8038e21c818623db9389588b3c6d3f98dcad --- .zuul.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b5ab1277b6..e5ef7ee080 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -432,7 +432,7 @@ PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' devstack_services: # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true mysql: true @@ -441,7 +441,7 @@ subnode: devstack_services: # Shared services - dstat: true + dstat: false memory_tracker: true devstack_localrc: # Multinode specific settings @@ -507,7 +507,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true mysql: true @@ -557,7 +557,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false memory_tracker: true tls-proxy: true # Nova services From 05e622ead243325658ae5aff5b5b53ce60135c57 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 29 Dec 2021 12:30:01 +0530 Subject: [PATCH 1528/1936] Use upper-constraints from in review changes Currently upper-constraints.txt is not getting used from in-review changes of requirements project and leading to merge of broken requirements[1]. Use master branch to fetch constraints instead of the remote branch. [1] https://review.opendev.org/c/openstack/requirements/+/822575 Depends-On: https://review.opendev.org/c/openstack/requirements/+/823128 Change-Id: I5d42ac6b54bf20804d7e5faa39d1289102318b64 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index ab802171d1..bdbd3caec8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -119,7 +119,7 @@ function set_tempest_venv_constraints { local tmp_c tmp_c=$1 if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c + (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > $tmp_c else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." 
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From c1a75c6a504d720e9d46f924f5c3da07fddfee72 Mon Sep 17 00:00:00 2001 From: Miguel Lavalle Date: Fri, 31 Dec 2021 16:14:23 -0600 Subject: [PATCH 1529/1936] Fix mysqladmin failure for Fedora 34 and mariadb mysqladmin is incorrectly installed in Fedora 34 with mariadb. This causes the failure of Zuul Fedora based jobs. The issue is a conflict between mariadb and community mysql that is described in [1] and [2]. The workaround is to explicitly install package "mariadb" Also configure an increased swap size like for the other platform jobs in order to avoid OOM issues. [1] https://bugzilla.redhat.com/show_bug.cgi?id=2026933 [2] https://lists.launchpad.net/maria-discuss/msg06179.html Closes-Bug: #1956116 Change-Id: Icf6d7e1af5130689ea10b29d37cc9b188b2c9754 --- .zuul.yaml | 3 +++ lib/databases/mysql | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 2c55b545e0..272018fa06 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -725,6 +725,8 @@ description: Fedora latest platform test nodeset: devstack-single-node-fedora-latest voting: false + vars: + configure_swap_size: 4096 - job: name: devstack-platform-fedora-latest-virt-preview @@ -733,6 +735,7 @@ nodeset: devstack-single-node-fedora-latest voting: false vars: + configure_swap_size: 4096 devstack_localrc: ENABLE_FEDORA_VIRT_PREVIEW_REPO: true diff --git a/lib/databases/mysql b/lib/databases/mysql index d0fa1199a7..8edbf8c4a4 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -178,7 +178,7 @@ EOF if is_oraclelinux; then install_package mysql-community-server elif is_fedora; then - install_package mariadb-server mariadb-devel + install_package mariadb-server mariadb-devel mariadb sudo systemctl enable $MYSQL_SERVICE_NAME elif is_suse; then install_package mariadb-server From 2ef4a4c8516bc6373bc7f4cafee62db715144952 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 3 Jan 2022 15:13:44 +0100 Subject: [PATCH 1530/1936] Fix tempest upper-constraints When deploying devstack in a stable branch, the master branch is available locally only in a CI environment where Zuul prepares all available branches. For a non-CI deployment we need to stick to using the remote branch as was the case before [0]. While the situation on the master branch isn't really broken, we apply the fix here anyway so that future stable branches are created in a working state. [0] I5d42ac6b54bf20804d7e5faa39d1289102318b64 Closes-Bug: #1956219 Signed-off-by: Dr. Jens Harbott Change-Id: Ib7719cb2d48b34db70f885e0afe77d904abba3b5 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bdbd3caec8..adffeda371 100644 --- a/lib/tempest +++ b/lib/tempest @@ -119,7 +119,9 @@ function set_tempest_venv_constraints { local tmp_c tmp_c=$1 if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then - (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > $tmp_c + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." 
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From 4448f243f396c502e34fda5bb148ba0224f934e7 Mon Sep 17 00:00:00 2001 From: Eduardo Santos Date: Thu, 6 Jan 2022 14:03:16 -0300 Subject: [PATCH 1531/1936] Fix public subnet creation command There was no space after the --project option in the command that creates the public subnet, thus if any option follows, the option itself will be parsed as part of the value passed to the --project option. This change just adds the missing space. Change-Id: I1e7375578342a82717222e902fcd65a4a62e33a7 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index ccb5398f75..a8844c475e 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -305,7 +305,7 @@ function _neutron_create_public_subnet_v4 { local admin_project_id admin_project_id=$(openstack project list | grep " admin " | get_field 1) die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 4 --project $admin_project_id" + local subnet_params="--ip-version 4 --project $admin_project_id " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " From ac958698d0e29cc0bc4bbad4476fc7bc01ed870d Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Wed, 5 Jan 2022 16:23:46 -0500 Subject: [PATCH 1532/1936] Only set chap algorithms if not openeuler For some reason, setting the CHAPAlgorithms as in c3b705138 breaks OpenEuler. Making this conditional so that tests continue to pass. Change-Id: Iaa740ecfbb9173dd97e90485bad88225caedb523 --- lib/nova | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 5a12da6731..4f98d4d52c 100644 --- a/lib/nova +++ b/lib/nova @@ -320,8 +320,12 @@ EOF fi # set chap algorithms. The default chap_algorithm is md5 which will - # not work under FIPS - iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + # not work under FIPS. + # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in + # and determine the correct solution for openeuler here + if ! is_openeuler; then + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + fi # ensure that iscsid is started, even when disabled by default restart_service iscsid From 807330ac370e8d0130cea2a99363cd3299422837 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 7 Jan 2022 11:40:54 +0100 Subject: [PATCH 1533/1936] Fix cloning requirements when GIT_DEPTH is set We always need the master branch of requirements in order to be able to install tempest with it, so override GIT_DEPTH when cloning that repo. Closes-Bug: 1956616 Change-Id: Id0b409bfadd73f2c30314724178d6e199121050b Signed-off-by: Dr. Jens Harbott --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0659212ab3..c92cc79b40 100755 --- a/stack.sh +++ b/stack.sh @@ -765,7 +765,9 @@ save_stackenv $LINENO # Bring down global requirements before any use of pip_install. This is # necessary to ensure that the constraints file is in place before we # attempt to apply any constraints to pip installs. 
-git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH +# We always need the master branch in addition to any stable branch, so +# override GIT_DEPTH here. +GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH # Install package requirements # Source it so the entire environment is available From cc6e20b24d22475720f7b938aa08edf9ee7514fb Mon Sep 17 00:00:00 2001 From: Carlos Camacho Date: Fri, 7 Jan 2022 15:30:56 +0100 Subject: [PATCH 1534/1936] Allow skip the database server installation This patch allows to skip the installation of the database backend packages (MySQL or Postgres) with the introduction of the INSTALL_DATABASE_SERVER_PACKAGES variable (defaulted to True). This is useful in such environments that do not require to install the MySQL/Postgres server packages directly but using a container serving that purpose, for those cases all the remaining steps should be executed just skipping the packages install. Change-Id: I26628a31fdda3ce95ed04a2b7ae7b132c288581f --- lib/databases/mysql | 27 +++++++++++++++------------ lib/databases/postgresql | 20 +++++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 8edbf8c4a4..30e4b7c496 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -12,6 +12,7 @@ _XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database mysql @@ -175,18 +176,20 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_oraclelinux; then - install_package mysql-community-server - elif is_fedora; then - install_package mariadb-server mariadb-devel mariadb - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_ubuntu; then - install_package $MYSQL_SERVICE_NAME-server - else - exit_distro_not_supported "mysql installation" + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then + install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_suse; then + install_package mariadb-server + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" + fi fi } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 1f347f5548..4f0a5a0a4c 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -13,7 +13,7 @@ set +o xtrace MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} - +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database postgresql @@ -104,15 +104,17 @@ EOF else sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - if is_fedora; then - sudo systemctl enable postgresql + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora || is_suse; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" fi - else - exit_distro_not_supported "postgresql installation" fi } From 
c994dc4de23620f74b750932e060306a27193add Mon Sep 17 00:00:00 2001 From: elajkat Date: Thu, 6 Jan 2022 11:28:55 +0100 Subject: [PATCH 1535/1936] Deprecate lib/neutron lib/neutron-legacy was recently undeprecated (see [0]), Openstack CI uses neutron-legacy and latest work was done in it also. To avoid double maintenance lib/neutron can be deprecated. For latest discussion see [1] and [2]. [0]: https://review.opendev.org/c/openstack/devstack/+/704829 [1]: https://meetings.opendev.org/meetings/networking/2022/networking.2022-01-04-14.04.log.html#l-52 [2]: https://meetings.opendev.org/irclogs/%23openstack-qa/%23openstack-qa.2022-01-05.log.html#t2022-01-05T15:57:37 Related-Bug: #1955765 Change-Id: I3fc328b7f47ccd7c1a97cceeea98fb2fbd609017 --- lib/neutron | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/neutron b/lib/neutron index 15d548e33d..e7719d4ebc 100644 --- a/lib/neutron +++ b/lib/neutron @@ -146,6 +146,7 @@ fi # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup @@ -169,6 +170,7 @@ function configure_root_helper_options { # configure_neutron() - Set config files, create data dirs, etc function configure_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) @@ -359,6 +361,7 @@ function configure_neutron_rootwrap { # Takes a single optional argument which is the config file to update, # if not passed $NOVA_CONF is used. function configure_neutron_nova_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" @@ -385,6 +388,7 @@ function configure_neutron_nova_new { # create_neutron_accounts() - Create required service accounts function create_neutron_accounts_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then @@ -408,6 +412,7 @@ function create_neutron_accounts_new { # init_neutron() - Initialize databases, etc. function init_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" recreate_database neutron time_start "dbsync" @@ -418,6 +423,7 @@ function init_neutron_new { # install_neutron() - Collect source and prepare function install_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR @@ -491,6 +497,7 @@ function start_neutron_api { # start_neutron() - Start running processes function start_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" # Start up the neutron agents if enabled # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins # can resolve the $NEUTRON_AGENT_BINARY @@ -528,6 +535,7 @@ function start_neutron_new { # stop_neutron() - Stop running processes function stop_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
for serv in neutron-api neutron-agent neutron-l3; do stop_process $serv done @@ -550,6 +558,7 @@ function stop_neutron_new { # neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local service_plugin_class=$1 local plugins="" @@ -574,11 +583,13 @@ function _neutron_ml2_extension_driver_add { } function neutron_server_config_add_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) } # neutron_deploy_rootwrap_filters() - deploy rootwrap filters function neutron_deploy_rootwrap_filters_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local srcdir=$1 sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d From d5d0bed479497560489983ae1fc80444b44fe029 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 17 Jan 2022 12:04:16 +0530 Subject: [PATCH 1536/1936] Workaround CentOS 8-stream bug until fixed Recent iputils release in CentOS 8-stream causing ping failures with non root user. This needs a fix in systemd package as mentioned in the Related Bugs, until it's fixed and is in 8-stream mirrors let's workaround it by setting net.ipv4.ping_group_range setting manually. Related-Bug: #1957941 Related-Bug: rhbz#2037807 Change-Id: I0d8dac910647968b625020c2a94e626ba5255058 --- tools/fixup_stuff.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 750849db68..f24ac40ad5 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -83,6 +83,11 @@ function fixup_fedora { if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools fi + # Workaround CentOS 8-stream iputils and systemd Bug + # https://bugzilla.redhat.com/show_bug.cgi?id=2037807 + if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then + sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' + fi } function fixup_suse { From 0a31630323cc172561d6544c8bee50004538cfb2 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Fri, 21 Jan 2022 10:07:07 +0100 Subject: [PATCH 1537/1936] Adapt compute node local.conf to OVN The default Neutron configuration is now using OVN, but the multinode lab was using an incompatible configuration: The q-agt/neutron-agt service must be disabled with OVN. Change-Id: I518a739a3daac941880463cde6b47951331d0911 --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c0b3f58157..f62e7a969c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -169,7 +169,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP From d6909e41af4b776e68fb133a31ff086fdaff38ff Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Sat, 22 Jan 2022 13:54:12 +0100 Subject: [PATCH 1538/1936] Use distro pip on Ubuntu Running get-pip.py fails on Ubuntu when running twice, e.g. after a unstack/stack cycle. Just use distro pip instead. Closes-Bug: #1957048 Signed-off-by: Dr. Jens Harbott Change-Id: I87a8d53ed8860dd017a6c826dee6b6f4baef3c96 --- tools/install_pip.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 259375a150..5d73a1f0d8 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -129,6 +129,8 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # For general sanity, we just use the packaged pip. It should be # recent enough anyway. This is included via rpms/general : # Simply fall through +elif is_ubuntu; then + : # pip on Ubuntu 20.04 is new enough, too else install_get_pip fi From ae40825df618a6b8164be8345f5cc1b11a2dc614 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Tue, 26 Oct 2021 10:37:07 +0200 Subject: [PATCH 1539/1936] Use devstack-system-admin for keystone objects creation This is needed so we can set keystone into enforcing secure RBAC. This also adjusts lib/glance, which already partially used devstack-system-admin. Change-Id: I6df8ad23a3077a8420340167a748ae23ad094962 --- functions-common | 46 +++++++++++++++++++++++----------------------- lib/glance | 6 +++--- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/functions-common b/functions-common index 7042408f40..7a83b4bc5c 100644 --- a/functions-common +++ b/functions-common @@ -867,10 +867,10 @@ function get_or_create_domain { # Gets domain id domain_id=$( # Gets domain id - openstack domain show $1 \ + openstack --os-cloud devstack-system-admin domain show $1 \ -f value -c id 2>/dev/null || # Creates new domain - openstack domain create $1 \ + openstack --os-cloud devstack-system-admin domain create $1 \ --description "$2" \ -f value -c id ) @@ -885,7 +885,7 @@ function get_or_create_group { # Gets group id group_id=$( # Creates new group with --or-show - openstack group create $1 \ + openstack --os-cloud devstack-system-admin group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -904,7 +904,7 @@ function get_or_create_user { # Gets user id user_id=$( # Creates new user with --or-show - openstack user create \ + openstack --os-cloud devstack-system-admin user create \ $1 \ --password "$2" \ --domain=$3 \ @@ -921,7 +921,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack project create $1 \ + openstack --os-cloud devstack-system-admin project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -934,7 +934,7 @@ function get_or_create_role { local role_id role_id=$( # Creates role with --or-show - openstack role create $1 \ + openstack --os-cloud devstack-system-admin role create $1 \ --or-show -f value -c id ) echo $role_id @@ -964,7 +964,7 @@ function get_or_add_user_project_role { domain_args=$(_get_domain_args $4 $5) # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ @@ -972,11 +972,11 @@ function get_or_add_user_project_role { | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --project $3 \ $domain_args - user_role_id=$(openstack role assignment 
list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ @@ -991,17 +991,17 @@ function get_or_add_user_project_role { function get_or_add_user_domain_role { local user_role_id # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --domain $3 - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ @@ -1019,7 +1019,7 @@ function get_or_add_user_system_role { domain_args=$(_get_domain_args $4) # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --system $3 \ @@ -1027,11 +1027,11 @@ function get_or_add_user_system_role { -f value -c Role) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --system $3 \ $domain_args - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --system $3 \ @@ -1046,17 +1046,17 @@ function get_or_add_user_system_role { function get_or_add_group_project_role { local group_role_id # Gets group role id - group_role_id=$(openstack role assignment list \ + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --group $2 \ --project $3 - group_role_id=$(openstack role assignment list \ + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ @@ -1072,9 +1072,9 @@ function get_or_create_service { # Gets service id service_id=$( # Gets service id - openstack service show $2 -f value -c id 2>/dev/null || + openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists - openstack service create \ + openstack --os-cloud devstack-system-admin service create \ $2 \ --name $1 \ --description="$3" \ @@ -1087,14 +1087,14 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id - endpoint_id=$(openstack endpoint list \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 \ --interface $2 \ --region $4 \ -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint - endpoint_id=$(openstack endpoint create \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi @@ -1128,7 +1128,7 @@ function get_or_create_endpoint { # Get a URL from the identity service # Usage: get_endpoint_url function get_endpoint_url { - echo $(openstack endpoint list \ + echo $(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 --interface $2 \ -c URL -f value) } diff --git a/lib/glance 
b/lib/glance index 4c2755f76f..9bba938b9d 100644 --- a/lib/glance +++ b/lib/glance @@ -311,11 +311,11 @@ function configure_glance_quotas { iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" iniset $GLANCE_API_CONF oslo_limit endpoint_id \ - $(openstack endpoint list --service glance -f value -c ID) + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) # Allow the glance service user to read quotas - openstack role add --user glance --user-domain Default --system all \ - reader + openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \ + --system all reader } # configure_glance() - Set config files, create data dirs, etc From 5f5002a3781e255a16711f99cb784a28d6f27258 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Tue, 26 Oct 2021 10:50:37 +0200 Subject: [PATCH 1540/1936] Revert "Revert "Add enforce_scope setting support for keystone"" This reverts commit 26bd94b45efb63683072006e4281dd34a313d881. Reason for revert: Devstack keystone creation/setup are moved to scope tokens, so we can reintroduce the scope check enable. Change-Id: I6e1c261196dbcaf632748fb6f04e0867648b76c7 --- lib/keystone | 11 +++++++++++ lib/tempest | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/lib/keystone b/lib/keystone index b953972dd3..a4c8a52121 100644 --- a/lib/keystone +++ b/lib/keystone @@ -124,6 +124,12 @@ KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} # Whether to create a keystone admin endpoint for legacy applications KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + # Functions # --------- @@ -259,6 +265,11 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index adffeda371..9d5e1fce9f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -608,6 +608,15 @@ function configure_tempest { fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on Tempest side so that + # test can be run with scoped token. 
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + iniset $TEMPEST_CONFIG auth admin_system 'all' + iniset $TEMPEST_CONFIG auth admin_project_name '' + fi iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" From be7b5bf671b4cdc082fb9b7bb73ec55cab0054dd Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 27 Jan 2022 16:04:32 +0100 Subject: [PATCH 1541/1936] Disable enforcing scopes in Neutron temporary After patch [1] was merged in Neutron, enforcing scopes there is broken. So lets disable it temporary to unblock Devstack's gate for now. [1] https://review.opendev.org/c/openstack/neutron/+/821208 Related-Bug: #1959196 Change-Id: I24da6f3897a638749d16f738329a873a5f9a291d --- .zuul.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 248a56beb9..5a58d743fe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -619,7 +619,9 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true - NEUTRON_ENFORCE_SCOPE: true + # TODO(slaweq): Enable enforce scopes in Neutron when bug + # https://bugs.launchpad.net/neutron/+bug/1959196 will be fixed + # NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode From 1fd45940f370dc3aab6a5e9492c36e735f673c8a Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Tue, 25 Jan 2022 16:44:36 -0500 Subject: [PATCH 1542/1936] Add openstack-two-node-centos-8-stream This will allow multinode FIPS testing Change-Id: I82b3b8fe56275aed72e13f6d1bd9170c50e5da0d --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 248a56beb9..ea7708f751 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -146,6 +146,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-centos-8-stream + nodes: + - name: controller + label: centos-8-stream + - name: compute1 + label: centos-8-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: From 14a0c09001e8e2304eff4918206163cc7e6db1eb Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 28 Jan 2022 09:44:40 +0100 Subject: [PATCH 1543/1936] Fix deployment of Neutron with enforced scopes After patch [1] new RBAC policies changed in the way that SYSTEM_ADMIN user isn't anymore allowed to e.g. create resources in behalf of some projects. Now PROJECT_ADMIN needs to create such resources instead. So this patch basically reverts most of the changes which were done in [2] some time ago. It also introduces new entry in the clouds.yaml file - "devstack-admin-demo" which is "admin" user in the "demo" project as it's needed to create some resouces in the demo project now. Additionally, because of bug [3] this patch changes way how IPv6 external gateway IP is found using Neutron API. This change may be reverted in the future when bug [3] will be fixed. 
[1] https://review.opendev.org/c/openstack/neutron/+/821208 [2] https://review.opendev.org/c/openstack/devstack/+/797450 [3] https://bugs.launchpad.net/neutron/+bug/1959332 Depends-On: https://review.opendev.org/c/openstack/neutron/+/826828 Closes-Bug: #1959196 Change-Id: I32a6e8b9b59269a8699644b563657363425f7174 --- functions-common | 11 +++++ lib/neutron_plugins/services/l3 | 73 ++++++++++++++------------------- lib/tempest | 9 +--- 3 files changed, 44 insertions(+), 49 deletions(-) diff --git a/functions-common b/functions-common index 7042408f40..b407ca5fe6 100644 --- a/functions-common +++ b/functions-common @@ -107,6 +107,17 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name admin + # devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + # devstack-alt: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a8844c475e..cd98115746 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -100,11 +100,6 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} -NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" -if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then - NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" -fi - default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') @@ -156,10 +151,6 @@ function create_neutron_initial_network { project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - # Allow drivers that need to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -168,10 +159,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + 
SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) fi fi fi @@ -179,14 +170,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -196,7 +187,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -206,7 +197,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) 
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -224,11 +215,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create $Q_ROUTER_NAME --project $admin_project_id | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi @@ -238,9 +229,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} --project $admin_project_id | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) else - EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --project $admin_project_id | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -262,8 +253,7 @@ function _neutron_create_private_subnet_v4 { if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 4 " + local subnet_params="--ip-version 4 " if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi @@ -272,7 +262,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id 
"Failure creating private IPv4 subnet for $project_id" echo $subnet_id } @@ -286,8 +276,7 @@ function _neutron_create_private_subnet_v6 { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$FIXED_RANGE_V6 fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 6 " + local subnet_params="--ip-version 6 " if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " fi @@ -295,17 +284,14 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 4 --project $admin_project_id " + local subnet_params="--ip-version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " @@ -313,29 +299,26 @@ function _neutron_create_public_subnet_v4 { subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 6 --project $admin_project_id " + local subnet_params="--ip-version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add 
subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -343,7 +326,7 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then @@ -370,7 +353,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" fi _neutron_set_router_id @@ -379,7 +362,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -391,7 +374,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using OVN or the l3-agent for layer 3 @@ -412,7 +395,13 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's + # gateway ports aren't visible in API because such ports don't belongs + # to any tenant. Because of that, at least temporary we need to find + # IPv6 address of the router's gateway in a bit different way. 
+ # It can be reverted when bug + # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed + IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -440,7 +429,7 @@ function _neutron_configure_router_v6 { function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time - EXT_LIST=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" extension list --network -c Alias -f value) + EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } diff --git a/lib/tempest b/lib/tempest index adffeda371..9ca06acabf 100644 --- a/lib/tempest +++ b/lib/tempest @@ -90,11 +90,6 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} -NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" -if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then - NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" -fi - # Functions # --------- @@ -293,8 +288,8 @@ function configure_tempest { if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --share shared --project "$admin_project_id" - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet --project "$admin_project_id" + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG From 081c9b716fc742ffc12263e46ae499d7a1f65a7e Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 28 Jan 2022 09:52:28 +0100 Subject: [PATCH 1544/1936] Revert "Disable enforcing scopes in Neutron temporary" This reverts commit be7b5bf671b4cdc082fb9b7bb73ec55cab0054dd. As related bug is fixed, lets enabled scope enforcement in Neutron again. 
Related-bug: #1959196 Change-Id: I72db7ef533e78a10734d105e6a0debef288e41a1 --- .zuul.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a58d743fe..248a56beb9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -619,9 +619,7 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true - # TODO(slaweq): Enable enforce scopes in Neutron when bug - # https://bugs.launchpad.net/neutron/+bug/1959196 will be fixed - # NEUTRON_ENFORCE_SCOPE: true + NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode From 099a048fb933649606e58310e8e705e7c7e29cd7 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 6 May 2021 00:09:33 +0000 Subject: [PATCH 1545/1936] Configure nova unified limits quotas This enables the configuration of nova to use unified limits in keystone and enforcement in oslo.limit. Related to blueprint unified-limits-nova Depends-On: https://review.opendev.org/c/openstack/nova/+/715271 Change-Id: Ifdef3510bc7da3098a71739814e35dbaf612ae34 --- lib/nova | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/lib/nova b/lib/nova index 930529a433..9039c6b454 100644 --- a/lib/nova +++ b/lib/nova @@ -159,6 +159,9 @@ NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} # image in devstack is CirrOS. NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} +# Whether to use Keystone unified limits instead of legacy quota limits. +NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) + # Functions # --------- @@ -384,6 +387,13 @@ function create_nova_accounts { "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ "http://$SERVICE_HOST:$S3_SERVICE_PORT" fi + + # Unified limits + if is_service_enabled n-api; then + if [[ "$NOVA_USE_UNIFIED_LIMITS" = True ]]; then + configure_nova_unified_limits + fi + fi } # create_nova_conf() - Create a new nova.conf file @@ -719,6 +729,62 @@ function configure_console_proxies { fi } +function configure_nova_unified_limits { + # Default limits. Mirror the config-based default values. + # Note: disk quota is new in nova as of unified limits. 
+ bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + servers; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME \ + class:VCPU; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME \ + class:MEMORY_MB; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME \ + class:DISK_GB; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME \ + server_metadata_items; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME \ + server_injected_files; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME \ + server_injected_file_content_bytes; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME \ + server_injected_file_path_bytes; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME \ + server_key_pairs; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + server_groups; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + server_group_members" + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope "'all'" + iniset $NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack role add --user nova --user-domain Default --system all \ + reader +} + function init_nova_service_user_conf { iniset $NOVA_CONF service_user send_service_user_token True iniset $NOVA_CONF service_user auth_type password From a756f4b9681d429f2612164eb01d57c800ff2d2a Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 31 Jan 2022 16:38:31 +0000 Subject: [PATCH 1546/1936] Add python3.6 pip support Since pip v22, python3.6 is not supported (the minimum version is python3.7). This patch adds the reference for the pip3.6 URL to be used instead of the default one. 
Closes-Bug: #1959600 Change-Id: Iab2c391d5388461fe9e9037cee81884ce8032e72 --- tools/install_pip.sh | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 5d73a1f0d8..e9c52eacb7 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,7 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} -LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)" +PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -57,12 +57,21 @@ function get_versions { function install_get_pip { + if [[ "$PYTHON3_VERSION" = "3.6" ]]; then + _pip_url=$PIP_GET_PIP36_URL + _local_pip="$FILES/$(basename $_pip_url)-py36" + else + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + fi + + # If get-pip.py isn't python, delete it. This was probably an # outage on the server. - if [[ -r $LOCAL_PIP ]]; then - if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then - echo "WARNING: Corrupt $LOCAL_PIP found removing" - rm $LOCAL_PIP + if [[ -r $_local_pip ]]; then + if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip fi fi @@ -76,20 +85,20 @@ function install_get_pip { # Thus we use curl's "-z" feature to always check the modified # since and only download if a new version is out -- but only if # it seems we downloaded the file originally. - if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then # only test freshness if LOCAL_PIP is actually there, # otherwise we generate a scary warning. local timecond="" - if [[ -r $LOCAL_PIP ]]; then - timecond="-z $LOCAL_PIP" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" fi curl -f --retry 6 --retry-delay 5 \ - $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + $timecond -o $_local_pip $_pip_url || \ die $LINENO "Download of get-pip.py failed" - touch $LOCAL_PIP.downloaded + touch $_local_pip.downloaded fi - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP + sudo -H -E python${PYTHON3_VERSION} $_local_pip } From 85c7d8db4eef2e367a7466a39b4f1fba7a983eef Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Fri, 4 Feb 2022 08:29:32 +0000 Subject: [PATCH 1547/1936] revert stackrc execute permissions This change reverts the execute permissions from stackrc which is not meant to be run as a script but sourced as part of stack.sh Change-Id: I9a05051e5a297cfaf78d097fa5f90a7c5fd254a6 --- stackrc | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 stackrc diff --git a/stackrc b/stackrc old mode 100755 new mode 100644 From 343e35162798af4c0399f2f7c0a733c568782686 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 3 Feb 2022 11:19:08 +1100 Subject: [PATCH 1548/1936] Bump fedora-latest to F35 Generally this is straight forward, but Horizon has a dependency issue with pyScss (described in [1]) so it is disabled, for now. [1] https://bugs.launchpad.net/horizon/+bug/1960204 Co-Authored-By: Dr. 
Jens Harbott Depends-On: https://review.opendev.org/c/openstack/devstack/+/827578 Change-Id: I7c4bf0945f9ac5bd563fe0a698c09b8571c97c5e --- .zuul.yaml | 6 +++++- files/rpms/swift | 2 +- lib/apache | 7 ++++++- stack.sh | 2 +- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a58d743fe..3278eeb9e0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -90,7 +90,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-34 + label: fedora-35 groups: - name: tempest nodes: @@ -729,6 +729,10 @@ voting: false vars: configure_swap_size: 4096 + # Python 3.10 dependency issues; see + # https://bugs.launchpad.net/horizon/+bug/1960204 + devstack_services: + horizon: false - job: name: devstack-platform-fedora-latest-virt-preview diff --git a/files/rpms/swift b/files/rpms/swift index faf0a3175a..a838d7839e 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f34,rhel9 +xinetd # not:f35,rhel9 diff --git a/lib/apache b/lib/apache index cbe61adf34..f29c7ea2cb 100644 --- a/lib/apache +++ b/lib/apache @@ -85,7 +85,12 @@ function install_apache_uwsgi { if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" install_package ${pkg_list} - elif is_fedora && ! is_openeuler; then + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/stack.sh b/stack.sh index c92cc79b40..0082b99f11 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From accd99e7cddabb60633bf7e8f8832c961a2805f4 Mon Sep 17 00:00:00 2001 From: Jakob Meng Date: Tue, 8 Feb 2022 11:05:14 +0100 Subject: [PATCH 1549/1936] Made LVM backing disk persistent Previously, loop devices for LVM volume groups backing files were not created after reboots, causing e.g. Cinder to fail with messages such as ERROR cinder.service [-] Manager for service cinder-volume devstack@lvmdriver-1 is reporting problems, not sending heartbeat. Service will appear "down". Now, we use systemd services to manage loop devices for backing files. 
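After stacking, the generated unit and its loop device can be inspected with something like the following (the names below assume the default Cinder volume group and data directory, so adjust them to the local setup):

    sudo systemctl status stack-volumes-lvmdriver-1-backing-file.service
    sudo losetup --associated /opt/stack/data/stack-volumes-lvmdriver-1-backing-file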
Change-Id: I27ec027834966e44aa9a99999358f5b4debc43e0 --- files/lvm-backing-file.template | 16 +++++++++++ lib/lvm | 49 ++++++++++++++++++--------------- 2 files changed, 43 insertions(+), 22 deletions(-) create mode 100644 files/lvm-backing-file.template diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + +[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/lib/lvm b/lib/lvm index b826c1bc63..d3f6bf1792 100644 --- a/lib/lvm +++ b/lib/lvm @@ -53,28 +53,10 @@ function _remove_lvm_volume_group { sudo vgremove -f $vg } -# _clean_lvm_backing_file() removes the backing file of the -# volume group -# -# Usage: _clean_lvm_backing_file() $backing_file -function _clean_lvm_backing_file { - local backing_file=$1 - - # If the backing physical device is a loop device, it was probably setup by DevStack - if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then - local vg_dev - vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') - if [[ -n "$vg_dev" ]]; then - sudo losetup -d $vg_dev - fi - rm -f $backing_file - fi -} - # clean_lvm_volume_group() cleans up the volume group and removes the # backing file # -# Usage: clean_lvm_volume_group $vg +# Usage: clean_lvm_volume_group() $vg function clean_lvm_volume_group { local vg=$1 @@ -83,11 +65,22 @@ function clean_lvm_volume_group { # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then - _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably setup by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi fi } - # _create_lvm_volume_group creates default volume group # # Usage: _create_lvm_volume_group() $vg $size @@ -106,8 +99,20 @@ function _create_lvm_volume_group { directio="--direct-io=on" fi + # Only create systemd service if it doesn't already exists + if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + local vg_dev - vg_dev=$(sudo losetup -f --show $directio $backing_file) + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) # Only create volume group if it doesn't already exist if ! 
sudo vgs $vg; then From 8c6710326eaf8114b579720185161091a0a9f38c Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 9 Feb 2022 18:01:46 +0000 Subject: [PATCH 1550/1936] Fix installation with OVN backend and compilation This patch fixes several issues related to the installation with the OVN backend when OVS/OVN compilation is enabled. The OVS/OVN local directories prefix, when both services are compiled, is now "/usr/local". The "ovn_agent._run_process" function calls "ovs-appctl" to configure the logging settings of several services; the ctl socket file is now used instead of the service name. That is more robust and does not fail in systems with previous installations. Closes-Bug: #1960514 Change-Id: I69de5333393957593db6e05495f0c3c758efefdf --- lib/neutron_plugins/ovn_agent | 7 ++++++- lib/neutron_plugins/ovs_base | 2 +- lib/neutron_plugins/ovs_source | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 09b28b6c3f..927896b70b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -253,7 +253,12 @@ function _run_process { local testcmd="test -e $OVS_RUNDIR/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 - sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info + local service_ctl_file + service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 8acf586189..cc41a8cd46 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -68,7 +68,7 @@ function _neutron_ovs_base_install_ubuntu_dkms { function _neutron_ovs_base_install_agent_packages { if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then remove_ovs_packages - compile_ovs False /usr /var + compile_ovs False /usr/local /var load_conntrack_gre_module start_new_ovs else diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 9c87dce551..9ae5555afb 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -188,12 +188,12 @@ function action_openvswitch { # start_new_ovs() - removes old ovs database, creates a new one and starts ovs function start_new_ovs { sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ - sudo /usr/share/openvswitch/scripts/ovs-ctl start + sudo /usr/local/share/openvswitch/scripts/ovs-ctl start } # stop_new_ovs() - stops ovs function stop_new_ovs { - local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl' + local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl' if [ -x $ovs_ctl ] ; then sudo $ovs_ctl stop From 17b1999eabab92a7820a2900853dc23d7150dbe9 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Feb 2022 22:14:24 +0000 Subject: [PATCH 1551/1936] Default CIRROS_ARCH to host arch This change uses uname -m to get the portable host arch and uses that as the new default. On x86_64 hosts this should result in no visible change in behavior; however, on a non-x86 host it will cause devstack to attempt to download a cirros image that matches the host. 
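Deployments that still want a specific image architecture should be able to keep pinning it in the localrc section of local.conf, for example:

    CIRROS_ARCH=x86_64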
Change-Id: I6d1495a23400ef4cf496302028324fa5794dd45f --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 681e9dee38..e48fd81d16 100644 --- a/stackrc +++ b/stackrc @@ -663,7 +663,7 @@ esac #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} -CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of # which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and From ef6fac7959b257bb08ff3014e9cbf9cbc6b28ec3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 16 Feb 2022 02:16:15 +0000 Subject: [PATCH 1552/1936] Updated from generate-devstack-plugins-list Change-Id: If1b667cd4af88511cb1672645a980c9c4fc557ae --- doc/source/plugin-registry.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 3edd708d8b..6850553b52 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,6 +75,7 @@ openstack/networking-powervm `https://opendev.org/openstack/networki openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ @@ -92,6 +93,7 @@ openstack/sahara `https://opendev.org/openstack/sahara < openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ openstack/solum `https://opendev.org/openstack/solum `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ @@ -112,7 +114,6 @@ openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui openstack/zun `https://opendev.org/openstack/zun `__ openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ performa/os-faults `https://opendev.org/performa/os-faults `__ -skyline/skyline-apiserver `https://opendev.org/skyline/skyline-apiserver `__ starlingx/config `https://opendev.org/starlingx/config `__ starlingx/fault `https://opendev.org/starlingx/fault `__ starlingx/ha `https://opendev.org/starlingx/ha `__ From c0882aeaae8ebdc692f2f0e005f8795bbb6c3d53 Mon Sep 17 00:00:00 2001 From: Victor Morales Date: Wed, 16 Feb 2022 18:15:12 -0800 Subject: [PATCH 1553/1936] Add rsync deb package for swift The rsync debian package is required for swift service. This requirement has been covered by rpms but not for deb packages. 
Change-Id: Iefd1302be9c7fd80e037bbae3638602d6d823580 --- files/debs/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/swift b/files/debs/swift index 4b8ac3d793..67c6c8ddb4 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -2,5 +2,6 @@ curl liberasurecode-dev make memcached +rsync sqlite3 xfsprogs From e30620e9a62cd1243bded2b922b21c269d801aa6 Mon Sep 17 00:00:00 2001 From: Jakob Meng Date: Fri, 4 Feb 2022 20:55:48 +0100 Subject: [PATCH 1554/1936] Made Swift backing disk persistent Previously, Swift's backing disk would not be mounted after reboots, causing the swift-proxy-server service to fail with cryptic error messages such as 'proxy-server: ERROR Insufficient Storage'. Now, we use Dan Smith's create_disk function from functions to create the backing disk for us and add it to /etc/fstab. Change-Id: I9cbccc87bc94a55b58e9badf3fdb127d6f1cf599 --- lib/swift | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) diff --git a/lib/swift b/lib/swift index 9c13701c6e..ba92f3dcc3 100644 --- a/lib/swift +++ b/lib/swift @@ -179,12 +179,9 @@ function is_swift_enabled { # cleanup_swift() - Remove residual data files function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + rm -rf ${SWIFT_DATA_DIR}/run/ if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _cleanup_swift_apache_wsgi @@ -575,28 +572,7 @@ function create_swift_disk { sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DISK_IMAGE} - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DISK_IMAGE} - sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} - - truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} - - # Make a fresh XFS filesystem - /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ - ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 - fi + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} # Create a link to the above mount and # create all of the directories needed to emulate a few different servers From a2ff7545366b1be960c1175b47e20c5845c3a6e2 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 2 Mar 2022 02:13:44 +0000 Subject: [PATCH 1555/1936] Updated from generate-devstack-plugins-list Change-Id: Iff2bf021edee9be3bae21b67e66fe07c552f3a05 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 6850553b52..2e8e8f53d7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,6 +75,7 @@ openstack/networking-powervm `https://opendev.org/openstack/networki openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ From 2c96180ac8482d912c487c18d400f418bf933cab Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 3 Mar 2022 23:54:49 +0000 Subject: [PATCH 1556/1936] Clean up unified limits configuration for nova and glance This is a followup for change Ifdef3510bc7da3098a71739814e35dbaf612ae34 which added configuration of unified limits for nova. This removes an unnecessary wrapper unsetting of OS_ env variables, unnecessary quoting on an iniset config value, and a hardcoding of user domain. The glance code from which the nova code was originally copied is also cleaned up. Change-Id: I4921af5cc0f624dd5aa848533f7049ee816be593 --- lib/glance | 6 ++--- lib/nova | 67 +++++++++++++++++++++++------------------------------- 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/lib/glance b/lib/glance index 9bba938b9d..b94c06dc93 100644 --- a/lib/glance +++ b/lib/glance @@ -309,13 +309,13 @@ function configure_glance_quotas { iniset $GLANCE_API_CONF oslo_limit username glance iniset $GLANCE_API_CONF oslo_limit auth_type password iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" + iniset $GLANCE_API_CONF oslo_limit system_scope all iniset $GLANCE_API_CONF oslo_limit endpoint_id \ $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) # Allow the glance service user to read quotas - openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \ - --system all reader + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } # configure_glance() - Set config files, create data dirs, etc diff --git a/lib/nova b/lib/nova index 90289b139a..509cba6ff2 100644 --- a/lib/nova +++ b/lib/nova @@ -746,42 +746,33 @@ function configure_console_proxies { } function configure_nova_unified_limits { - # Default limits. Mirror the config-based default values. 
+ # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. # Note: disk quota is new in nova as of unified limits. - bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - servers; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 20 --region $REGION_NAME \ - class:VCPU; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit $((50 * 1024)) --region $REGION_NAME \ - class:MEMORY_MB; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 20 --region $REGION_NAME \ - class:DISK_GB; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 128 --region $REGION_NAME \ - server_metadata_items; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 5 --region $REGION_NAME \ - server_injected_files; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10240 --region $REGION_NAME \ - server_injected_file_content_bytes; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 255 --region $REGION_NAME \ - server_injected_file_path_bytes; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 100 --region $REGION_NAME \ - server_key_pairs; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - server_groups; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - server_group_members" + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups 
+ openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members # Tell nova to use these limits iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" @@ -792,13 +783,13 @@ function configure_nova_unified_limits { iniset $NOVA_CONF oslo_limit username nova iniset $NOVA_CONF oslo_limit auth_type password iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF oslo_limit system_scope "'all'" + iniset $NOVA_CONF oslo_limit system_scope all iniset $NOVA_CONF oslo_limit endpoint_id \ $(openstack endpoint list --service nova -f value -c ID) # Allow the nova service user to read quotas - openstack role add --user nova --user-domain Default --system all \ - reader + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } function init_nova_service_user_conf { From 13e8db5a6f44a8f537988a102f9869e725bb97bb Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Mar 2022 20:17:31 +0000 Subject: [PATCH 1557/1936] ignore failures to copy the devstack cache If the ci images do not have any cached data we should ignore any error when trying to copying it. This is requried when using unmodified cloud images. Change-Id: Ia6e94fc01343d0c292b1477905f8a96a6b43bcf8 --- roles/setup-devstack-cache/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml index 84f33f0e16..3adff17d5d 100644 --- a/roles/setup-devstack-cache/tasks/main.yaml +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -2,6 +2,7 @@ # This uses hard links to avoid using extra space. command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" become: true + ignore_errors: yes - name: Set ownership of cached files file: From 35bc600da17c7342345fa9c4d0b8078a8388fad1 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Mon, 28 Feb 2022 18:42:34 +0000 Subject: [PATCH 1558/1936] Fix tls-proxy on newer versions of openssl Newer versions of openssl (CentOS9Stream for example) do not like using sha1. Devstack will fail on these systems[1] with the following error: 801B93DCE77F0000:error:03000098:digital envelope routines:do_sigver_init:invalid digest:crypto/evp/m_sigver.c:333: This patch updates the tls-proxy code in devstack to use sha256 instead of sha1 which allows devstack to complete when tls-proxy is enabled. [1] https://zuul.opendev.org/t/openstack/build/1d90b22a39c74e24a8390861b3c5f957/log/job-output.txt#5535 Closes-Bug: #1962600 Change-Id: I71e1371affe32f070167037b0109a489d196bd31 --- lib/tls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tls b/lib/tls index b3cc0b4159..5a7f5ae324 100644 --- a/lib/tls +++ b/lib/tls @@ -169,7 +169,7 @@ default_md = default [ req ] default_bits = 1024 -default_md = sha1 +default_md = sha256 prompt = no distinguished_name = req_distinguished_name @@ -261,7 +261,7 @@ function make_cert { if [ ! -r "$ca_dir/$cert_name.crt" ]; then # Generate a signing request $OPENSSL req \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/$cert_name.key \ @@ -301,7 +301,7 @@ function make_int_CA { if [ ! 
-r "$ca_dir/cacert.pem" ]; then # Create a signing certificate request $OPENSSL req -config $ca_dir/ca.conf \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/cacert.key \ From 7943a92bdbdd2a3b2f75fe66ee8c69db65147692 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 14 Mar 2022 13:53:41 -0400 Subject: [PATCH 1559/1936] Do not use hardcoded IPv4 localhost value There are a couple of places that still use a hardcoded 127.0.0.1 value, even if devstack is run with SERVICE_IP_VERSION=6 in local.conf. While things still work, SERVICE_LOCAL_HOST should be used instead since everything else could be using IPv6. Change-Id: I2dd9247a4ac19f565d4d5ecb2e1490501fda8bca --- lib/apache | 9 +++++++-- lib/databases/mysql | 14 +++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index f29c7ea2cb..02827d1f1b 100644 --- a/lib/apache +++ b/lib/apache @@ -27,6 +27,11 @@ set +o xtrace APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} +APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST] +fi + # Set up apache name and configuration directory # Note that APACHE_CONF_DIR is really more accurately apache's vhost @@ -323,7 +328,7 @@ function write_local_uwsgi_http_config { rm -rf $file iniset "$file" uwsgi wsgi-file "$wsgi" port=$(get_random_port) - iniset "$file" uwsgi http-socket "127.0.0.1:$port" + iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" iniset "$file" uwsgi processes $API_WORKERS # This is running standalone iniset "$file" uwsgi master true @@ -359,7 +364,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } diff --git a/lib/databases/mysql b/lib/databases/mysql index 30e4b7c496..0f45273d4b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -86,10 +86,16 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi - # Start mysql-server + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server if is_fedora || is_suse; then # service is not started by default start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already @@ -102,7 +108,7 @@ function configure_database_mysql { if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then local cmd_args="-uroot -p$DATABASE_PASSWORD " else - local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 " + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " fi # In mariadb e.g. 
on Ubuntu socket plugin is used for authentication @@ -119,9 +125,7 @@ function configure_database_mysql { # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change bind-address from localhost (127.0.0.1) to any (::) and - # set default db type to InnoDB - iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + # Set default db type to InnoDB iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 From 369042b74fe07e6f0f471fd50d7108586d55b97a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 21 Mar 2022 15:29:38 -0500 Subject: [PATCH 1560/1936] Make centos-9-stream job voting bug#1960346 is fixed by the below series - https://review.opendev.org/q/(topic:bug/1960346+OR+topic:wait_until_sshable_pingable)+status:merged and now centos-9-stream job is passing and made voting on tempest gate. This commit makes devstack centos9 steam platform job as voting and add it gate pipeline too. Change-Id: Ic35420c5d58926ae90a136045a1558112accc533 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index fc80e6c413..067d3f5b08 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -678,7 +678,6 @@ parent: tempest-full-py3 description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream - voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -894,6 +893,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-platform-centos-9-stream - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From cebd00aa0468a084d21fc6f43ed7d4c15db878f5 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 17 Feb 2022 11:57:30 +0100 Subject: [PATCH 1561/1936] Clean usage of project_id in the Neutron's L3 service module After patch [1] project_id in that module is no longer needed as to make it working with new secure RBAC policies we had to hardcode "demo" project to be used always. This is small follow-up patch with cleaning after [1]. 
[1] https://review.opendev.org/c/openstack/devstack/+/826851/ Change-Id: Iddf9692817c91807fc3269547910e4f83585f07f --- lib/neutron_plugins/services/l3 | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index cd98115746..c0d74c7728 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -147,10 +147,6 @@ function _neutron_get_ext_gw_interface { } function create_neutron_initial_network { - local project_id - project_id=$(openstack project list | grep " demo " | get_field 1) - die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" - # Allow drivers that need to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -171,14 +167,14 @@ function create_neutron_initial_network { die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi if [[ "$IP_VERSION" =~ .*6 ]]; then @@ -188,7 +184,7 @@ function create_neutron_initial_network { fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) - die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi if [[ $Q_AGENT == "openvswitch" ]]; then @@ -198,16 +194,16 @@ function create_neutron_initial_network { fi else NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then # Create IPv4 private subnet - SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id) + SUBNET_ID=$(_neutron_create_private_subnet_v4) fi if 
[[ "$IP_VERSION" =~ .*6 ]]; then # Create IPv6 private subnet - IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id) + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) fi fi @@ -216,11 +212,11 @@ function create_neutron_initial_network { if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi EXTERNAL_NETWORK_FLAGS="--external" @@ -249,7 +245,6 @@ function create_neutron_initial_network { # Create private IPv4 subnet function _neutron_create_private_subnet_v4 { - local project_id=$1 if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi @@ -263,13 +258,12 @@ function _neutron_create_private_subnet_v4 { subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } # Create private IPv6 subnet function _neutron_create_private_subnet_v6 { - local project_id=$1 die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" @@ -285,7 +279,7 @@ function _neutron_create_private_subnet_v6 { subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } From 189c7ff14222fe365a7376e7ef7171bfb2c74b24 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 25 Mar 2022 14:06:52 +0100 Subject: [PATCH 1562/1936] Update DEVSTACK_SERIES to zed stable/yoga branch has been created now and current master is for zed. Change-Id: I8743a3440a0ce96acb24b34971548b43ae7c8d4c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e48fd81d16..d22fa88373 100644 --- a/stackrc +++ b/stackrc @@ -235,7 +235,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="yoga" +DEVSTACK_SERIES="zed" ############## # From 8dc342d400e4e19541bdd0627a746052875364c3 Mon Sep 17 00:00:00 2001 From: zhouyanbing Date: Sat, 26 Mar 2022 10:44:40 +0800 Subject: [PATCH 1563/1936] remove unuseful local variable define the local varibale: api_cell_conf in start_nova_rest function is unuseful, so remove it now. 
Change-Id: I0019ce807cf3905ee246b684fce2abcb46336306 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 509cba6ff2..4c14374d0f 100644 --- a/lib/nova +++ b/lib/nova @@ -1045,7 +1045,6 @@ function start_nova_rest { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - local api_cell_conf=$NOVA_CONF local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" From 5c51a95d10ba886fc9136e804844f60bc71aecf9 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 28 Mar 2022 14:00:54 +0200 Subject: [PATCH 1564/1936] Drop setup.py and setup.cfg devstack isn't a python project, these were introduced only for docs building and made redundant with [0]. We can remove them now. [0] Iedcc008b170821aa74acefc02ec6a243a0dc307c Signed-off-by: Dr. Jens Harbott Change-Id: I90ca1c6918c016d10c579fbae49d13fff1ed59af --- setup.cfg | 12 ------------ setup.py | 22 ---------------------- 2 files changed, 34 deletions(-) delete mode 100644 setup.cfg delete mode 100755 setup.py diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index a4e621f6df..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,12 +0,0 @@ -[metadata] -name = DevStack -summary = OpenStack DevStack -description_file = - README.rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/devstack/latest -classifier = - Intended Audience :: Developers - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux diff --git a/setup.py b/setup.py deleted file mode 100755 index 70c2b3f32b..0000000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) From 45b029064f3f9ebb94cca97e572d9c0500abe21f Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 25 Mar 2022 22:23:04 -0500 Subject: [PATCH 1565/1936] Move openEuler job to experimental pipeline OpenEuler job fails 100% of the time. As discussed in QA meeting, we agreed to move OpenEuler job to experimental pipeline. - https://meetings.opendev.org/meetings/qa/2022/qa.2022-03-22-15.00.log.html#l-76 Once it is fixed, we can think of adding back to regular pipeline. 
Change-Id: I831889a09fabe5bed5522d17e352ec8009eac321 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 067d3f5b08..0dda2624d2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -849,7 +849,6 @@ - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - - devstack-platform-openEuler-20.03-SP2 - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -948,6 +947,7 @@ experimental: jobs: + - devstack-platform-openEuler-20.03-SP2 - nova-multi-cell - nova-next - neutron-fullstack-with-uwsgi From f4a703661ebea05690fadf93fd13df6e54a49b59 Mon Sep 17 00:00:00 2001 From: afariasa Date: Wed, 6 Apr 2022 15:23:11 +0000 Subject: [PATCH 1566/1936] Add OpenStack two nodes nodeset for Centos 9 Change-Id: I01c8e5e0e88d0dcfe778f19548a2e268406ef6bf --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 0dda2624d2..1c517f1def 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -176,6 +176,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + - name: compute1 + label: centos-9-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: From eca9783a0ad9c7d7e2e8267457be8d4dd8b55502 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Mar 2022 23:26:13 +0000 Subject: [PATCH 1567/1936] ensure /usr/local/bin in in path osc is typicaly installed in /usr/local/bin to avoid command not found errors when invoking osc in devstack ensure that /usr/local/bin is included in the PATH. Change-Id: I605fbc4b131149bf5d1b6307b360fe365c680b1a --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0082b99f11..6e9ced985e 100755 --- a/stack.sh +++ b/stack.sh @@ -67,7 +67,9 @@ unset `env | grep -E '^OS_' | cut -d = -f 1` umask 022 # Not all distros have sbin in PATH for regular users. 
-PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From bfbd2be00b38fbc4a5fd082cf44e0fdf06cc5330 Mon Sep 17 00:00:00 2001 From: Artur Angiel Date: Sun, 10 Apr 2022 11:31:21 +0200 Subject: [PATCH 1568/1936] Added recursive for deletion of $OVN_RUNDIR After ./unstack.sh trying to 'enable_plugin venus https://opendev.org/openstack/venus' gived following error: +lib/neutron_plugins/ovn_agent:install_ovn:363 sudo ln -s /var/run/openvswitch /var/run/ovn ln: failed to create symbolic link '/var/run/ovn/openvswitch': File exists which led to: +lib/neutron_plugins/ovn_agent:cleanup_ovn:801 sudo rm -f /var/run/ovn rm: cannot remove '/var/run/ovn': Is a directory Change-Id: I1cafdc0c71093ed7249bb9748b57d51110986686 --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..2938f472bc 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -798,5 +798,5 @@ function cleanup_ovn { _cleanup $ovs_path fi - sudo rm -f $OVN_RUNDIR + sudo rm -rf $OVN_RUNDIR } From 676dcaf94487665882be048cfe1f3206d6807e0f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 13 Apr 2022 15:04:46 +1000 Subject: [PATCH 1569/1936] Mark our source trees as safe for git to use as other users git commit [1] introduced a new behaviour to work around a CVE that disallows any git operations in directories not owned by the current user. This may seem unrelated to installation, but it plays havoc with PBR, which calls out to git to get to get revision history. So if you are "pip install"-ing from a source tree you don't own, the PBR git calls in that tree now fail and the install blows up. This plays havoc with our model. Firstly, we checkout all code as "stack" then install it globally with "sudo" (i.e. root) -- which breaks. We also have cases of essentially the opposite -- checkouts we have installed as root, but then run tox in them as a regular user; tox wants to install the source in its venv but now we have another user conflict. This uses the only available configuration option to avoid that by globally setting the source directories we clone as safe. This is an encroachment of the global system for sure, but is about the only switch available at the moment. For discussion of other approaches, see [2]. Related-Bug: https://bugs.launchpad.net/devstack/+bug/1968798 [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 [2] https://review.opendev.org/c/openstack/devstack/+/837636 Change-Id: Ib9896a99b6d6c4d359ee412743ce30512b3c4fb7 --- functions-common | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/functions-common b/functions-common index b2cf9d99c6..ddef2e4980 100644 --- a/functions-common +++ b/functions-common @@ -673,6 +673,13 @@ function git_clone { fi fi + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. 
+ # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --global --add safe.directory ${git_dest} + # print out the results so we know what change was used in the logs cd $git_dest git show --oneline | head -1 From 4baeb3b51fcb6196fa311f823ad3f0ac13ccf341 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 13 Apr 2022 13:44:07 -0700 Subject: [PATCH 1570/1936] Write safe.directory items to system git config This is necessary for more consistent behavior across multiple distro versions. Apparently somewhere along the way, git started looking at the current user's home directory instead of $HOME. Related-Bug: https://bugs.launchpad.net/devstack/+bug/1968798 Change-Id: I941ef5ea90970a0901236afe81c551aaf24ac1d8 --- functions-common | 7 ++++++- unstack.sh | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index ddef2e4980..8651604b79 100644 --- a/functions-common +++ b/functions-common @@ -677,8 +677,13 @@ function git_clone { # about how we clone and work with repos. Mark them safe globally # as a work-around. # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. + # # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 - sudo git config --global --add safe.directory ${git_dest} + sudo git config --system --add safe.directory ${git_dest} # print out the results so we know what change was used in the logs cd $git_dest diff --git a/unstack.sh b/unstack.sh index 4b57b6e344..813f9a8117 100755 --- a/unstack.sh +++ b/unstack.sh @@ -181,3 +181,8 @@ fi clean_pyc_files rm -Rf $DEST/async + +# Clean any safe.directory items we wrote into the global +# gitconfig. We can identify the relevant ones by checking that they +# point to somewhere in our $DEST directory. +sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig From 28bed125a2555fb3da778898d6ae310175972d3c Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 20 Apr 2022 15:11:39 -0400 Subject: [PATCH 1571/1936] nova: unset cpu_model on aarch64 Without this, running DevStack on an `aarch64` environment will end up in cpu_model set to "Nehalem" and cpu_mode set to "host-passthrough" which does not work. This patch drops that value under aarch64 environments. Change-Id: I30be5a388dda5ccf08718670dbb14a28a4a8a8eb --- lib/nova_plugins/hypervisor-libvirt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c1cd132548..6c6f4c4261 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -56,6 +56,10 @@ function configure_nova_hypervisor { # arm64-specific configuration if is_arch "aarch64"; then iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. + inidelete $NOVA_CONF libvirt cpu_model fi if isset ENABLE_FILE_INJECTION; then From 7fa24750a676a44ab224206cc13096f904057d44 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 20 Apr 2022 15:42:43 -0400 Subject: [PATCH 1572/1936] ovn: use bundled ovs We are using the latest OVS, however, OVN needs to build using the OVS submodule since some of the signatures don't work[1]. 
[1]: https://github.com/ovn-org/ovn/issues/128 Change-Id: I3ad7e5e80f1141c3d94f7ce7c8b8f8fdb9fb7c3c --- lib/neutron_plugins/ovn_agent | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..bf1b23a82b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -320,8 +320,24 @@ function compile_ovn { ./boot.sh fi + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure + fi + make -j$(($(nproc) + 1)) + popd + if [ ! -f config.status ] || [ configure -nt config.status ] ; then - ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir + ./configure $prefix $localstatedir fi make -j$(($(nproc) + 1)) sudo make install From c2772c2984e4f29aa6032725e4f7d8680a54ed19 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 8 Apr 2022 08:48:49 -0700 Subject: [PATCH 1573/1936] Gather performance data after tempest This makes us gather a bunch of consistent statistics after we run tempest that can be use to measure the impact of a given change. These are stable metrics such as "number of DB queries made" and "how much memory is each service using after a tempest run." Note that this will always run after devstack to generate the JSON file, but there are two things that control its completeness: - MYSQL_GATHER_PERFORMANCE must be enabled to get per-db stats - Unless tls-proxy is enabled, we will only get API stats for keystone Change-Id: Ie3b1504256dc1c9c6b59634e86fa98494bcb07b1 --- .zuul.yaml | 1 + lib/databases/mysql | 9 + playbooks/post.yaml | 3 + roles/capture-performance-data/README.rst | 25 +++ .../defaults/main.yaml | 3 + .../capture-performance-data/tasks/main.yaml | 15 ++ stackrc | 4 + tools/get-stats.py | 155 ++++++++++++++++++ 8 files changed, 215 insertions(+) create mode 100644 roles/capture-performance-data/README.rst create mode 100644 roles/capture-performance-data/defaults/main.yaml create mode 100644 roles/capture-performance-data/tasks/main.yaml create mode 100755 tools/get-stats.py diff --git a/.zuul.yaml b/.zuul.yaml index 0dda2624d2..329cb527f3 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -389,6 +389,7 @@ '{{ devstack_log_dir }}/worlddump-latest.txt': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/performance.json': logs '{{ stage_dir }}/apache': logs '{{ stage_dir }}/apache_config': logs '{{ stage_dir }}/etc': logs diff --git a/lib/databases/mysql b/lib/databases/mysql index 0f45273d4b..6b3ea0287c 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -150,6 +150,15 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance_schema items" + # Enable long query history + iniset -sudo $my_conf mysqld \ + performance-schema-consumer-events-statements-history-long TRUE + iniset -sudo $my_conf mysqld \ + performance_schema_events_stages_history_long_size 1000000 + fi + restart_service $MYSQL_SERVICE_NAME } diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 
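For illustration only (not part of the original change): a minimal sketch of how the collector added below might be invoked by hand on a completed devstack, mirroring the flags used by the capture-performance-data role in this patch. The stackrc path, Apache log location, and output filename are assumptions and will vary by install; the script writes a single JSON document to stdout.

    # Hypothetical manual run; paths and credentials are assumptions, the
    # database variables come from sourcing the devstack settings as the
    # role does.
    source /opt/stack/devstack/stackrc
    python3 /opt/stack/devstack/tools/get-stats.py \
        --db-user="$DATABASE_USER" \
        --db-pass="$DATABASE_PASSWORD" \
        --db-host="$DATABASE_HOST" \
        --apache-log=/var/log/apache2/access.log > performance.json

Per-database query counts only appear when MYSQL_GATHER_PERFORMANCE is left enabled so the performance_schema history tables are populated, and API call statistics cover only the services whose access logs are passed via --apache-log.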
9e66f20e9e..d8d5f6833c 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -20,6 +20,9 @@ roles: - export-devstack-journal - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data - devstack-project-conf # capture-system-logs should be the last role before stage-output - capture-system-logs diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..2d2cfe4b8b --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + python3 {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} diff --git a/stackrc b/stackrc index d22fa88373..c3254dcce4 100644 --- a/stackrc +++ b/stackrc @@ -193,6 +193,10 @@ ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. 
This is # in the format for timeout(1); diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..dc0bd0f9e5 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,155 @@ +#!/usr/bin/python3 + +import argparse +import datetime +import glob +import itertools +import json +import os +import psutil +import re +import socket +import subprocess +import sys +import pymysql + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = int(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + +def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + db = pymysql.connect(host=host, user=user, password=passwd, + database='performance_schema', + cursorclass=pymysql.cursors.DictCursor) + with db: + with db.cursor() as cur: + cur.execute( + 'SELECT COUNT(*) AS queries,current_schema AS db FROM ' + 'events_statements_history_long GROUP BY current_schema') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + for line in open(logfile).readlines(): + m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" 
([0-9]{3}) ([0-9]+)', + line) + if m: + method = m.group(1) + path = m.group(2) + status = m.group(4) + size = int(m.group(5)) + + try: + service, rest = path.split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = path + rest = '' + + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method, 0) + stats[service][method] += 1 + stats[service]['largest'] = max(stats[service]['largest'], size) + + # Flatten this for ES + return [{'service': service, 'log': os.path.basename(logfile), + **vals} + for service, vals in stats.items()] + + +def get_http_stats(logfiles): + return list(itertools.chain.from_iterable(get_http_stats_for_log(log) + for log in logfiles)) + + +def get_report_info(): + return { + 'timestamp': datetime.datetime.now().isoformat(), + 'hostname': socket.gethostname(), + } + + +if __name__ == '__main__': + process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd'] + parser = argparse.ArgumentParser() + parser.add_argument('--db-user', default='root', + help=('MySQL user for collecting stats ' + '(default: "root")')) + parser.add_argument('--db-pass', default=None, + help='MySQL password for db-user') + parser.add_argument('--db-host', default='localhost', + help='MySQL hostname') + parser.add_argument('--apache-log', action='append', default=[], + help='Collect API call stats from this apache log') + parser.add_argument('--process', action='append', + default=process_defaults, + help=('Include process stats for this cmdline regex ' + '(default is %s)' % ','.join(process_defaults))) + args = parser.parse_args() + + data = { + 'services': get_services_stats(), + 'db': args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': get_processes_stats(args.process), + 'api': get_http_stats(args.apache_log), + 'report': get_report_info(), + } + + print(json.dumps(data, indent=2)) From 4423450eb334aad6f889ddae2ab9b9a2a2197a4a Mon Sep 17 00:00:00 2001 From: Zhou Yanbing Date: Thu, 21 Apr 2022 15:00:41 +0800 Subject: [PATCH 1574/1936] modify the sample value of LOGDAYS the value of LOGDAYS in samples/local.conf is 2, so change the value in the comment and the sample value in the document to be consistent with it. Change-Id: I5822bbf1d6ad347c67c886be1e3325113d079114 --- doc/source/configuration.rst | 2 +- samples/local.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index dd8f21faaf..40a8725b8d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -279,7 +279,7 @@ number of days of old log files to keep. :: - LOGDAYS=1 + LOGDAYS=2 Some coloring is used during the DevStack runs to make it easier to see what is going on. This can be disabled with:: diff --git a/samples/local.conf b/samples/local.conf index 8b76137c38..55b729809d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -49,7 +49,7 @@ SERVICE_PASSWORD=$ADMIN_PASSWORD # path of the destination log file. A timestamp will be appended to the given name. LOGFILE=$DEST/logs/stack.sh.log -# Old log files are automatically removed after 7 days to keep things neat. Change +# Old log files are automatically removed after 2 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. 
LOGDAYS=2 From af75f689fa5d03446593c3b4c7724f0922f0f19a Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 14 Apr 2022 21:48:38 +0100 Subject: [PATCH 1575/1936] install mod_ssl on centos 9 stream by default This change adds mod_ssl to the default set of rpms installed on rpm based distros. this is required if the tls-proxy service is enabled for multi node centos based jobs. Change-Id: I52652de88352094c824da68e5baf7db4c17cb027 --- files/rpms/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/general b/files/rpms/general index 163a7c8f24..668705b1c3 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -17,6 +17,7 @@ libxml2-devel # lxml libxslt-devel # lxml libyaml-devel make # dist:openEuler-20.03 +mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server openssl From 7191c5e7e7edb1e2f28c0ce71294f61b409cf16b Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Fri, 22 Apr 2022 12:01:13 +0200 Subject: [PATCH 1576/1936] Use proper sed separator for paths I941ef5ea90970a0901236afe81c551aaf24ac1d8 added a sed command that should match and delete path values but used '/' as sed separator. This leads to error in unstack.sh runs when the path also contains '/': +./unstack.sh:main:188 sudo sed -i '/directory=/opt/stack/ d' /etc/gitconfig sed: -e expression #1, char 13: unknown command: `o' So this patch replace '/' separator with '+'. Change-Id: I06811c0d9ee7ecddf84ef1c6dd6cff5129dbf4b1 --- unstack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index 813f9a8117..a36af3fb59 100755 --- a/unstack.sh +++ b/unstack.sh @@ -185,4 +185,4 @@ rm -Rf $DEST/async # Clean any safe.directory items we wrote into the global # gitconfig. We can identify the relevant ones by checking that they # point to somewhere in our $DEST directory. -sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig +sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig From c6dfd169aeb3fbf2ba73ad8403cc1feb7c6ecf76 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 25 Apr 2022 15:19:06 -0500 Subject: [PATCH 1577/1936] Drop centos 8 stream testing In Zed cycle testing runtime, we are targetting the centos 9 stream - https://governance.openstack.org/tc/reference/runtimes/zed.html With dropping the python 3.6 support, project started adding python 3.8 as minimum, example nova: - https://github.com/openstack/nova/blob/56b5aed08c6a3ed81b78dc216f0165ebfe3c3350/setup.cfg#L13 with that, centos 8 stream job is failing 100% - https://zuul.openstack.org/build/970d029dc96742c3aa0f6932a35e97cf - https://zuul.openstack.org/builds?job_name=devstack-platform-centos-8-stream&skip=0 This commit drops centos-8-stream testing so that we focus on centos-9-stream. 
Change-Id: I045e67b1ca79aba1b2a7be9f88d7804c69c6d781 --- .zuul.yaml | 51 --------------------------------------------------- 1 file changed, 51 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8fd4d0212c..a437c1cc02 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -56,16 +56,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-8-stream - nodes: - - name: controller - label: centos-8-stream - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-9-stream nodes: @@ -146,36 +136,6 @@ nodes: - compute1 -- nodeset: - name: openstack-two-node-centos-8-stream - nodes: - - name: controller - label: centos-8-stream - - name: compute1 - label: centos-8-stream - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -694,16 +654,6 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. -- job: - name: devstack-platform-centos-8-stream - parent: tempest-full-py3 - description: CentOS 8 Stream platform test - nodeset: devstack-single-node-centos-8-stream - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - - job: name: devstack-platform-centos-9-stream parent: tempest-full-py3 @@ -877,7 +827,6 @@ - devstack-ipv6 - devstack-enforce-scope - devstack-platform-fedora-latest - - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-multinode From 8615563df47261d9c6dab7c5badbceb399d0e14d Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 18 Oct 2021 16:52:06 +0200 Subject: [PATCH 1578/1936] Global option for enforcing scope (ENFORCE_SCOPE) This updates each devstack service library, to use it as the default value for service-specific RBAC configuration. Change-Id: I41061d042206c411ee3dd94ce91098e612af7ae7 --- .zuul.yaml | 5 +---- functions-common | 2 +- lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/neutron-legacy | 2 +- lib/tempest | 11 ++++++++--- stackrc | 4 ++++ 9 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fc80e6c413..0f047166fa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -646,10 +646,7 @@ This job runs the devstack with scope checks enabled. 
vars: devstack_localrc: - # Keep enabeling the services here to run with system scope - CINDER_ENFORCE_SCOPE: true - GLANCE_ENFORCE_SCOPE: true - NEUTRON_ENFORCE_SCOPE: true + ENFORCE_SCOPE: true - job: name: devstack-multinode diff --git a/functions-common b/functions-common index b2cf9d99c6..603e7d896d 100644 --- a/functions-common +++ b/functions-common @@ -1154,7 +1154,7 @@ function is_ironic_hardware { } function is_ironic_enforce_scope { - is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0 + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 return 1 } diff --git a/lib/cinder b/lib/cinder index b029fa0db4..52818a81eb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -380,7 +380,7 @@ function configure_cinder { iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi - if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $CINDER_CONF oslo_policy enforce_scope true iniset $CINDER_CONF oslo_policy enforce_new_defaults true fi diff --git a/lib/glance b/lib/glance index 9bba938b9d..04b901181c 100644 --- a/lib/glance +++ b/lib/glance @@ -432,7 +432,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi - if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $GLANCE_API_CONF oslo_policy enforce_scope true iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true diff --git a/lib/keystone b/lib/keystone index a4c8a52121..80a136f78d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -265,7 +265,7 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml diff --git a/lib/neutron b/lib/neutron index e7719d4ebc..f24ccfb1a9 100644 --- a/lib/neutron +++ b/lib/neutron @@ -632,7 +632,7 @@ function configure_neutron { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b906a1b2ff..253b457ae1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -500,7 +500,7 @@ function configure_neutron_after_post_config { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else diff --git a/lib/tempest b/lib/tempest index 
45046632b4..1fd4184763 100644 --- a/lib/tempest +++ b/lib/tempest @@ -607,14 +607,19 @@ function configure_tempest { # If services enable the enforce_scope for their policy # we need to enable the same on Tempest side so that # test can be run with scoped token. - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope keystone true iniset $TEMPEST_CONFIG auth admin_system 'all' iniset $TEMPEST_CONFIG auth admin_project_name '' fi - iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" - iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes diff --git a/stackrc b/stackrc index 681e9dee38..72180d07f2 100644 --- a/stackrc +++ b/stackrc @@ -179,6 +179,10 @@ fi # TODO(frickler): Drop this when plugins no longer need it IDENTITY_API_VERSION=3 +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. From 6964ba4a984691d79cff77506a37d0fa222a5599 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Mon, 25 Apr 2022 08:48:20 -0700 Subject: [PATCH 1579/1936] Set public bridge up for v6 only configurations A long time ago, Ironic's IPv6 only job started to fail working with errors indicated the host was unreacable. Turns out, this was because the $ext_gw_interface was not being set to up, and thus could be found in a Down state, and thus the kernel would not accept routes for it. Adds an explicit step to turn up the public bridge, much as done in the IPv4 router plugin code which would also be executed in 4+6. That being said, Ironic's CI jobs are very intentionally IPv6 only to ensure that we have no chances of v4 addressing getting used at any point in time. This should allow Ironic to return it's IPv6 only CI job back to the normal check queue, once a ironic plugin issue has been resolved which was introduced while it was removed. Change-Id: I121ec8a2e9640b21a7126f2eeb23da36b4aa95bf --- lib/neutron_plugins/services/l3 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index c0d74c7728..fbd4692bba 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -403,7 +403,10 @@ function _neutron_configure_router_v6 { ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - # Configure interface for public bridge + # Configure interface for public bridge by setting the interface + # to "up" in case the job is running entirely private network based + # testing. 
+ sudo ip link set $ext_gw_interface up sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface # Any IPv6 private subnet that uses the default IPV6 subnet pool # and that is plugged into the default router (Q_ROUTER_NAME) will From bab0c9210371e1cfa321b8528217a7f2e156d7a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 26 Apr 2022 15:46:56 +0200 Subject: [PATCH 1580/1936] Use tryint() for stats value In some cases the value is [not set], in this case the conversion to integer does not work. Closes-Bug: #1970431 Change-Id: I74df7d8bc9f5cbe0709a6471cf7639caea0b58e8 --- tools/get-stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index dc0bd0f9e5..05f088ef8d 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -31,7 +31,7 @@ def get_service_stats(service): if not line: continue stat, val = line.split('=') - stats[stat] = int(val) + stats[stat] = tryint(val) return stats From 1b601c7b1e8a3ec4816cb827ccd8bf909a05debb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 25 Apr 2022 07:47:56 -0700 Subject: [PATCH 1581/1936] Tolerate missing deps in get-stats.py In order to run on systems where not all requirements are present, we should be tolerant of missing external dependencies, such as psutil and pymysql. Print a warning (to stderr) and just leave out those stats in that case. Also make running the stats collector use ignore_errors:yes to avoid failures in the future. I think the stats is not critical enough to fail a job for bugs like this. Related-Bug: #1970195 Change-Id: I132b0e1f5033c4f109a8b8cc776c0877574c4a49 --- .../capture-performance-data/tasks/main.yaml | 1 + tools/get-stats.py | 24 ++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml index 2d2cfe4b8b..f9bb0f7851 100644 --- a/roles/capture-performance-data/tasks/main.yaml +++ b/roles/capture-performance-data/tasks/main.yaml @@ -13,3 +13,4 @@ {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} --apache-log="{{ i.stat.path }}" {% endfor %} + ignore_errors: yes diff --git a/tools/get-stats.py b/tools/get-stats.py index dc0bd0f9e5..2418c851f9 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -6,12 +6,24 @@ import itertools import json import os -import psutil import re import socket import subprocess import sys -import pymysql + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) # https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion @@ -144,10 +156,10 @@ def get_report_info(): data = { 'services': get_services_stats(), - 'db': args.db_pass and get_db_stats(args.db_host, - args.db_user, - args.db_pass) or [], - 'processes': get_processes_stats(args.process), + 'db': pymysql and args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': psutil and get_processes_stats(args.process) or [], 'api': get_http_stats(args.apache_log), 'report': get_report_info(), } From 42be2425d8782799d3d9f82c6f1789f8b05a4301 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Thu, 28 Apr 2022 18:15:50 +0530 Subject: [PATCH 1582/1936] Collect status of all 
services Would be helpful in troubleshooting services which either fails to start or takes time to start. Related-Bug: #1970679 Change-Id: Iba2fce5f8b1cd00708f092e6eb5a1fbd96e97da0 --- .zuul.yaml | 1 + roles/capture-system-logs/README.rst | 1 + roles/capture-system-logs/tasks/main.yaml | 3 +++ 3 files changed, 5 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index a437c1cc02..2935560951 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -398,6 +398,7 @@ '{{ stage_dir }}/rpm-qa.txt': logs '{{ stage_dir }}/core': logs '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs '{{ stage_dir }}/deprecations.log': logs '{{ stage_dir }}/audit.log': logs /etc/ceph: logs diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst index c28412457a..1376f63bfc 100644 --- a/roles/capture-system-logs/README.rst +++ b/roles/capture-system-logs/README.rst @@ -9,6 +9,7 @@ Stage a number of different logs / reports: - coredumps - dns resolver - listen53 +- services - unbound.log - deprecation messages diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 905806d529..77b5ec5098 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -19,6 +19,9 @@ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt fi + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU # failed to start due to denials from SELinux — useful for CentOS # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack From 1baa8905d5a3d677b5298e76621b9e08e0ed0f13 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 6 May 2022 17:53:54 +0530 Subject: [PATCH 1583/1936] Wait for OVN dbs also along with sockets When OVN is setup from distro packages, the main service is ovn-central which when restarted, restarts ovn-northd, ovn nb and db services. And during the restart ovn dbs(ovnnb_db.db and ovnsb_db.db) are created, which may sometime takes time as seen with ubuntu jammy tests[1]. We already checking for socket's file to be available, let's also check for db files as without it ovn-*ctl operations succeed but changes are not persisted until db files are available and changes are lost with the restart. [1] https://review.opendev.org/c/openstack/devstack/+/839389 Change-Id: I178da7af8cba8bcc8a67174e439df7c0f2c7d4d5 --- lib/neutron_plugins/ovn_agent | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..9022f2d382 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -169,6 +169,17 @@ Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} # Utility Functions # ----------------- +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 5 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + function wait_for_sock_file { local count=0 while [ ! 
-S $1 ]; do @@ -695,8 +706,11 @@ function start_ovn { fi # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db if is_service_enabled tls-proxy; then sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem From 5c765cb8a1866bd3405946d097d7fb06066ae4d4 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 26 Apr 2022 13:08:21 +0200 Subject: [PATCH 1584/1936] Add Ubuntu 22.04 LTS (jammy) platform job The new Ubuntu LTS release has been made last week, start running devstack on it as a platform job. Horizon has issues with py310, so gets disabled for now. Run variants with OVS and OVN(default). Co-Authored-By: yatinkarel Signed-off-by: Dr. Jens Harbott Change-Id: I47696273d6b009f754335b44ef3356b4f5115cd8 --- .zuul.yaml | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 2 +- 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index a437c1cc02..e87f410202 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -16,6 +16,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-single-node-focal nodes: @@ -718,6 +728,69 @@ # from source instead. OVN_BUILD_FROM_SOURCE: True +- job: + name: devstack-platform-ubuntu-jammy + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (jammy) platform test + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_services: + # Horizon doesn't like py310 + horizon: false + +- job: + name: devstack-platform-ubuntu-jammy-ovn-source + parent: devstack-platform-ubuntu-jammy + description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "v21.06.0" + OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-ubuntu-jammy-ovs + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (jammy) platform test (OVS) + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Horizon doesn't like py310 + horizon: false + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -829,6 +902,9 @@ - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-jammy-ovn-source + - devstack-platform-ubuntu-jammy-ovs - devstack-multinode 
- devstack-unit-tests - openstack-tox-bashate diff --git a/stack.sh b/stack.sh index 6e9ced985e..1970105c1d 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From fe52d7f0a88de2dc330923cf6cf52c83ccb92bd6 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 28 Apr 2022 12:34:38 -0700 Subject: [PATCH 1585/1936] Change DB counting mechanism The mysql performance_schema method for counting per-database queries is very heavyweight in that it requires full logging (in a table) of every query. We do hundreds of thousands in the course of a tempest run, which ends up creating its own performance problem. This changes the approach we take, which is to bundle a very tiny sqlalchemy plugin module which counts just what we care about in a special database. It is more complex than just enabling the features in mysql, but it is a massively smaller runtime overhead. It also provides us the opportunity to easily zero the counters just before a tempest run. Change-Id: I361bc30bb970cdaf18b966951f217862d302f0b9 --- lib/databases/mysql | 28 ++++++-- stack.sh | 13 ++++ tools/dbcounter/dbcounter.py | 120 +++++++++++++++++++++++++++++++++ tools/dbcounter/pyproject.toml | 3 + tools/dbcounter/setup.cfg | 14 ++++ tools/get-stats.py | 6 +- 6 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 tools/dbcounter/dbcounter.py create mode 100644 tools/dbcounter/pyproject.toml create mode 100644 tools/dbcounter/setup.cfg diff --git a/lib/databases/mysql b/lib/databases/mysql index 6b3ea0287c..b292da25bd 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -151,12 +151,16 @@ function configure_database_mysql { fi if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then - echo "enabling MySQL performance_schema items" - # Enable long query history - iniset -sudo $my_conf mysqld \ - performance-schema-consumer-events-statements-history-long TRUE - iniset -sudo $my_conf mysqld \ - performance_schema_events_stages_history_long_size 1000000 + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats fi restart_service $MYSQL_SERVICE_NAME @@ -218,7 +222,17 @@ function install_database_python_mysql { function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } diff --git a/stack.sh b/stack.sh index 6e9ced985e..16dce81d3d 100755 --- a/stack.sh +++ b/stack.sh @@ -1512,6 +1512,19 @@ async_cleanup time_totals async_print_timing +if is_service_enabled mysql; then + if [[ 
"$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi + + # Using the cloud # =============== diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..5057f0f393 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,120 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. + """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. + + Attepts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. + if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = ('INSERT INTO queries (db, op, count) ' + ' VALUES (%s, %s, %s) ' + ' ON DUPLICATE KEY UPDATE count=count+%s') + try: + with self.engine.begin() as conn: + r = conn.execute(query, (db, op, count, count)) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. 
+ + This reads "hists" from from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every thirty seconds and/or 100 + pending hits to avoid being starved by constant activity. + """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + total = 0 + last = time.time() + while time.time() - last < 30 and total < 100: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + total += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..f9f26f2175 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/get-stats.py b/tools/get-stats.py index 670e723e82..465afcab5a 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -83,13 +83,11 @@ def proc_matches(proc): def get_db_stats(host, user, passwd): dbs = [] db = pymysql.connect(host=host, user=user, password=passwd, - database='performance_schema', + database='stats', cursorclass=pymysql.cursors.DictCursor) with db: with db.cursor() as cur: - cur.execute( - 'SELECT COUNT(*) AS queries,current_schema AS db FROM ' - 'events_statements_history_long GROUP BY current_schema') + cur.execute('SELECT db,op,count FROM queries') for row in cur: dbs.append({k: tryint(v) for k, v in row.items()}) return dbs From 64d68679d9660e304ab3550929fe9892a124ac6f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 22 Apr 2022 07:58:29 -0700 Subject: [PATCH 1586/1936] Improve API log parsing Two runs of the same job on the same patch can yield quite different numbers for API calls if we just count the raw calls. Many of these are tempest polling for resources, which on a slow worker can require many more calls than a fast one. Tempest seems to not change its User-Agent string, but the client libraries do. So, if we ignore the regular "python-urllib" agent calls, we get a much more stable count of service-to-service API calls in the performance report. Note that we were also logging in a different (less-rich) format for the tls-proxy.log file, which hampers our ability to parse that data in the same format. This switches it to "combined" which is used by the access.log and contains more useful information, like the user-agent, among other things. 
Change-Id: I8889c2e53f85c41150e1245dcbe2a79bac702aad --- lib/tls | 2 +- tools/get-stats.py | 77 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 58 insertions(+), 21 deletions(-) diff --git a/lib/tls b/lib/tls index 5a7f5ae324..b8758cd6d3 100644 --- a/lib/tls +++ b/lib/tls @@ -557,7 +557,7 @@ $listen_string ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info - CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF if is_suse ; then diff --git a/tools/get-stats.py b/tools/get-stats.py index 465afcab5a..ffe467691c 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -1,10 +1,12 @@ #!/usr/bin/python3 import argparse +import csv import datetime import glob import itertools import json +import logging import os import re import socket @@ -25,6 +27,8 @@ print('No pymysql, database information will not be included', file=sys.stderr) +LOG = logging.getLogger('perf') + # https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion @@ -95,26 +99,56 @@ def get_db_stats(host, user, passwd): def get_http_stats_for_log(logfile): stats = {} - for line in open(logfile).readlines(): - m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)', - line) - if m: - method = m.group(1) - path = m.group(2) - status = m.group(4) - size = int(m.group(5)) - - try: - service, rest = path.split('/', 1) - except ValueError: - # Root calls like "GET /identity" - service = path - rest = '' - - stats.setdefault(service, {'largest': 0}) - stats[service].setdefault(method, 0) - stats[service][method] += 1 - stats[service]['largest'] = max(stats[service]['largest'], size) + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. So assume + # 'python-urllib' is tempest so we can tell it apart. 
+ if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) # Flatten this for ES return [{'service': service, 'log': os.path.basename(logfile), @@ -131,6 +165,7 @@ def get_report_info(): return { 'timestamp': datetime.datetime.now().isoformat(), 'hostname': socket.gethostname(), + 'version': 2, } @@ -152,6 +187,8 @@ def get_report_info(): '(default is %s)' % ','.join(process_defaults))) args = parser.parse_args() + logging.basicConfig(level=logging.WARNING) + data = { 'services': get_services_stats(), 'db': pymysql and args.db_pass and get_db_stats(args.db_host, From 92a34dbe951f2ab31fb3432e61cf34db034b0145 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Tue, 17 May 2022 20:10:48 +0530 Subject: [PATCH 1587/1936] Configure placement section in neutron conf Without it segment plugin fails to connect with placement api. Configure the placement section if service is deployed. Closes-Bug: #1973783 Change-Id: Ie7f37770a04f622735cf2263c601257669ab5064 --- lib/neutron-legacy | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 253b457ae1..88ac991167 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -931,6 +931,9 @@ function _configure_neutron_service { configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + # Configure plugin neutron_plugin_configure_service } From 111a38b4d6e6f61c21570e0adba58f6c59c52900 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Fri, 13 May 2022 20:53:26 -0400 Subject: [PATCH 1588/1936] lib/tempest: add wait for Glance image import Glance image import is asynchronous and may be configured to do image conversion. If image import is being used, it's possible that the tempest configuration code is executed before the import has completed and there may be no active images yet. In that case, we will poll glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds (default: 1) to see if there are TEMPEST_GLANCE_IMAGE_COUNT active images (default: 1) up to TEMPEST_GLANCE_IMPORT_POLL_LIMIT times (default: 12). 
You can see an example of the issue this patch addresses in real life: https://review.opendev.org/c/openstack/glance/+/841278/1#message-456096e48b28e5b866deb8bf53e9258ee08219a0 Change-Id: Ie99f12691d9062611a8930accfa14d9540970cc5 --- lib/tempest | 73 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 67 insertions(+), 6 deletions(-) diff --git a/lib/tempest b/lib/tempest index 1fd4184763..206b37b5bf 100644 --- a/lib/tempest +++ b/lib/tempest @@ -71,6 +71,17 @@ TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + # Neutron/Network variables IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) @@ -127,6 +138,48 @@ function set_tempest_venv_constraints { fi } +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes 3 arguments, an array and two +# variables. The array will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# set as the value of *both* other parameters. 
+function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + declare -n img_id_alt=$3 + + # start with a fresh array in case we are called multiple times + img_array=() + + while read -r IMAGE_NAME IMAGE_UUID; do + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + img_id_alt="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + declare -n image_id_alt=$3 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id image_id_alt + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -168,13 +221,21 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" + get_active_images images image_uuid image_uuid_alt + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid image_uuid_alt + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi - images+=($IMAGE_UUID) - done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + fi case "${#images[*]}" in 0) From 083eeee5af61a19a932138b5035a916c7421beee Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 19 May 2022 13:55:35 +0200 Subject: [PATCH 1589/1936] Make jammy platform jobs non-voting We missed to add the jobs to the gate queue and so they have already regressed before they were actually in place. Make them non-voting for now until the issues are fixed. Signed-off-by: Dr. 
Jens Harbott Change-Id: I5d1f83dfe23747096163076dcf80750585c0260e --- .zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index b449ea67be..0e114afb3e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -731,6 +731,7 @@ parent: tempest-full-py3 description: Ubuntu 22.04 LTS (jammy) platform test nodeset: openstack-single-node-jammy + voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -742,6 +743,7 @@ name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + voting: false vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True @@ -754,6 +756,7 @@ parent: tempest-full-py3 description: Ubuntu 22.04 LTS (jammy) platform test (OVS) nodeset: openstack-single-node-jammy + voting: false timeout: 9000 vars: configure_swap_size: 8192 From 560ee16a85b22b4456177d289cf53c31c6a1ca6b Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 19 May 2022 13:58:11 +0200 Subject: [PATCH 1590/1936] Drop openEuler support The job is broken since it is running with python3.7 and most services now require at least python3.8. Signed-off-by: Dr. Jens Harbott Change-Id: Ie21f71acffabd78c79e2b141951ccf30a5c06445 --- .zuul.yaml | 25 ------------------------- doc/source/index.rst | 2 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +--- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 13 +------------ lib/apache | 2 +- lib/nova | 6 +----- roles/apache-logs-conf/tasks/main.yaml | 1 - stack.sh | 9 +-------- tools/fixup_stuff.sh | 24 ------------------------ 12 files changed, 9 insertions(+), 83 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b449ea67be..3beae1f822 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,16 +106,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-openeuler-20.03-sp2 - nodes: - - name: controller - label: openEuler-20-03-LTS-SP2 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-two-node nodes: @@ -712,20 +702,6 @@ # Enable Neutron ML2/OVS services q-agt: true -- job: - name: devstack-platform-openEuler-20.03-SP2 - parent: tempest-full-py3 - description: openEuler 20.03 SP2 platform test - nodeset: devstack-single-node-openeuler-20.03-sp2 - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - devstack_localrc: - # NOTE(wxy): OVN package is not supported by openEuler yet. Build it - # from source instead. - OVN_BUILD_FROM_SOURCE: True - - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -1001,7 +977,6 @@ experimental: jobs: - - devstack-platform-openEuler-20.03-SP2 - nova-multi-cell - nova-next - neutron-fullstack-with-uwsgi diff --git a/doc/source/index.rst b/doc/source/index.rst index feb50ce4e9..08ce4cb061 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler. +latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. 
diff --git a/files/rpms/ceph b/files/rpms/ceph index 93b5746aa6..33a55f80ea 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9,openEuler-20.03 +redhat-lsb-core # not:rhel9 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 668705b1c3..7697513149 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -16,7 +16,6 @@ libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel -make # dist:openEuler-20.03 mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server @@ -29,8 +28,7 @@ psmisc python3-devel python3-pip python3-systemd -redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376 -systemd-devel # dist:openEuler-20.03 +redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index 9e8621c628..9522e5729d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -6,7 +6,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules # not:openEuler-20.03 +kernel-modules kpartx parted polkit diff --git a/files/rpms/swift b/files/rpms/swift index a838d7839e..7d906aa926 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,5 +1,5 @@ curl -liberasurecode-devel # not:openEuler-20.03 +liberasurecode-devel memcached rsync-daemon sqlite diff --git a/functions-common b/functions-common index b660245337..be966e96a6 100644 --- a/functions-common +++ b/functions-common @@ -399,7 +399,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb + sudo dnf install -y redhat-lsb-core else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -471,10 +471,6 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (openEuler) ]]; then - # The DISTRO here is `openEuler-20.03`. While, actually only openEuler - # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs. - DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -526,7 +522,6 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ - [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ @@ -576,12 +571,6 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } -function is_openeuler { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_VENDOR" = "openEuler" ] -} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 02827d1f1b..94f3cfc95a 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then + elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. 
# See: diff --git a/lib/nova b/lib/nova index 4c14374d0f..da3a10edd0 100644 --- a/lib/nova +++ b/lib/nova @@ -324,11 +324,7 @@ EOF # set chap algorithms. The default chap_algorithm is md5 which will # not work under FIPS. - # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in - # and determine the correct solution for openeuler here - if ! is_openeuler; then - iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - fi + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" # ensure that iscsid is started, even when disabled by default restart_service iscsid diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index 6b7ea37857..bd64574c9b 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,7 +64,6 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' - 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index e53280e00c..df283bbe50 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -280,13 +280,6 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh -# TODO(wxy): Currently some base packages are not installed by default in -# openEuler. Remove the code below once the packaged are installed by default -# in the future. -if [[ $DISTRO == "openEuler-20.03" ]]; then - install_package hostname -fi - # Configure Distro Repositories # ----------------------------- diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f24ac40ad5..daa1bc6301 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -153,32 +153,8 @@ function fixup_ubuntu { sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } -function fixup_openeuler { - if ! is_openeuler; then - return - fi - - if is_arch "x86_64"; then - arch="x86_64" - elif is_arch "aarch64"; then - arch="aarch64" - fi - - # Some packages' version in openEuler are too old, use the newer ones we - # provide in oepkg. (oepkg is an openEuler third part yum repo which is - # endorsed by openEuler community) - (echo '[openstack-ci]' - echo 'name=openstack' - echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/' - echo 'enabled=1' - echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null - - yum_install liberasurecode-devel -} - function fixup_all { fixup_ubuntu fixup_fedora fixup_suse - fixup_openeuler } From 50e3c06ec245e8a5e7ca24015b0c152e3bc40a5c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 19 May 2022 13:36:43 -0700 Subject: [PATCH 1591/1936] Fix dbcounter installation on Jammy There are two problems with dbcounter installation on Jammy. The first is straightforward. We have to use `py_modules` instead of `modules` to specify the source file. 
I don't know how this works on other distros but the docs [0] seem to clearly indicate py_modules does this. The second issue is quite an issue and requires story time. When pip/setuptools insteall editable installs (as is done for many of the openstack projects) it creates an easy-install.pth file that tells the python interpreter to add the source dirs of those repos to the python path. Normally these paths are appended to your sys.path. Pip's isolated build env relies on the assumption that these paths are appeneded to the path when it santizes sys.path to create the isolated environemnt. However, when SETUPTOOLS_SYS_PATH_TECHNIQUE is set to rewrite the paths are not appended and are inserted in the middle. This breaks pip's isolated build env which broke dbcounter installations. We fix this by not setting SETUPTOOLS_SYS_PATH_TECHNIQUE to rewrite. Upstream indicates the reason we set this half a decade ago has since been fixed properly. The reason Jammy and nothing else breaks is that python3.10 is the first python version to use pip's isolated build envs by default. I've locally fiddled with a patch to pip [1] to try and fix this behavior even when rewrite is set. I don't plan to push this upstream but it helps to illustrate where the problem lies. If someone else would like to upstream this feel free. Finally this change makes the jammy platform job voting again and adds it to the gate to ensure we don't regress again. [0] https://docs.python.org/3/distutils/sourcedist.html#specifying-the-files-to-distribute [1] https://paste.opendev.org/show/bqVAuhgMtVtfYupZK5J6/ Change-Id: I237f5663b0f8b060f6df130de04e17e2b1695f8a --- .zuul.yaml | 2 +- inc/python | 1 - tools/dbcounter/setup.cfg | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0e114afb3e..fc3d76d2b3 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -731,7 +731,6 @@ parent: tempest-full-py3 description: Ubuntu 22.04 LTS (jammy) platform test nodeset: openstack-single-node-jammy - voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -950,6 +949,7 @@ - devstack - devstack-ipv6 - devstack-platform-centos-9-stream + - devstack-platform-ubuntu-jammy - devstack-enforce-scope - devstack-multinode - devstack-unit-tests diff --git a/inc/python b/inc/python index 9382d352dc..d032a10eb9 100644 --- a/inc/python +++ b/inc/python @@ -194,7 +194,6 @@ function pip_install { https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ $cmd_pip $upgrade \ $@ result=$? diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg index f9f26f2175..12300bf619 100644 --- a/tools/dbcounter/setup.cfg +++ b/tools/dbcounter/setup.cfg @@ -8,7 +8,7 @@ url = http://github.com/openstack/devstack license = Apache [options] -modules = dbcounter +py_modules = dbcounter entry_points = [sqlalchemy.plugins] dbcounter = dbcounter:LogCursorEventsPlugin From 1d5be95196d31ba1a4ef125f4b06a5730f2af113 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Mon, 23 May 2022 08:46:05 -0700 Subject: [PATCH 1592/1936] Cleanup comment that should've been removed The previous change, I237f5663b0f8b060f6df130de04e17e2b1695f8a, removed a SETUPTOOLS flag, but not the comment explaining why that flag was previously set. Clean up that comment. 
Change-Id: I32b0240fd56310d7f10596aaa8ef432679bfd66a --- inc/python | 3 --- 1 file changed, 3 deletions(-) diff --git a/inc/python b/inc/python index d032a10eb9..3eb3efe80e 100644 --- a/inc/python +++ b/inc/python @@ -186,9 +186,6 @@ function pip_install { $xtrace - # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep - # the same behaviour of setuptools before version 25.0.0. - # related issue: https://github.com/pypa/pip/issues/3874 $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ From 1cdf413ac6f993dc2074741be4627acdc3f10304 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 23 May 2022 13:56:13 -0700 Subject: [PATCH 1593/1936] Do not barf stack trace if stats DB is missing This can happen if devstack fails to run, but we still run the post tasks. Also could happen if some sort of hybrid job configuration does not run all of devstack but we still end up running post jobs. Just warn to stderr and assume no DB info. Change-Id: I211a331ab668dbb0ad7882908cca4363f865d924 --- tools/get-stats.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index ffe467691c..e0c20f2db9 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -86,9 +86,17 @@ def proc_matches(proc): def get_db_stats(host, user, passwd): dbs = [] - db = pymysql.connect(host=host, user=user, password=passwd, - database='stats', - cursorclass=pymysql.cursors.DictCursor) + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + with db: with db.cursor() as cur: cur.execute('SELECT db,op,count FROM queries') From c64ea4f213afebd1602d05cdd4d5bc14eaf5356b Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 20 Apr 2022 12:30:09 +0530 Subject: [PATCH 1594/1936] Fix doc and user create script to set homedir permissions RHEL based distros set homedir permissions to 700, and Ubuntu 21.04+ to 750[1], i.e missing executable permission for group or others, this results into failures as defined in the below bug. Since in doc we add useradd command, it's good to add instructions to fix the permissions there itself instead of getting failures during installation and then fixing it. Also update user create script to fix permissions by adding executable bit to DEST directory if missing. [1] https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/19533 Closes-Bug: #1966858 Change-Id: Id2787886433281238eb95ee11a75eddeef514293 --- doc/source/guides/multinode-lab.rst | 8 ++++++++ doc/source/guides/single-machine.rst | 8 ++++++++ doc/source/index.rst | 8 ++++++++ tools/create-stack-user.sh | 9 +++++++++ 4 files changed, 33 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 81c5945307..79a76dedb1 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -75,6 +75,14 @@ Otherwise create the stack user: useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. 
+ +:: + + chmod +x /opt/stack + This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a password: diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a0e97edb37..03d93743f7 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -49,6 +49,14 @@ below) $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it will need to have sudo privileges: diff --git a/doc/source/index.rst b/doc/source/index.rst index feb50ce4e9..a79a7e602c 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -57,6 +57,14 @@ to run DevStack with $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it should have sudo privileges: diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 919cacb036..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -44,6 +44,15 @@ fi if ! getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" From 599b241d32cd067a9a26c54fe178dd2bd28426d6 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 21 Oct 2021 12:07:17 +0200 Subject: [PATCH 1595/1936] Run debian platform job with OVN Packages for OVN are now available in bullseye, so we can drop the special handling. Signed-off-by: Dr. Jens Harbott Change-Id: I5e5c78aa19c5208c207ddcf14e208bae8fbc3c55 --- .zuul.yaml | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..03553d3b57 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -670,37 +670,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS - # for the time being. 
- devstack_localrc: - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - devstack_services: - # Disable OVN services - ovn-northd: false - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true - group-vars: - subnode: - devstack_services: - # Disable OVN services - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - job: name: devstack-platform-ubuntu-jammy From e85c68e60ff460f0e16eefd5f084862628a3c54d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 26 May 2022 09:31:36 -0700 Subject: [PATCH 1596/1936] Add apache2 to the services we collect for memory Change-Id: Ic6daef5b4df50ce43c6782542cb54c1958e54655 --- tools/get-stats.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index ffe467691c..80e7c642a2 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -54,7 +54,8 @@ def get_service_stats(service): def get_services_stats(): services = [os.path.basename(s) for s in - glob.glob('/etc/systemd/system/devstack@*.service')] + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] return [dict(service=service, **get_service_stats(service)) for service in services] From 6dd896feface3d0413437221a63e508b359ed615 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Tue, 26 Apr 2022 16:37:07 +0530 Subject: [PATCH 1597/1936] Allow to skip stop of ovn services Grenade jobs stop services, check fip connectivity for a nova server and then upgrade to next release. But since ovn data plane and db services are stopped along with other services, fip connectivity fails as a result. We shouldn't stop these services along with other neutron services. This patch adds a new variable "SKIP_STOP_OVN" which can be used by grenade jobs to skip stop of ovn services. This will also fix the ovn grenade jobs. Also source fixup_stuff.sh so function fixup_ovn_centos is available. It's already sourced in stack.sh but that's not used in grenade run. Change-Id: I94818a19f19973779cb2e11753d2881d54dfa3bc --- lib/neutron-legacy | 5 ++++- lib/neutron_plugins/ovn_agent | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 88ac991167..e9b55b6b02 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -138,6 +138,9 @@ Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + # The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. # /etc/neutron is assumed by many of devstack plugins. Do not change. 
_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron @@ -638,7 +641,7 @@ function stop_mutnauq { stop_mutnauq_other stop_mutnauq_l2_agent - if [[ $Q_AGENT == "ovn" ]]; then + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then stop_ovn fi } diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 9022f2d382..dfd55deae5 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -395,6 +395,8 @@ function install_ovn { sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn else + # Load fixup_ovn_centos + source ${TOP_DIR}/tools/fixup_stuff.sh fixup_ovn_centos install_package $(get_packages openvswitch) install_package $(get_packages ovn) From 35fb53423a68f8d156693ae79c1c6950538a33b7 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 3 Jun 2022 10:10:50 +0530 Subject: [PATCH 1598/1936] [ironic][swift]Temporary add sha1 to allowed_digests Swift removed sha1 from supported digests with [1] and that broked ironic tinyipa job. Temorary add sha1 to allowed_digests until it's fixed in ironic. [1] https://review.opendev.org/c/openstack/swift/+/525771 Story: 2010068 Task: 45539 Change-Id: I68dfc472ce901058b6a7d691c98ed1641d431e54 --- lib/swift | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/swift b/lib/swift index ba92f3dcc3..251c4625b5 100644 --- a/lib/swift +++ b/lib/swift @@ -402,6 +402,11 @@ function configure_swift { # Versioned Writes iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" From f7d87aa433d344f5db0201aca047a987cba3a0af Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 2 Jun 2022 11:08:32 -0700 Subject: [PATCH 1599/1936] Capture QEMU core dumps when possible Some of the hardest-to-debug issues are qemu crashes deep in a nova workflow that can't be reproduced locally. This adds a post task to the playbook so that we capture the most recent qemu core dump, if there is one. 
Change-Id: I48a2ea883325ca920b7e7909edad53a9832fb319 --- .zuul.yaml | 1 + playbooks/post.yaml | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..fdcee59bc5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -377,6 +377,7 @@ '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs '{{ stage_dir }}/performance.json': logs diff --git a/playbooks/post.yaml b/playbooks/post.yaml index d8d5f6833c..0047d78ea5 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -17,6 +17,12 @@ dest: "{{ stage_dir }}/verify_tempest_conf.log" state: hard when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes roles: - export-devstack-journal - apache-logs-conf From 96dbf55016a22dc121589a70181e5c7e7e55f8c0 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Fri, 18 Mar 2022 10:56:31 +0100 Subject: [PATCH 1600/1936] Do not create cinder backup pool and key when cephadm is used When cephadm is used, if ENABLE_CEPH_C_BAK is True both pool and key are created by devstack-plugin-ceph. This piece of code can still stay here to make sure the cinder config is properly built. Change-Id: I799521f008123b8e42b2021c1c11d374b834bec3 --- lib/cinder_backups/ceph | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index e4003c0720..4b180490d7 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -26,12 +26,15 @@ CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} function configure_cinder_backup_ceph { - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" From 
c869d59857c636d21ecd0329023038b24252627d Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2020 14:55:08 -0500 Subject: [PATCH 1601/1936] Add support for IPv6 tunnel endpoints Currently, neutron tunnel endpoints must be IPv4 addresses, i.e. $HOST_IP, although IPv6 endpoints are supported by most drivers. Create a TUNNEL_IP_VERSION variable to choose which host IP to use, either HOST_IP or HOST_IPV6, and configure it in the OVS and Linuxbridge agent driver files. The default is still IPv4, but it can be over-ridden by specifying TUNNEL_ENDPOINT_IP accordingly. This behaves similar to the SERVICE_IP_VERSION option, which can either be set to 4 or 6, but not 4+6 - the tunnel overhead should be consistent on all systems in order not to have MTU issues. Must set the ML2 overlay_ip_version config option to match else agent tunnel sync RPC will not work. Must set the OVN external_ids:ovn-encap-ip config option to the correct address. Updated 'devstack-ipv6-only' job definition and verification role that will set all services and tunnels to use IPv6 addresses. Closes-bug: #1619476 Change-Id: I6034278dfc17b55d7863bc4db541bbdaa983a686 --- .zuul.yaml | 4 ++- doc/source/configuration.rst | 25 ++++++++++++++++-- functions-common | 2 +- lib/neutron | 5 ++-- lib/neutron-legacy | 7 ----- lib/neutron_plugins/ml2 | 1 + lib/neutron_plugins/ovn_agent | 6 +++-- .../README.rst | 10 +++---- stackrc | 26 +++++++++++++++++++ tools/verify-ipv6-only-deployments.sh | 25 +++++++++++++----- 10 files changed, 84 insertions(+), 27 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..7322f78963 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -625,11 +625,13 @@ name: devstack-ipv6 parent: devstack description: | - Devstack single node job for integration gate with IPv6. + Devstack single node job for integration gate with IPv6, + all services and tunnels using IPv6 addresses. vars: devstack_localrc: SERVICE_IP_VERSION: 6 SERVICE_HOST: "" + TUNNEL_IP_VERSION: 6 - job: name: devstack-enforce-scope diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 40a8725b8d..757b4001d9 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -521,8 +521,8 @@ behavior: can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. -Service Version -~~~~~~~~~~~~~~~ +Service IP Version +~~~~~~~~~~~~~~~~~~ DevStack can enable service operation over either IPv4 or IPv6 by setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or @@ -542,6 +542,27 @@ optionally be used to alter the default IPv6 address:: HOST_IPV6=${some_local_ipv6_address} +Tunnel IP Version +~~~~~~~~~~~~~~~~~ + +DevStack can enable tunnel operation over either IPv4 or IPv6 by +setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or +``TUNNEL_IP_VERSION=6`` respectively. + +When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints, +for example, ``HOST_IP``. + +When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints, +for example, ``HOST_IPV6``. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not supported, as this value must match the address +family of the local tunnel endpoint IP(v6) address. + +The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the +setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP`` +when set to ``4``, and ``HOST_IPV6`` when set to ``6``. 
+ Multi-node setup ~~~~~~~~~~~~~~~~ diff --git a/functions-common b/functions-common index be966e96a6..f299ef1cc9 100644 --- a/functions-common +++ b/functions-common @@ -49,7 +49,7 @@ KILL_PATH="$(which kill)" STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ - HOST_IPV6 SERVICE_IP_VERSION" + HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION" # Saves significant environment variables to .stackenv for later use diff --git a/lib/neutron b/lib/neutron index f24ccfb1a9..1b78493919 100644 --- a/lib/neutron +++ b/lib/neutron @@ -230,6 +230,7 @@ function configure_neutron_new { mech_drivers+=",linuxbridge" fi iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME @@ -251,10 +252,10 @@ function configure_neutron_new { # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e9b55b6b02..5e6af0f249 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -246,13 +246,6 @@ if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then LB_PHYSICAL_INTERFACE=$default_route_dev fi -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
# diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index f00feac6b4..7343606aac 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -125,6 +125,7 @@ function neutron_plugin_configure_service { fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index dfd55deae5..24bdf92b60 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -99,8 +99,10 @@ ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) @@ -639,7 +641,7 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then @@ -654,7 +656,7 @@ function _start_ovs { if is_service_enabled ovn-controller-vtep ; then ovn_base_setup_bridge br-v vtep-ctl add-ps br-v - vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst index 400a8da222..3bddf5ea60 100644 --- a/roles/devstack-ipv6-only-deployments-verification/README.rst +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -1,10 +1,10 @@ -Verify the IPv6-only deployments +Verify all addresses in IPv6-only deployments This role needs to be invoked from a playbook that -run tests. This role verifies the IPv6 setting on -devstack side and devstack deploy services on IPv6. -This role is invoked before tests are run so that -if any missing IPv6 setting or deployments can fail +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail the job early. 
diff --git a/stackrc b/stackrc index 0c76de0531..f0039f0043 100644 --- a/stackrc +++ b/stackrc @@ -877,6 +877,32 @@ SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} # This is either 127.0.0.1 for IPv4 or ::1 for IPv6 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} + REGION_NAME=${REGION_NAME:-RegionOne} # Configure services to use syslog instead of writing to individual log files diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh index 2596395165..0f0cba8afe 100755 --- a/tools/verify-ipv6-only-deployments.sh +++ b/tools/verify-ipv6-only-deployments.sh @@ -23,32 +23,43 @@ function verify_devstack_ipv6_setting { _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) local _service_local_host='' _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) if [[ "$SERVICE_IP_VERSION" != 6 ]]; then echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." exit 1 fi + if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then + echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." + exit 1 + fi is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') if [[ "$is_service_host_ipv6" != "True" ]]; then - echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') if [[ "$is_host_ipv6" != "True" ]]; then - echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') if [[ "$is_service_listen_address" != "True" ]]; then - echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." 
exit 1 fi is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') if [[ "$is_service_local_host" != "True" ]]; then - echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))') + if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then + echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." exit 1 fi echo "Devstack is properly configured with IPv6" - echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST + echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP } function sanity_check_system_ipv6_enabled { @@ -72,7 +83,7 @@ function verify_service_listen_address_is_ipv6 { is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') if [[ "$is_endpoint_ipv6" != "True" ]]; then all_ipv6=False - echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address." + echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." continue fi endpoints_verified=True @@ -80,7 +91,7 @@ function verify_service_listen_address_is_ipv6 { if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then exit 1 fi - echo "All services deployed by devstack is on IPv6 endpoints" + echo "All services deployed by devstack are on IPv6 endpoints" echo $endpoints } From e6e7100e853f2ba06bf2157fd87ae948faba1d1f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 7 Jun 2022 10:12:59 +0200 Subject: [PATCH 1602/1936] Don't install pinned setuptools with distro pip We are seeing failures when using an updated setuptools version installed together with distro pip on Ubuntu 22.04. Install the version from u-c only when we are also installing pip from upstream. Change-Id: Ibb6e9424e5794ccbf9a937d2eecfa3bf60ed312e --- tools/install_pip.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index e9c52eacb7..7c5d4c6555 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -139,15 +139,18 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # recent enough anyway. This is included via rpms/general : # Simply fall through elif is_ubuntu; then - : # pip on Ubuntu 20.04 is new enough, too + # pip on Ubuntu 20.04 is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools fi set -x -# Note setuptools is part of requirements.txt and we want to make sure -# we obey any versioning as described there. 
-pip_install_gr setuptools get_versions From d5af514ac9485009229f3b594bccc09e905782fb Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Wed, 8 Jun 2022 10:19:50 +0200 Subject: [PATCH 1603/1936] Reduce memory consumption in Cinder services This patch reduces memory usage on the Cinder Volume and Backup services by tuning glibc. The specific tuning consist on disabling the per thread arenas and disabling dynamic thresholds. The Cinder Backup service suffers from high water mark memory usage and uses excessive memory. As an example just after 10 restore operations the service uses almost 1GB of RAM and does not ever free it afterwards. With this patch the memory consumption of the service is reduced down to almost 130MB. If we add a revert from Cinder (Change-Id I43a20c8687f12bc52b014611cc6977c4c3ca212c) it goes down to 100MB during my tests. This glibc tuning is not applied to all Python services because I haven't done proper testings on them and at first glance they don't seem to have such great improvements. Related-bug: #1908805 Change-Id: Ic9030d01468b3189350f83b04a8d1d346c489d3c --- functions-common | 22 ++++++++++++++++++---- lib/cinder | 9 +++++++-- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index be966e96a6..0b896dde59 100644 --- a/functions-common +++ b/functions-common @@ -1564,6 +1564,7 @@ function write_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local extra="" if [[ -n "$group" ]]; then extra="Group=$group" @@ -1577,6 +1578,9 @@ function write_user_unit_file { iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1591,6 +1595,7 @@ function write_uwsgi_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local unitfile="$SYSTEMD_DIR/$service" mkdir -p $SYSTEMD_DIR @@ -1605,6 +1610,9 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1652,10 +1660,14 @@ function _run_under_systemd { local systemd_service="devstack@$service.service" local group=$3 local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" if [[ "$command" =~ "uwsgi" ]] ; then - write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else - write_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi $SYSTEMCTL enable $systemd_service @@ -1676,18 +1688,20 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. 
-# run_process service "command-line" [group] [user] +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" local group=$3 local user=$4 + local env_vars="$5" local name=$service time_start "run_process" if is_service_enabled $service; then - _run_under_systemd "$name" "$command" "$group" "$user" + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi time_stop "run_process" } diff --git a/lib/cinder b/lib/cinder index 52818a81eb..ca2c084aff 100644 --- a/lib/cinder +++ b/lib/cinder @@ -552,8 +552,13 @@ function start_cinder { fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler From 8ff52ea12bb855adc9fe26de48b022310c1a5893 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 21 Jun 2022 17:31:50 +0200 Subject: [PATCH 1604/1936] Mark devstack-platform-centos-9-stream as n-v Due to the below bug the job has been constantly failing. Let's make it n-v until the bug is resolved: - https://bugs.launchpad.net/neutron/+bug/1979047 Change-Id: Ifc8cc96843a8eac5c98cd1e1f9e4b6287a7f2e7c --- .zuul.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5b93a77017..c3f8914eee 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -661,6 +661,9 @@ description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream timeout: 9000 + # TODO(kopecmartin) n-v until the following is resolved: + # https://bugs.launchpad.net/neutron/+bug/1979047 + voting: false vars: configure_swap_size: 4096 @@ -896,7 +899,9 @@ jobs: - devstack - devstack-ipv6 - - devstack-platform-centos-9-stream + # TODO(kopecmartin) n-v until the following is resolved: + # https://bugs.launchpad.net/neutron/+bug/1979047 + # - devstack-platform-centos-9-stream - devstack-platform-ubuntu-jammy - devstack-enforce-scope - devstack-multinode From 8a38a73ddf2930e9662cb22109f4a6ef341476d6 Mon Sep 17 00:00:00 2001 From: Vladislav Belogrudov Date: Wed, 25 May 2022 12:58:52 +0300 Subject: [PATCH 1605/1936] Correct hostname for OVN agent Currently Devstack uses short hostname for configuration of OVN. This leads to inability to start instances (failing port binding) on hosts with full hostnames (including dots). Open vSwitch expects hostname in external_ids that corresponds to one returned by ``hostname`` command. 
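A quick way to confirm the result on a deployed node (a sketch, assuming a
running Open vSwitch with the OVN external_ids already configured):

    # Both commands should now print the same fully qualified hostname
    hostname
    sudo ovs-vsctl get open_vswitch . external_ids:hostname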
Closes-Bug: #1943631 Change-Id: I15b71a49c482be0c8f15ad834e29ea1b33307c86 --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 24bdf92b60..e8a9babc1c 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -642,7 +642,7 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" From fe7cfa6b8c7573d643d66d3684de03e4183651bb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 23 Jun 2022 09:25:22 -0700 Subject: [PATCH 1606/1936] Avoid including bad service names in perf.json Some of the API services are not properly mounted under /$service/ in the apache proxy. This patch tries to avoid recording data for "services" like "v2.0" (in the case of neutron) by only adding names if they're all letters. A single warning is emitted for any services excluded by this check. For the moment this will mean we don't collect data for those services, but when their devstack API config is fixed, they'll start to show up. Change-Id: I41cc300e89a4f97a008a8ba97c91f0980f9b9c3f --- tools/get-stats.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/get-stats.py b/tools/get-stats.py index a3ed7f2625..b958af61b2 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -111,6 +111,7 @@ def get_http_stats_for_log(logfile): apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', 'length', 'c', 'agent') ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() for line in csv.reader(open(logfile), delimiter=' '): fields = dict(zip(apache_fields, line)) if len(fields) != len(apache_fields): @@ -146,6 +147,10 @@ def get_http_stats_for_log(logfile): service = url.strip('/') rest = '' + if not service.isalpha(): + ignored_services.add(service) + continue + method_key = '%s-%s' % (agent, method) try: length = int(fields['length']) @@ -159,6 +164,10 @@ def get_http_stats_for_log(logfile): stats[service]['largest'] = max(stats[service]['largest'], length) + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) + # Flatten this for ES return [{'service': service, 'log': os.path.basename(logfile), **vals} From ce1ae9ddef4dd05a294dc630bf81b264a4b5a703 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 29 Jun 2022 09:56:12 +0200 Subject: [PATCH 1607/1936] Fix missing "$" in the ENFORCE_SCOPE's variable name Because of the missing "$" before ENFORCE_SCOPE in the lib/neutron module, it was treated as an ENFORCE_SCOPE string instead of variable and Neutron was deployed always with old defaults and disabled scope enforcement. 
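The broken check compared a literal string rather than the variable's value,
so it could never match; a minimal illustration:

    # Always prints "old defaults": the left-hand side is the literal string
    # "ENFORCE_SCOPE", not the value of $ENFORCE_SCOPE
    ENFORCE_SCOPE=True
    [[ "ENFORCE_SCOPE" == "True" ]] && echo enforced || echo "old defaults"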
Change-Id: Ibe67fea634c5f7abb521c0369ff30dd5db84db8c --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 1b78493919..2d77df699a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -633,7 +633,7 @@ function configure_neutron { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else From e1fb94f82a2a5bfdc3a0d56c69455e95e265195e Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 29 Jun 2022 10:43:33 +0200 Subject: [PATCH 1608/1936] Make devstack-platform-debian-bullseye voting The job has been successfully passing lately, let's make it voting. Change-Id: Ib3b803a26c8647fd49c89371516c0ac7baba2703 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index c3f8914eee..c29cb31f31 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -672,7 +672,6 @@ parent: tempest-full-py3 description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye - voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -902,6 +901,7 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 # - devstack-platform-centos-9-stream + - devstack-platform-debian-bullseye - devstack-platform-ubuntu-jammy - devstack-enforce-scope - devstack-multinode From bd6e5205b115fb0cafed7f50a676699a4b9fc0fe Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 3 Jul 2022 22:27:15 +0200 Subject: [PATCH 1609/1936] Increase timeout waiting for OVN startup We see some cases where OVN startup takes much longer than 5 seconds, up to 28 seconds have been observed, so increase the limit to 40 to be on the safe side. Signed-off-by: Dr. Jens Harbott Closes-Bug: 1980421 Change-Id: I6da4a537e6a8d527ff71a821f07164fc7d342882 --- lib/neutron_plugins/ovn_agent | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e8a9babc1c..341b84d959 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -176,7 +176,7 @@ function wait_for_db_file { while [ ! -f $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "DB File $1 not found" fi done @@ -187,7 +187,7 @@ function wait_for_sock_file { while [ ! 
-S $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "Socket $1 not found" fi done From 85340e77f3d15b77dd0dc7c9df240428bfd2e30f Mon Sep 17 00:00:00 2001 From: Yadnesh Kulkarni Date: Mon, 11 Jul 2022 17:14:40 +0530 Subject: [PATCH 1610/1936] delete __pycache__ directory with sudo privileges Signed-off-by: Yadnesh Kulkarni Change-Id: I9cf3cd8921347eacc1effb2b197b97bc6ff3e0df --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index e16bb27ef5..e5b07514e5 100644 --- a/functions-common +++ b/functions-common @@ -646,7 +646,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $git_dest -name '*.pyc' -delete + sudo find $git_dest -name '*.pyc' -delete # handle git_ref accordingly to type (tag, branch) if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then From cf0bf746e996b780714a085b0e6f38899c2c832e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Jul 2022 22:34:47 +0900 Subject: [PATCH 1611/1936] Neutron: Set experimental option to use linuxbridge agent Recently the experimental mechanism has been added to Neutron and now it requires the [experimental] linuxbridge option when the linuxbridge mechanism driver is used. Depends-on: https://review.opendev.org/c/openstack/neutron/+/845181 Change-Id: Ice82a391cda9eb0193f23e6794be7ab3df12c40b --- lib/neutron | 4 ++++ lib/neutron_plugins/ml2 | 3 +++ 2 files changed, 7 insertions(+) diff --git a/lib/neutron b/lib/neutron index 1b78493919..6e787f213a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -229,6 +229,10 @@ function configure_neutron_new { else mech_drivers+=",linuxbridge" fi + if [[ "$mech_drivers" == *"linuxbridge"* ]]; then + iniset $NEUTRON_CONF experimental linuxbridge True + fi + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 7343606aac..fa61f1ea30 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -125,6 +125,9 @@ function neutron_plugin_configure_service { fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then + iniset $NEUTRON_CONF experimental linuxbridge True + fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then From 1a21ccbdf8eb66582a06f181f8c9af1f43bd52f5 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 8 Jul 2022 21:57:45 +0200 Subject: [PATCH 1612/1936] Add NEUTRON_ENDPOINT_SERVICE_NAME variable to set service name This option can be used to set name of the service used in the networking service endpoint URL. Depends-On: https://review.opendev.org/c/openstack/grenade/+/850306 Change-Id: I9e9a06eadc1604214c627bd3bda010cc00aaf83d --- lib/neutron | 18 +++++++++++++++--- lib/neutron-legacy | 31 ++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/lib/neutron b/lib/neutron index 6e787f213a..a885fbf16e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -114,6 +114,12 @@ NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100: # Physical network for VLAN network usage. 
NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + # Additional neutron api config files declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS @@ -397,10 +403,13 @@ function create_neutron_accounts_new { local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/ + neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/ else neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then @@ -481,19 +490,22 @@ function start_neutron_api { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" else # Start the Neutron service # TODO(sc68cal) Stop hard coding this run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT fi fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then die $LINENO "neutron-api did not start" diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5e6af0f249..1a6995511b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -145,6 +145,12 @@ SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} # /etc/neutron is assumed by many of devstack plugins. Do not change. _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + # List of config file names in addition to the main plugin config file # To add additional plugin config files, use ``neutron_server_config_add`` # utility function. For example: @@ -431,10 +437,13 @@ function create_nova_conf_neutron { function create_mutnauq_accounts { local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ else neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ fi + if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then @@ -538,17 +547,20 @@ function start_neutron_service_and_check { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ + neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port + neutron_url=$service_protocol://$Q_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT fi fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi echo "Waiting for Neutron to start..." local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" @@ -905,12 +917,25 @@ function _configure_neutron_plugin_agent { neutron_plugin_configure_plugin_agent } +function _replace_api_paste_composite { + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" +} + # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + _replace_api_paste_composite + fi + # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS From facf15626e5776bc64a2f072bdccadbda714a8f2 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Jul 2022 15:58:42 +0900 Subject: [PATCH 1613/1936] Neutron: Do not set removed allow_overlapping_ips The parameter has been removed from neutron by [1]. 
[1] fde91e8059a9a23fb7ece6e3463984329c7ea581 Change-Id: I3b838ea741d19729d6fcf03c0478b1b4d8ec1213 --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 6e787f213a..d4815cd5f8 100644 --- a/lib/neutron +++ b/lib/neutron @@ -213,7 +213,6 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5e6af0f249..d21be51dcd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -916,7 +916,6 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME From b70d98fe75621d7c71197f82b9fde630d2fa50b2 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 20 Jul 2022 14:05:15 +0000 Subject: [PATCH 1614/1936] Fix doc for adding sudo privileges to stack user Writing NOPASSWD directive into /etc/sudoers was throwing permission denied errors. This commit writes the directive to the /etc/sudoers.d/stack file instead. Closes-Bug: #1981541 Change-Id: If30f01aa5f3a33dda79ff4a6892116511c8e1542 --- doc/source/guides/multinode-lab.rst | 2 +- doc/source/guides/single-machine.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 79a76dedb1..658422b0af 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -89,7 +89,7 @@ password: :: - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack From here on use the ``stack`` user. **Logout** and **login** as the ``stack`` user. diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 03d93743f7..0529e30f08 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -63,7 +63,7 @@ to have sudo privileges: .. code-block:: console $ apt-get install sudo -y || yum install -y sudo - $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. From 79bef068b69c7e97a63aaa3e7fae13bcbc649ebd Mon Sep 17 00:00:00 2001 From: Hoai-Thu Vuong Date: Tue, 2 Aug 2022 14:41:59 +0700 Subject: [PATCH 1615/1936] remove duplicate line of REGION_NAME Change-Id: I42b270749f057c5751e809aba282112b990b9f38 --- stackrc | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackrc b/stackrc index f0039f0043..b3130e5f7f 100644 --- a/stackrc +++ b/stackrc @@ -903,8 +903,6 @@ fi # Default is dependent on TUNNEL_IP_VERSION above. 
TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} -REGION_NAME=${REGION_NAME:-RegionOne} - # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} From d266c87b1d3ecae1b40589832efc9bf7cf3e524c Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Mon, 8 Aug 2022 16:45:31 +0900 Subject: [PATCH 1616/1936] iniset: fix handling of values containg ampersand Attempting to set a value containing the ampersand character (&) by iniset would corrupt the value. So, add an escaping process. Signed-off-by: Nobuhiro MIKI Closes-Bug: #1983816 Change-Id: Ie2633bacd2d761d110e6cb12f95382325c329415 --- inc/ini-config | 3 +++ tests/test_ini_config.sh | 12 +++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/inc/ini-config b/inc/ini-config index 79936823d2..f65e42d3a5 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,6 +189,9 @@ function iniset { local option=$3 local value=$4 + # Escape the ampersand character (&) + value=$(echo $value | sed -e 's/&/\\&/g') + if [[ -z $section || -z $option ]]; then $xtrace return diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6ed1647f34..6367cde441 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,9 @@ empty = multi = foo1 multi = foo2 +[fff] +ampersand = + [key_with_spaces] rgw special key = something @@ -85,7 +88,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -124,6 +127,13 @@ iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" +# Test with ampersand in values +for i in `seq 3`; do + iniset ${TEST_INI} fff ampersand '&y' +done +VAL=$(iniget ${TEST_INI} fff ampersand) +assert_equal "$VAL" "&y" "iniset ampersands in option" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" From 90e5479f382af1a5482f0acccdc36c6d18321634 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 16 Aug 2022 17:29:16 +0200 Subject: [PATCH 1617/1936] Remove forgotten LinuxMint occurrence Right now we don't officialy support LinuxMint as our documentation says [1], it seems LinuxMint is a relict and got forgotten over time. This patch removes LinuxMint from the code in order not to confuse users. [1] https://docs.openstack.org/devstack/latest/ Closes-Bug: #1983427 Change-Id: Ie1ced25f89389494b28a7b2e9bb1c4273e002dd5 --- doc/source/plugins.rst | 2 +- functions-common | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 7d70d74dd0..62dd15bfb1 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -238,7 +238,7 @@ package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: - ``./devstack/files/debs/$plugin_name`` - Packages to install when running - on Ubuntu, Debian or Linux Mint. + on Ubuntu or Debian. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running on Red Hat, Fedora, or CentOS. 
diff --git a/functions-common b/functions-common index e5b07514e5..92a6678de0 100644 --- a/functions-common +++ b/functions-common @@ -426,7 +426,7 @@ function GetOSVersion { os_VENDOR=$(lsb_release -i -s) fi - if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" else os_PACKAGE="rpm" @@ -444,9 +444,8 @@ declare -g DISTRO function GetDistro { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \ - "$os_VENDOR" =~ (LinuxMint) ]]; then - # 'Everyone' refers to Ubuntu / Debian / Mint releases by + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then From fdfc14451afc4d7f78edadb1b26a3a845eace715 Mon Sep 17 00:00:00 2001 From: Eliad Cohen Date: Tue, 16 Aug 2022 13:00:45 -0400 Subject: [PATCH 1618/1936] Clean up use of get_field Openstack client can return the id field for create/show commands using `-f value -c id`. Cleaned up the use of grep 'id' with get_field Change-Id: I2f4338f30c11e5139cda51c92524782b86f0aacc --- functions | 4 ++-- lib/neutron_plugins/services/l3 | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/functions b/functions index ccca5cda51..7ada0feba7 100644 --- a/functions +++ b/functions @@ -414,10 +414,10 @@ function upload_image { # kernel for use when uploading the root filesystem. local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index fbd4692bba..3dffc33d37 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -166,14 +166,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" 
network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi @@ -183,7 +183,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi @@ -193,7 +193,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -211,11 +211,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. 
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi @@ -225,9 +225,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -257,7 +257,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } @@ -278,7 +278,7 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } From ca5f9195610a94ca0a567700a94f9417ca877336 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 21 Aug 2022 10:52:41 +0200 Subject: [PATCH 1619/1936] Clean up n-net remnants In I90316208d1af42c1659d3bee386f95e38aaf2c56 support for nova-network was removed, but some bits remained, fix this up. 
Change-Id: Iba7e1785fd0bdf0a6e94e5e03438fc7634621e49 --- files/debs/nova | 2 -- files/rpms-suse/nova | 2 -- files/rpms/nova | 2 -- lib/nova | 14 -------------- stack.sh | 10 +++------- 5 files changed, 3 insertions(+), 27 deletions(-) diff --git a/files/debs/nova b/files/debs/nova index 0194f00f2c..5c00ad72d9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,7 +1,5 @@ conntrack curl -dnsmasq-base -dnsmasq-utils # for dhcp_release ebtables genisoimage # required for config_drive iptables diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 1cc2f62ea5..082b9aca22 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -1,8 +1,6 @@ cdrkit-cdrtools-compat # dist:sle12 conntrack-tools curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables iptables iputils diff --git a/files/rpms/nova b/files/rpms/nova index 9522e5729d..f2824ee2c4 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,5 @@ conntrack-tools curl -dnsmasq # for q-dhcp -dnsmasq-utils # for dhcp_release ebtables genisoimage # not:rhel9 required for config_drive iptables diff --git a/lib/nova b/lib/nova index da3a10edd0..6de1d3382f 100644 --- a/lib/nova +++ b/lib/nova @@ -107,20 +107,6 @@ NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,Ima QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that -# the default isn't completely crazy. This will match ``eth*``, ``em*``, or -# the new ``p*`` interfaces, then basically picks the first -# alphabetically. It's probably wrong, however it's less wrong than -# always using ``eth0`` which doesn't exist on new Linux distros at all. -GUEST_INTERFACE_DEFAULT=$(ip link \ - | grep 'state UP' \ - | awk '{print $2}' \ - | sed 's/://' \ - | grep ^[ep] \ - | head -1) - # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) diff --git a/stack.sh b/stack.sh index df283bbe50..c99189e6dc 100755 --- a/stack.sh +++ b/stack.sh @@ -1152,7 +1152,8 @@ fi # ---- if is_service_enabled q-dhcp; then - # Delete traces of nova networks from prior runs + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. # Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) if [ -z "$netman_pid" ]; then @@ -1212,12 +1213,7 @@ if is_service_enabled nova; then echo_summary "Configuring Nova" init_nova - # Additional Nova configuration that is dependent on other services - # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If - # not, remove the if here - if is_service_enabled neutron; then - async_runfunc configure_neutron_nova - fi + async_runfunc configure_neutron_nova fi From ccd116d36447ba1c5efad58ee360eb7f276eb7c6 Mon Sep 17 00:00:00 2001 From: Alan Bishop Date: Wed, 10 Aug 2022 10:30:19 -0700 Subject: [PATCH 1620/1936] Cinder: add creator role when barbican is enabled When barbican is enabled, add the "creator" role to cinder's service user so that cinder can create secrets. Cinder needs to create barbican secrets when migrating encryption keys from the legacy ConfKeyManager to barbican. Cinder also needs to create barbican secrets in order to support transferring encrypted volumes. 
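The role assignment is roughly equivalent to running the following by hand
(a sketch; the "service" project and "cinder" user names are devstack's usual
defaults and may differ in a customized deployment):

    openstack role add --project service --user cinder creator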
Implements: bp/transfer-encrypted-volume Depends-On: I216f78e8a300ab3f79bbcbb38110adf2bbec2196 Change-Id: Ia3f414c4b9b0829f60841a6dd63c97a893fdde4d --- lib/cinder | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index ca2c084aff..7dd7539eca 100644 --- a/lib/cinder +++ b/lib/cinder @@ -388,16 +388,24 @@ function configure_cinder { # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - create_service_user "cinder" + local extra_role="" + + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") + fi + + create_service_user "cinder" $extra_role # block-storage is the official service type get_or_create_service "cinder" "block-storage" "Cinder Volume Service" From e7d2623dca483497ec51c75dfe1b6162801eead0 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 21 Aug 2022 12:54:57 +0200 Subject: [PATCH 1621/1936] Clean up neutron cleanup code neutron-ns-metadata-proxy was dropped from Neutron 5 years ago, no need to keep trying to kill it. Change-Id: I20b6d68dd8dde36057a2418bca0841bdea377b07 --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 1f54e0e043..b3e3d72e8c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -567,7 +567,6 @@ function stop_neutron_new { fi if is_service_enabled neutron-metadata-agent; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : stop_process neutron-metadata-agent fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9229b47988..baf67f209e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -628,7 +628,6 @@ function stop_mutnauq_other { fi if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : stop_process q-meta fi From b9b6d6b862ce69a875c152ad30da8f4717c75272 Mon Sep 17 00:00:00 2001 From: June Yi Date: Sat, 2 Jul 2022 13:07:43 +0900 Subject: [PATCH 1622/1936] Respect constraints on tempest venv consistently In case of online mode, there is a procedure to recreate tempest venv. For consistency of tempest venv during the entire stack.sh process, add logic to consider the TEMPEST_VENV_UPPER_CONSTRAINTS option here. 
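A short sketch of the intended ordering (names taken from lib/tempest as
shown in the diff below):

    # Resolve constraints first so that the recreated venv also honors
    # TEMPEST_VENV_UPPER_CONSTRAINTS, then rebuild the venv and install.
    tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
    set_tempest_venv_constraints $tmp_u_c_m
    if [[ "$OFFLINE" != "True" ]]; then
        tox -revenv-tempest --notest
    fi
    tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
    rm -f $tmp_u_c_m
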
Closes-bug: #1980483 Signed-off-by: June Yi Change-Id: I0cea282152fd363af8671cab1b5f733ebe2bd4df --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 206b37b5bf..87a2244784 100644 --- a/lib/tempest +++ b/lib/tempest @@ -695,13 +695,13 @@ function configure_tempest { local tmp_cfg_file tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR - if [[ "$OFFLINE" != "True" ]]; then - tox -revenv-tempest --notest - fi local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m From 3de92db6634a6d1455b7211ec869aed35508c58c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 26 Aug 2022 12:58:29 +0200 Subject: [PATCH 1623/1936] Fix installation of OVS/OVN from sources This patch changes user who runs ovsdb-server and ovn-nortd services to root. It also adds installation of the libssl dev package before compilation of the openvswitch if TLS service is enabled. Co-Authored-By: Fernando Royo Closes-Bug: #1987832 Change-Id: I83fc9250ae5b7c1686938a0dd25d66b40fc6c6aa --- lib/neutron_plugins/ovn_agent | 4 ++-- lib/neutron_plugins/ovs_source | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 341b84d959..8eb2993b94 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -616,7 +616,7 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. @@ -704,7 +704,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" else _start_process "$OVN_NORTHD_SERVICE" fi diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 9ae5555afb..164d574c42 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -87,9 +87,15 @@ function prepare_for_ovs_compilation { install_package kernel-devel-$KERNEL_VERSION install_package kernel-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package openssl-devel + fi elif is_ubuntu ; then install_package linux-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package libssl-dev + fi fi } From 97061c9a1f2a2989e0bacb5f7cc5910c75aaeb44 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Thu, 14 Oct 2021 09:55:56 +0200 Subject: [PATCH 1624/1936] Add LVM NVMe support This patch adds NVMe LVM support to the existing iSCSI LVM configuration support. We deprecate the CINDER_ISCSI_HELPER configuration option since we are no longer limited to iSCSI, and replace it with the CINDER_TARGET_HELPER option. The patch also adds another 3 target configuration options: - CINDER_TARGET_PROTOCOL - CINDER_TARGET_PREFIX - CINDER_TARGET_PORT These options will have different defaults based on the selected target helper. 
For tgtadm and lioadm they'll be iSCSI, iqn.2010-10.org.openstack:, and 3260 respectively, and for nvmet they'll be nvmet_rdma, nvme-subsystem-1, and 4420. Besides nvmet_rdma the CINDER_TARGET_PROTOCOL option can also be set to nvmet_tcp, and nvmet_fc. For the RDMA transport protocol devstack will be using Soft-RoCE and creating a device on top of the network interface. LVM NVMe-TCP support is added in the dependency mentioned in the footer and LVM NVMe-FC will be added in later patches (need os-brick and cinder patches) but the code here should still be valid. Change-Id: I6578cdc27489b34916cdeb72ba3fdf06ea9d4ad8 --- doc/source/configuration.rst | 29 +++++++++++ lib/cinder | 95 +++++++++++++++++++++++++++++------ lib/cinder_backends/fake_gate | 2 +- lib/cinder_backends/lvm | 5 +- lib/lvm | 10 ++-- lib/nova | 40 ++++++++++++--- 6 files changed, 154 insertions(+), 27 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..0d8773fb6a 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -669,6 +669,35 @@ adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for each is 10.) +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. + +Defaults use iSCSI with the LIO target manager:: + + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 + +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: + + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 + +When selecting the RDMA transport protocol DevStack will create on Cinder nodes +a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined +then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. + +This Soft-RoCE device will always be created on the Nova compute side since we +cannot tell beforehand whether there will be an RDMA connection or not. 
+ Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index ca2c084aff..bc704c1e5d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -43,6 +43,13 @@ GITDIR["python-cinderclient"]=$DEST/python-cinderclient GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + # Cinder virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["cinder"]=${CINDER_DIR}.venv @@ -88,13 +95,32 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Default to lioadm -CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} + +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' + fi +fi +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} +else + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + # EL and SUSE should only use lioadm if is_fedora || is_suse; then - if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then - die "lioadm is the only valid Cinder target_helper config on this platform" + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" fi fi @@ -187,7 +213,7 @@ function _cinder_cleanup_apache_wsgi { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then local targets targets=$(sudo tgtadm --op show --mode target) if [ $? 
-ne 0 ]; then @@ -215,8 +241,14 @@ function cleanup_cinder { else stop_service tgtd fi - else + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -267,7 +299,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" @@ -275,11 +307,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then @@ -465,9 +493,9 @@ function init_cinder { function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then install_package tgt - elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then if is_ubuntu; then # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 sudo mkdir -p /etc/target @@ -476,6 +504,43 @@ function install_cinder { else install_package targetcli fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! 
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi fi } @@ -512,7 +577,7 @@ function start_cinder { service_port=$CINDER_SERVICE_PORT_INT service_protocol="http" fi - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate index 3ffd9a6785..3b9f1d1164 100644 --- a/lib/cinder_backends/fake_gate +++ b/lib/cinder_backends/fake_gate @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index e03ef14c55..42865119da 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,7 +50,10 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } diff --git a/lib/lvm b/lib/lvm index d3f6bf1792..57ffb967c3 100644 --- a/lib/lvm +++ b/lib/lvm @@ -130,7 +130,7 @@ function init_lvm_volume_group { local size=$2 # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then + if is_fedora || is_suse && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then start_service tgtd fi @@ -138,10 +138,14 @@ function init_lvm_volume_group { _create_lvm_volume_group $vg $size # Remove iscsi targets - if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - else + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear fi _clean_lvm_volume_group $vg } diff --git a/lib/nova b/lib/nova index da3a10edd0..7902c5fdb9 100644 --- a/lib/nova +++ b/lib/nova @@ -97,6 +97,12 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + 
NOVA_MY_IP="$HOST_IP" +fi + # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} @@ -219,6 +225,9 @@ function cleanup_nova { done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. sudo rm -rf $NOVA_INSTANCES_PATH/* fi @@ -306,6 +315,7 @@ function configure_nova { fi fi + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM # Ensure each compute host uses a unique iSCSI initiator echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi @@ -326,8 +336,28 @@ EOF # not work under FIPS. iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - # ensure that iscsid is started, even when disabled by default - restart_service iscsid + if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then + # ensure that iscsid is started, even when disabled by default + restart_service iscsid + + # For NVMe-oF we need different packages that many not be present + else + install_package nvme-cli + sudo modprobe nvme-fabrics + + # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + sudo modprobe nvme-rdma + iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`} + if ! sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi fi # Rebuild the config file from scratch @@ -418,11 +448,7 @@ function create_nova_conf { iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" iniset $NOVA_CONF scheduler workers "$API_WORKERS" iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" From f49b435e98cd9d119179d98829241954b8d73669 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 22 Sep 2022 11:22:21 +0900 Subject: [PATCH 1625/1936] [Doc] Fix Glance image size limit command This commit fixes the configuration document which mentions how to change Glance default image size quota at runtime because we don't have `openstack registered limit update` command but `openstack registered limit set` command[1]. [1] https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/registered-limit.html#registered-limit-set Change-Id: I399685ed1f864f8f1ce7295ed6f83336cfccbd81 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..d0f2b02419 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -719,7 +719,7 @@ or at runtime via: :: - openstack --os-cloud devstack-system-admin registered limit update \ + openstack --os-cloud devstack-system-admin registered limit set \ --service glance --default-limit 5000 --region RegionOne image_size_total .. 
_arch-configuration: From 1516997afe888ebc3cd06653a4f29a05bba7b346 Mon Sep 17 00:00:00 2001 From: Tom Weininger Date: Wed, 14 Sep 2022 17:16:00 +0200 Subject: [PATCH 1626/1936] Update user guide for Octavia Change-Id: I8e3134c3b2d591f7ab72b8040e1b931e967e11be --- doc/source/guides.rst | 8 +- doc/source/guides/devstack-with-lbaas-v2.rst | 145 ------------------ .../guides/devstack-with-nested-kvm.rst | 2 + doc/source/guides/devstack-with-octavia.rst | 144 +++++++++++++++++ 4 files changed, 150 insertions(+), 149 deletions(-) delete mode 100644 doc/source/guides/devstack-with-lbaas-v2.rst create mode 100644 doc/source/guides/devstack-with-octavia.rst diff --git a/doc/source/guides.rst b/doc/source/guides.rst index e7ec629962..e7b46b6e55 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -20,7 +20,7 @@ Walk through various setups used by stackers guides/neutron guides/devstack-with-nested-kvm guides/nova - guides/devstack-with-lbaas-v2 + guides/devstack-with-octavia guides/devstack-with-ldap All-In-One Single VM @@ -69,10 +69,10 @@ Nova and devstack Guide to working with nova features :doc:`Nova and devstack `. -Configure Load-Balancer Version 2 ------------------------------------ +Configure Octavia +----------------- -Guide on :doc:`Configure Load-Balancer Version 2 `. +Guide on :doc:`Configure Octavia `. Deploying DevStack with LDAP ---------------------------- diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst deleted file mode 100644 index 5d96ca7d74..0000000000 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ /dev/null @@ -1,145 +0,0 @@ -Devstack with Octavia Load Balancing -==================================== - -Starting with the OpenStack Pike release, Octavia is now a standalone service -providing load balancing services for OpenStack. - -This guide will show you how to create a devstack with `Octavia API`_ enabled. - -.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html - -Phase 1: Create DevStack + 2 nova instances --------------------------------------------- - -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find -useful. - -Install devstack - -:: - - git clone https://opendev.org/openstack/devstack - cd devstack/tools - sudo ./create-stack-user.sh - cd ../.. - sudo mv devstack /opt/stack - sudo chown -R stack.stack /opt/stack/devstack - -This will clone the current devstack code locally, then setup the "stack" -account that devstack services will run under. Finally, it will move devstack -into its default location in /opt/stack/devstack. - -Edit your ``/opt/stack/devstack/local.conf`` to look like - -:: - - [[local|localrc]] - enable_plugin octavia https://opendev.org/openstack/octavia - # If you are enabling horizon, include the octavia dashboard - # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git - # If you are enabling barbican for TLS offload in Octavia, include it here. 
- # enable_plugin barbican https://opendev.org/openstack/barbican - - # ===== BEGIN localrc ===== - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - SERVICE_TOKEN=password - RABBIT_PASSWORD=password - # Enable Logging - LOGFILE=$DEST/logs/stack.sh.log - VERBOSE=True - LOG_COLOR=True - # Pre-requisite - ENABLED_SERVICES=rabbit,mysql,key - # Horizon - enable for the OpenStack web GUI - # ENABLED_SERVICES+=,horizon - # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy - ENABLED_SERVICES+=,placement-api,placement-client - # Glance - ENABLED_SERVICES+=,g-api - # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron - ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api - # Cinder - ENABLED_SERVICES+=,c-api,c-vol,c-sch - # Tempest - ENABLED_SERVICES+=,tempest - # Barbican - Optionally used for TLS offload in Octavia - # ENABLED_SERVICES+=,barbican - # ===== END localrc ===== - -Run stack.sh and do some sanity checks - -:: - - sudo su - stack - cd /opt/stack/devstack - ./stack.sh - . ./openrc - - openstack network list # should show public and private networks - -Create two nova instances that we can use as test http servers: - -:: - - #create nova instances on private network - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 - openstack server list # should show the nova instances just created - - #add secgroup rules to allow ssh etc.. - openstack security group rule create default --protocol icmp - openstack security group rule create default --protocol tcp --dst-port 22:22 - openstack security group rule create default --protocol tcp --dst-port 80:80 - -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run - -:: - - MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') - while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& - -Phase 2: Create your load balancer ----------------------------------- - -Make sure you have the 'openstack loadbalancer' commands: - -:: - - pip install python-octaviaclient - -Create your load balancer: - -:: - - openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. 
- openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - -Please note: The fields are the IP addresses of the nova -servers created in Phase 1. -Also note, using the API directly you can do all of the above commands in one -API call. - -Phase 3: Test your load balancer --------------------------------- - -:: - - openstack loadbalancer show lb1 # Note the vip_address - curl http:// - curl http:// - -This should show the "Welcome to " message from each member server. diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 3732f06fd8..ba483e9ec9 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -1,3 +1,5 @@ +.. _kvm_nested_virt: + ======================================================= Configure DevStack with KVM-based Nested Virtualization ======================================================= diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. + +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. 
+ # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . ./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. + +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. 
_Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server From 0d5c8d6643d5f532ec4b0e9f4a588d604db51dba Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 28 Sep 2022 02:13:58 +0200 Subject: [PATCH 1627/1936] Update DEVSTACK_SERIES to 2023.1 stable/zed branch has been created now and current master is for 2023.1 Antelope. Change-Id: I6186d01b1bf8548425500cc9feee6ab494a3db03 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index b3130e5f7f..a05d1e5553 100644 --- a/stackrc +++ b/stackrc @@ -243,7 +243,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="zed" +DEVSTACK_SERIES="2023.1" ############## # From 9ece457b7b704d1218f8746829b7950b70e0a406 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 24 Aug 2022 14:43:00 +1000 Subject: [PATCH 1628/1936] Update to Fedora 36 Update the Fedora job to the latest release nodes Depends-On: https://review.opendev.org/c/openstack/devstack/+/860634 Change-Id: If2d7f99e3665a2e3df4cf763efc64dd381f02350 --- .zuul.yaml | 2 +- files/rpms/swift | 2 +- lib/apache | 2 +- stack.sh | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ad7148449..441a9cf1e9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -82,7 +82,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-35 + label: fedora-36 groups: - name: tempest nodes: diff --git a/files/rpms/swift b/files/rpms/swift index 7d906aa926..49a1833dc4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f35,rhel9 +xinetd # not:f36,rhel9 diff --git a/lib/apache b/lib/apache index 94f3cfc95a..705776c55b 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then + elif is_fedora && ! [[ $DISTRO =~ f36 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/stack.sh b/stack.sh index c99189e6dc..cc90fca576 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** +# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From e3bc6b5f571a5b291617ee5227c153002ef8d9c3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 24 Sep 2019 12:44:16 +1000 Subject: [PATCH 1629/1936] get_or_create_domain: simplify with "--or-show" argument Similar to other functions, this uses "--or-show" to avoid double calls. Co-Authored-By: Jens Harbott Change-Id: I548f9acd812687838e04b705f86f3b70d2b10caf --- functions-common | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 92a6678de0..ed44186804 100644 --- a/functions-common +++ b/functions-common @@ -875,14 +875,9 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - # Gets domain id domain_id=$( - # Gets domain id - openstack --os-cloud devstack-system-admin domain show $1 \ - -f value -c id 2>/dev/null || - # Creates new domain openstack --os-cloud devstack-system-admin domain create $1 \ - --description "$2" \ + --description "$2" --or-show \ -f value -c id ) echo $domain_id From e69b78df6fc48a1e70c180d3878164e416adbbdd Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 24 Sep 2019 12:51:25 +1000 Subject: [PATCH 1630/1936] Simplify role addtion helper functions Because adding the role is idempotent, we can save doing the initial check for role assignment. Also simplify the output matching by using osc's filters where appropriate. Co-Authored-By: Jens Harbott Change-Id: If2a661cc565a43a7821b8f0a10edd97de08eb911 --- functions-common | 65 ++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/functions-common b/functions-common index ed44186804..e9984fd65e 100644 --- a/functions-common +++ b/functions-common @@ -966,29 +966,22 @@ function _get_domain_args { # Usage: get_or_add_user_project_role [ ] function get_or_add_user_project_role { local user_role_id + local domain_args domain_args=$(_get_domain_args $4 $5) - # Gets user role id + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --project $3 \ - $domain_args - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --project $3 \ - $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) echo $user_role_id } @@ -996,23 +989,18 @@ function get_or_add_user_project_role { # Usage: get_or_add_user_domain_role function get_or_add_user_domain_role { local user_role_id - # Gets user role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --domain $3 - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) + echo $user_role_id } @@ -1051,23 +1039,18 @@ function get_or_add_user_system_role { # Usage: get_or_add_group_project_role function get_or_add_group_project_role { local group_role_id - # Gets group role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack role add $1 \ + --group $2 \ + --project $3 group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ - -f value) - if [[ -z "$group_role_id" ]]; then - # Adds role to group and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --group $2 \ - --project $3 - group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --group $2 \ - --project $3 \ - -f value) - fi + -f value -c Role) + echo $group_role_id } From 2e6756640c8e85cb924f9dfcd968aad303b481b3 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 6 Oct 2022 17:24:57 +0200 Subject: [PATCH 1631/1936] Re-enable horizon in jammy-based jobs The issue that Horizon had with python3.10 has been fixed some time ago, so we can stop disabling it for those jobs. Also stop including roles from devstack-gate which we no longer need. Change-Id: Ia5d0b31561adc5051acd96fcaab183e60c3c2f99 --- .zuul.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ad7148449..99b48dae45 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -335,7 +335,6 @@ required-projects: - opendev.org/openstack/devstack roles: - - zuul: opendev.org/openstack/devstack-gate - zuul: opendev.org/openstack/openstack-zuul-jobs vars: devstack_localrc: @@ -676,9 +675,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_services: - # Horizon doesn't like py310 - horizon: false - job: name: devstack-platform-ubuntu-jammy-ovn-source @@ -706,8 +702,6 @@ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan devstack_services: - # Horizon doesn't like py310 - horizon: false # Disable OVN services ovn-northd: false ovn-controller: false @@ -752,10 +746,6 @@ voting: false vars: configure_swap_size: 4096 - # Python 3.10 dependency issues; see - # https://bugs.launchpad.net/horizon/+bug/1960204 - devstack_services: - horizon: false - job: name: devstack-platform-fedora-latest-virt-preview From 7d1ba835c38839a62cee94dc281773b62c554932 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Tue, 11 Oct 2022 12:35:18 +0900 Subject: [PATCH 1632/1936] [Doc] Fix tox command option to run smoke tests This commit fixes the tox command option to run the smoke tests. The original arguments fail with the error[1], and `-efull` and `tempest.scenario.test_network_basic_ops` are not for the smoke tests. [1] $ tox -efull tempest.scenario.test_network_basic_ops ... 
tempest run: error: unrecognized arguments: tempest.scenario.test_network_basic_ops Change-Id: I9c3dd9fb4f64bf856c5cab88a2aeaae355c84a65 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..3191ae824b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -636,7 +636,7 @@ tests can be run as follows: :: $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops + $ tox -e smoke By default tempest is downloaded and the config file is generated, but the tempest package is not installed in the system's global site-packages (the From 781fbf47b557d92bcb71e60c535f6249e729637d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 11 Oct 2022 15:41:02 +0200 Subject: [PATCH 1633/1936] docs: Add warnings about password selection Some services fail when using special characters in passwords, add some warnings to our docs. Closes-Bug: 1744985 Change-Id: I601149e2e7362507b38f01719f7197385a27e0a8 --- doc/source/configuration.rst | 3 +++ doc/source/guides/single-machine.rst | 3 +++ doc/source/index.rst | 5 ++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..d59c1edafd 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -181,6 +181,9 @@ values that most often need to be set. If the ``*_PASSWORD`` variables are not set here you will be prompted to enter values for them by ``stack.sh``. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + The network ranges must not overlap with any networks in use on the host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly used for both the local networking and Nova's fixed and floating ranges. diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 0529e30f08..a4385b5b4b 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -106,6 +106,9 @@ do the following: - Set the service password. This is used by the OpenStack services (Nova, Glance, etc) to authenticate with Keystone. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + ``local.conf`` should look something like this: .. code-block:: ini diff --git a/doc/source/index.rst b/doc/source/index.rst index 0434d68838..ba7ea42943 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -101,7 +101,10 @@ devstack git repo. This is the minimum required config to get started with DevStack. .. note:: There is a sample :download:`local.conf ` file - under the *samples* directory in the devstack repository. + under the *samples* directory in the devstack repository. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. 
Start the install ----------------- From 358987f065af05d166539982c282e2f587b5c952 Mon Sep 17 00:00:00 2001 From: Adrian Fusco Arnejo Date: Wed, 31 Aug 2022 19:38:49 +0200 Subject: [PATCH 1634/1936] Adding devstack support for Rocky Linux 9 Adding job and nodeset to run tempest-full-py3 in Rocky Linux 9 instance Change-Id: I6fb390bfeec436b50a3ddc18d154bbce3f3b1975 --- .zuul.yaml | 20 ++++++++++++++++++++ doc/source/index.rst | 2 +- functions-common | 7 ++++++- 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index c29cb31f31..8e6f8633ff 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,6 +106,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-rockylinux-9 + nodes: + - name: controller + label: rockylinux-9 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -676,6 +686,15 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-rocky-blue-onyx + parent: tempest-full-py3 + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -852,6 +871,7 @@ - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs diff --git a/doc/source/index.rst b/doc/source/index.rst index 0434d68838..626d5e1a85 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. diff --git a/functions-common b/functions-common index 92a6678de0..e27518f559 100644 --- a/functions-common +++ b/functions-common @@ -418,6 +418,9 @@ function GetOSVersion { os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') + elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then + os_VENDOR="Rocky" + os_RELEASE=${VERSION_ID} else _ensure_lsb_release @@ -466,6 +469,7 @@ function GetDistro { "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (Rocky) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 @@ -513,7 +517,7 @@ function is_oraclelinux { # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). +# (Fedora, RHEL, CentOS, Rocky, etc). 
# is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -523,6 +527,7 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "Rocky" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] From 71c99655479174750bcedfe458328328a1596766 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 19 Oct 2022 14:08:43 -0400 Subject: [PATCH 1635/1936] Use separate OVS and OVN directories If stack.sh is run on a system that already has OVN packages installed, it could fail to find its DB sockets. This is because the 'ln -s' will place the symlink inside of /var/run/ovn instead of using a single directory as intended. Change the code in neutron_plugins/ovn_agent to not make the symlink and instead use separate directories for OVS and OVN. Closes-bug: #1980421 Change-Id: Ic28a93bdc3dfe4a6159234baeabd0064db452b07 --- lib/neutron_plugins/ovn_agent | 43 ++++++++++++++++------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 8eb2993b94..e64224cbaa 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -244,11 +244,12 @@ function _run_process { local cmd="$2" local stop_cmd="$3" local group=$4 - local user=${5:-$STACK_USER} + local user=$5 + local rundir=${6:-$OVS_RUNDIR} local systemd_service="devstack@$service.service" local unit_file="$SYSTEMD_DIR/$systemd_service" - local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" echo "Starting $service executed command": $cmd @@ -264,14 +265,14 @@ function _run_process { _start_process $systemd_service - local testcmd="test -e $OVS_RUNDIR/$service.pid" + local testcmd="test -e $rundir/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 local service_ctl_file - service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl) + service_ctl_file=$(ls $rundir | grep $service | grep ctl) if [ -z "$service_ctl_file" ]; then die $LINENO "ctl file for service $service is not present." fi - sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { @@ -370,10 +371,6 @@ function install_ovn { sudo mkdir -p $OVS_RUNDIR sudo chown $(whoami) $OVS_RUNDIR - # NOTE(lucasagomes): To keep things simpler, let's reuse the same - # RUNDIR for both OVS and OVN. 
This way we avoid having to specify the - # --db option in the ovn-{n,s}bctl commands while playing with DevStack - sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then # If OVS is already installed, remove it, because we're about to @@ -616,12 +613,12 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" - _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" else _start_process "$OVSDB_SERVER_SERVICE" _start_process "$OVS_VSWITCHD_SERVICE" @@ -660,7 +657,7 @@ function _start_ovs { enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" - _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" vtep-ctl set-manager tcp:$HOST_IP:6640 fi @@ -704,26 +701,26 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_NORTHD_SERVICE" fi # Wait for the service to be ready # Check for socket and db files for both OVN NB and SB - wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock wait_for_db_file $OVN_DATADIR/ovnnb_db.db wait_for_db_file $OVN_DATADIR/ovnsb_db.db if is_service_enabled tls-proxy; then - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem fi - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then @@ -731,7 +728,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" - _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_SERVICE" fi @@ -740,7 +737,7 @@ function start_ovn { if is_service_enabled ovn-controller-vtep ; then if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" - _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_VTEP_SERVICE" fi From 5e7afb779c469f593a1628e8f63c66989b7e2c49 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Mon, 24 Oct 2022 12:17:48 +0200 Subject: [PATCH 1636/1936] Run dmesg command with sudo It seems that setting "sysctl kernel.dmesg_restrict" was changed in Ubuntu 22.04 (Jammy) to "1" and because of that running "dmesg" command requires now root privileges. Closes-bug: #1994023 Change-Id: I2adc76e3025fadf994bab2e2e1fd608e688874fc --- lib/neutron_plugins/ovs_source | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 164d574c42..ea71e60e68 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -33,9 +33,9 @@ function load_module { local fatal=$2 if [ "$(trueorfalse True fatal)" == "True" ]; then - sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") + sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module") else - sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) + sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg) fi } @@ -103,7 +103,7 @@ function prepare_for_ovs_compilation { function load_ovs_kernel_modules { load_module openvswitch load_module vport-geneve False - dmesg | tail + sudo dmesg | tail } # reload_ovs_kernel_modules() - reload openvswitch kernel module From 47a429777ce71e4d69e1894f173cf87e731b3a6e Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 4 Nov 2022 14:31:03 +0100 Subject: [PATCH 1637/1936] Extend single-core-review for non-functional changes Adding a second exception for single-core-review in Devstack repository - changes which do not affect core functionality, like f.e. job cleanups, can be reviewed by a single core. 
Change-Id: Idb6cefa510fdbfed41379eb410f4884852d1177f --- doc/source/contributor/contributing.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 4de238fbf8..8b5a85b3df 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -42,8 +42,9 @@ Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the Devstack require two ``Code-Review +2`` votes from Devstack core reviewers before one of the core reviewers can approve the patch -by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate -which can be approved by single core reviewers. +by giving ``Workflow +1`` vote. There are 2 exceptions, approving patches to +unblock the gate and patches that do not relate to the Devstack's core logic, +like for example old job cleanups, can be approved by single core reviewers. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ From d1c2bf5e7c739bc5a7eeac602b477edb9f6630c2 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 2 Nov 2022 16:43:41 +0100 Subject: [PATCH 1638/1936] Add new service "file_tracker" This new service periodically tracks the file open in the system. Closes-Bug: #1995502 Change-Id: I02e097fef07655ff571af9f35bf258b2ed975098 --- .zuul.yaml | 4 ++++ doc/source/debugging.rst | 6 +++++ lib/dstat | 6 +++++ tools/file_tracker.sh | 47 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+) create mode 100755 tools/file_tracker.sh diff --git a/.zuul.yaml b/.zuul.yaml index 6dbb5bacbe..ce38760203 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -469,6 +469,7 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true group-vars: @@ -477,6 +478,7 @@ # Shared services dstat: false memory_tracker: true + file_tracker: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -544,6 +546,7 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true tls-proxy: true @@ -593,6 +596,7 @@ # Shared services dstat: false memory_tracker: true + file_tracker: true tls-proxy: true # Nova services n-cpu: true diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst index fd0d9cdf74..3ca0ad94b4 100644 --- a/doc/source/debugging.rst +++ b/doc/source/debugging.rst @@ -20,6 +20,12 @@ provides consumption output when available memory is seen to be falling (i.e. processes are consuming memory). It also provides output showing locked (unswappable) memory. +file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. 
+ tcpdump ------- diff --git a/lib/dstat b/lib/dstat index eb03ae0fb2..870c901d2a 100644 --- a/lib/dstat +++ b/lib/dstat @@ -40,12 +40,18 @@ function start_dstat { if is_service_enabled peakmem_tracker; then die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat stop_process memory_tracker + stop_process file_tracker } # Restore xtrace diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker From a4680766515ed9317b71cfb39cd0d75dc04f3d9c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 9 Nov 2022 10:11:46 -0800 Subject: [PATCH 1639/1936] Make debian-bullseye job non-voting As noted in the QA meeting this week, this job is failing due to something that seems outside of our control: https://meetings.opendev.org/meetings/qa/2022/qa.2022-11-08-15.00.log.html Make it non-voting until that is resolved. Change-Id: Ia571d1dab45eb1bbb8665373d416515d3c95fb14 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 6dbb5bacbe..1923444601 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -674,6 +674,8 @@ description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye timeout: 9000 + # TODO(danms) n-v until the known issue is resolved + voting: false vars: configure_swap_size: 4096 From 97b2a51d6beee4fd58b93027d823d6fd90f5c11f Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 9 Nov 2022 11:58:37 -0800 Subject: [PATCH 1640/1936] Fix dbcounter install on Debian Bullseye The dbcounter install on Debian Bullseye is broken in a really fun way. The problem is that we end up mixing pypi openssl and distro cryptography under pip and those two versions of libraries are not compatible. The reason this happens is that debian's pip package debundles the pip deps. This splits them out into /usr/share/python-wheels and it will prefer distro versions of libraries over pypi installed versions of libraries. But if a pypi version is installed and a distro version is not then the pypi version is used. If the pypi version of library A does not work with distro version of library B then debundled pip breaks. This has happened with crypytography and pyOpenSSL. 
This happens because urllib3 (a debundled pip dep) appears to use pyopenssl conditionally. Novnc depends on python3-cryptography, and openstack depends on cryptogrpahy from pypi ensuring we get both a distro and a pypi version installed. However, pyOpenSSL is only pulled in from pypi via openstack deps. This leaves debundled urllib3 attempting to use pypi pyOpenSSL with distro cryptography and that combo isn't valid due to an interface change. To fix this we install python3-openssl ensuring that debundled pip will use distro pyOpenSSL with distro cryptography making everything happy again. But we only do this when we install novnc as novnc is what pulls in distro cryptography in the first place. We can't simply install python3-openssl on all debuntu platforms because this breaks Ubuntu Focal in the other direction. On Ubuntu focal distro pip uses distro pyOpenSSL when no pypi pyOpenSSl is installed (prior to keystone install) and is not compatible with pypi cryptography. Honestly, this whole intersection between distro and pypi installs of cryptography and pyOpenSSL could probably be made cleaner. One option would be for us to always install the constraints version of both packages from pypi and the distro pacakges very early in the devstack run. But that seems far more complicated so I'm not attempting that here. Change-Id: I0fc6a8e66e365ac49c6c7ceb4c71c68714b9f541 --- lib/nova | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6de1d3382f..5c619bb762 100644 --- a/lib/nova +++ b/lib/nova @@ -885,8 +885,23 @@ function install_nova { # a websockets/html5 or flash powered VNC console for vm instances NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. This happens because novnc pulls in distro cryptography + # which will be prefered by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian. + local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH From 857f4993f35fbdc83771b9632d3525766de194a1 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 19 Oct 2022 20:15:42 -0500 Subject: [PATCH 1641/1936] Add RBAC scope and new defaults setting support for Nova & Tempest Nova is ready with the scope and new defaults as per the new RBAC design. Adding devstack flag to enable the scope checks and new defaults enforcement in nova side. Change-Id: I305ea626a4b622c5534d523f4b619832f9d35f8d --- lib/nova | 10 ++++++++++ lib/tempest | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/lib/nova b/lib/nova index 8e8ea8a175..63c6a86a66 100644 --- a/lib/nova +++ b/lib/nova @@ -97,6 +97,12 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. 
+# This is used to switch the compute API policies enable the scope and new defaults. +# By Default, these flag are False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE) + if [[ $SERVICE_IP_VERSION == 6 ]]; then NOVA_MY_IP="$HOST_IPV6" else @@ -481,6 +487,10 @@ function create_nova_conf { NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" + if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $NOVA_CONF oslo_policy enforce_new_defaults True + iniset $NOVA_CONF oslo_policy enforce_scope True + fi if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" diff --git a/lib/tempest b/lib/tempest index 87a2244784..b232f24540 100644 --- a/lib/tempest +++ b/lib/tempest @@ -674,6 +674,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name '' fi + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope glance true fi From d00921a57bcd9b408817ac7feddfc49b49b9cea2 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 14 Nov 2022 06:50:45 +0000 Subject: [PATCH 1642/1936] Revert "Make debian-bullseye job non-voting" This reverts commit a4680766515ed9317b71cfb39cd0d75dc04f3d9c. Reason for revert: Debian job got repaired Change-Id: I3ef969f6e373de103d26c9282cab94cea7ae87e5 --- .zuul.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 1923444601..6dbb5bacbe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -674,8 +674,6 @@ description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye timeout: 9000 - # TODO(danms) n-v until the known issue is resolved - voting: false vars: configure_swap_size: 4096 From 8d299efa4b6346ccfc3c6fcf9cf011b3c884bebc Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Sun, 9 Oct 2022 11:00:07 +0200 Subject: [PATCH 1643/1936] Switch devstack nodeset to Ubuntu 22.04 (jammy) Depends-On: https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/864948 Change-Id: I26b4784a4d772abbf8572f6273bda37f2fec5336 --- .zuul.yaml | 54 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 56acb37a03..76a70dc85c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -168,6 +168,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + - name: compute1 + label: ubuntu-jammy + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: @@ -455,7 +485,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-focal + nodeset: openstack-single-node-jammy required-projects: - opendev.org/openstack/requirements vars: @@ -526,6 +556,7 @@ - opendev.org/openstack/swift timeout: 7200 vars: + configure_swap_size: 4096 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -651,7 +682,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-focal + nodeset: openstack-two-node-jammy description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -669,8 +700,6 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false - vars: - configure_swap_size: 4096 - job: name: devstack-platform-debian-bullseye @@ -693,13 +722,11 @@ configure_swap_size: 4096 - job: - name: devstack-platform-ubuntu-jammy + name: devstack-platform-ubuntu-focal parent: tempest-full-py3 - description: Ubuntu 22.04 LTS (jammy) platform test - nodeset: openstack-single-node-jammy + description: Ubuntu 20.04 LTS (focal) platform test + nodeset: openstack-single-node-focal timeout: 9000 - vars: - configure_swap_size: 4096 - job: name: devstack-platform-ubuntu-jammy-ovn-source @@ -769,8 +796,6 @@ description: Fedora latest platform test nodeset: devstack-single-node-fedora-latest voting: false - vars: - configure_swap_size: 4096 - job: name: devstack-platform-fedora-latest-virt-preview @@ -779,7 +804,6 @@ nodeset: devstack-single-node-fedora-latest voting: false vars: - configure_swap_size: 4096 devstack_localrc: ENABLE_FEDORA_VIRT_PREVIEW_REPO: true @@ -839,7 +863,7 @@ - job: name: devstack-unit-tests - nodeset: ubuntu-focal + nodeset: ubuntu-jammy description: | Runs unit tests on devstack project. 
@@ -860,7 +884,7 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-multinode @@ -910,7 +934,7 @@ # https://bugs.launchpad.net/neutron/+bug/1979047 # - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-focal - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From 818d1a225d54291d1da1f8011f92affb2998d0e9 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Mon, 28 Nov 2022 11:19:45 +0100 Subject: [PATCH 1644/1936] [doc] Update Ubuntu to 22.04 This updates documentation to reflect the switch to Ubuntu 22.04 (jammy) in the CI: https://review.opendev.org/c/openstack/devstack/+/860795 Change-Id: I8bee430029dcc719629bd92451c2791571f8a30c --- doc/source/index.rst | 2 +- tools/install_pip.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index ba53c6d279..3f206f411e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. -If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the +If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. Add Stack User (optional) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 7c5d4c6555..91b180c06f 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -139,7 +139,7 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # recent enough anyway. This is included via rpms/general : # Simply fall through elif is_ubuntu; then - # pip on Ubuntu 20.04 is new enough, too + # pip on Ubuntu 20.04 and higher is new enough, too # drop setuptools from u-c sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else From db3eff7dd27acdc973e8d189bda80d642be92f03 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 30 Nov 2022 14:03:36 +0900 Subject: [PATCH 1645/1936] Stop setting [ovs_vif_ovs] ovsdb_interface The option was already deprecated in os-vif 2.2.0[1]. The override is no longer required since bug 1929446 was already resolved. [1] https://review.opendev.org/c/openstack/os-vif/+/744816 Related-Bug: #1929446 Change-Id: I5bc55723a178b32d947da2ac91d2f62aa8124990 --- lib/os-vif | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/os-vif b/lib/os-vif index 865645c0d5..7c8bee3744 100644 --- a/lib/os-vif +++ b/lib/os-vif @@ -1,10 +1,5 @@ #!/bin/bash -# support vsctl or native. -# until bug #1929446 is resolved we override the os-vif default -# and fall back to the legacy "vsctl" driver. 
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"} - function is_ml2_ovs { if [[ "${Q_AGENT}" == "openvswitch" ]]; then echo "True" @@ -19,11 +14,9 @@ OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) function configure_os_vif { if [[ -e ${NOVA_CONF} ]]; then - iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi if [[ -e ${NEUTRON_CONF} ]]; then - iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi } From 16c2b389ed8efca70fa1e65395becdaea84f8b44 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 30 Nov 2022 14:24:07 -0600 Subject: [PATCH 1646/1936] Add RBAC scope and new defaults setting support for placement Adding devstack flag to enable and test the Placement API policies scope and new defaults. Depends-On: https://review.opendev.org/c/openstack/tempest/+/866212 Change-Id: I6f56fc28f2c1e4cdde946deb2ae06afddf85ff0d --- lib/placement | 10 ++++++++++ lib/tempest | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/lib/placement b/lib/placement index b7798669a1..bc22c564f4 100644 --- a/lib/placement +++ b/lib/placement @@ -48,6 +48,12 @@ fi PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By Default, these flag are False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + # Functions # --------- @@ -111,6 +117,10 @@ function configure_placement { else _config_placement_apache_wsgi fi + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True + fi } # create_placement_accounts() - Set up required placement accounts diff --git a/lib/tempest b/lib/tempest index b232f24540..5cd4d18439 100644 --- a/lib/tempest +++ b/lib/tempest @@ -678,6 +678,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG enforce_scope nova true fi + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope glance true fi From 6440c6d7e69c6726c8d31ea225b90967c50528e8 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Fri, 5 Aug 2022 14:18:13 +0800 Subject: [PATCH 1647/1936] Add openEuler 22.03 LTS support openEuler 20.03 LTS SP2 support was removed from devstack in last few months due to its python version is too old and the CI job always fail. And openEuler 20.03 LTS SP2 was out of maintainer in May 2022 by openEuler community. The newest LTS version was released in March 2022 called 22.03 LTS. This release will be maintained for at least 2 years. And the python version is 3.9 which works well for devstack. This Patch add the openEuler distro support back. And add the related CI job to make sure its works well. 
Change-Id: I99c99d08b4a44d3dc644bd2e56b5ae7f7ee44210 --- .zuul.yaml | 72 ++++++++++++++++++++++++++ doc/source/index.rst | 3 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +- files/rpms/nova | 2 +- functions-common | 13 ++++- lib/apache | 2 +- lib/nova_plugins/functions-libvirt | 8 ++- roles/apache-logs-conf/tasks/main.yaml | 1 + stack.sh | 11 +++- 10 files changed, 109 insertions(+), 9 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 471ca100c8..8e20f6ed34 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -108,6 +108,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-openeuler-22.03 + nodes: + - name: controller + label: openEuler-22-03-LTS + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -777,6 +787,62 @@ # Enable Neutron ML2/OVS services q-agt: true +- job: + name: devstack-platform-openEuler-22.03-ovn-source + parent: tempest-full-py3 + description: openEuler 22.03 LTS platform test (OVN) + nodeset: devstack-single-node-openeuler-22.03 + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_localrc: + # NOTE(wxy): OVN package is not supported by openEuler yet. Build it + # from source instead. + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "v21.06.0" + OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-openEuler-22.03-ovs + parent: tempest-full-py3 + description: openEuler 22.03 LTS platform test (OVS) + nodeset: devstack-single-node-openeuler-22.03 + voting: false + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -885,6 +951,8 @@ - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1017,3 +1085,7 @@ periodic: jobs: - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs diff --git a/doc/source/index.rst b/doc/source/index.rst index 3f206f411e..1e932f88a5 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,8 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and +openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. 
diff --git a/files/rpms/ceph b/files/rpms/ceph index 33a55f80ea..19f158fd57 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9 +redhat-lsb-core # not:rhel9,openEuler-22.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 7697513149..b6866de62d 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -26,9 +26,9 @@ pkgconfig postgresql-devel # psycopg2 psmisc python3-devel -python3-pip +python3-pip # not:openEuler-22.03 python3-systemd -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index f2824ee2c4..e0f13b854a 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -4,7 +4,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules +kernel-modules # not:openEuler-22.03 kpartx parted polkit diff --git a/functions-common b/functions-common index 0aee5d163e..4eed5d8407 100644 --- a/functions-common +++ b/functions-common @@ -399,7 +399,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core + sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -474,6 +474,8 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -525,6 +527,7 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "Rocky" ] || \ @@ -575,6 +578,14 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 705776c55b..dd8c9a0f06 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! [[ $DISTRO =~ f36 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 3e7d2801d6..c0e45ebb85 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -82,11 +82,17 @@ function install_libvirt { sudo dnf copr enable -y @virtmaint-sig/virt-preview fi + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + # Note that in CentOS/RHEL this needs to come from the RDO # repositories (qemu-kvm-ev ... which provides this package) # as the base system version is too old. 
We should have # pre-installed these - install_package qemu-kvm + install_package $qemu_package install_package libvirt libvirt-devel python3-libvirt if is_arch "aarch64"; then diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bd64574c9b..6b7ea37857 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,6 +64,7 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index cc90fca576..28576d1e14 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -394,6 +394,15 @@ elif [[ $DISTRO == "rhel9" ]]; then sudo dnf config-manager --set-enabled crb # rabbitmq and other packages are provided by RDO repositories. _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problem in openEuler. We should fix it first. Some required + # package/action runs before fixup script. So we can't fix there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. + install_package hostname openstack-release-wallaby + uninstall_package python3-pip fi # Ensure python is installed From 0a40648b3884c374e314105c33c2a20c85ab2f7f Mon Sep 17 00:00:00 2001 From: Miguel Lavalle Date: Wed, 7 Dec 2022 16:51:28 -0600 Subject: [PATCH 1648/1936] Fix the db user for mariadb in ubuntu 22.04 In Ubuntu 22.04, mariadb version 10.6 is installed. Per [0] and [1] authentication management was changed in version 10.4. This change adapts the way the db user is created to the new rules in versions 10.4 and later. [0] https://mariadb.com/kb/en/authentication-from-mariadb-104/ [1] https://mariadb.org/authentication-in-mariadb-10-4/ Closes-Bug: #1999090 Change-Id: I77a699a9e191eb83628ad5d361282e66744b6e4a --- lib/databases/mysql | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index b292da25bd..fbad44e36a 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -100,8 +100,13 @@ function configure_database_mysql { # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, - # because the package might have been installed already. - sudo mysqladmin -u root password $DATABASE_PASSWORD || true + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04 (jammy) because the authorization model change in + # version 10.4 of mariadb. See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! 
(is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi # In case of Mariadb, giving hostname in arguments causes permission # problems as it expects connection through socket @@ -115,13 +120,21 @@ function configure_database_mysql { # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + if [[ "$DISTRO" == "jammy" ]]; then + # For Ubuntu 22.04 (jammy) we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + else + sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" + sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + fi + fi + if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" fi - # Create DB user if it does not already exist - sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" - # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" # Now update ``my.cnf`` for some local needs and restart the mysql service From 30acfc6d14bb42db822352426cc2d4e337717c72 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 24 Nov 2022 14:12:08 +0100 Subject: [PATCH 1649/1936] [neutron] Don't configure firewall_driver for core ML2 plugin In the past firewall_driver setting was configured for ML2 plugin because it was used in the neutron.agent.securitygroups_rpc.is_firewall_enabled() function but currently it's not needed anymore as there is other config option "enable_security_group" for that. Related-bug: #1996748 Change-Id: I9b09c6afb3f1f1c33d1bdfea52ba6f4c0d0cf2dc --- lib/neutron_plugins/ml2 | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index fa61f1ea30..46edacdc54 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -111,18 +111,7 @@ function neutron_plugin_configure_service { fi fi fi - # REVISIT(rkukura): Setting firewall_driver here for - # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is - # used in the server, in case no L2 agent is configured on the - # server's node. If an L2 agent is configured, this will get - # overridden with the correct driver. The ml2 plugin should - # instead use its own config variable to indicate whether security - # groups is enabled, and that will need to be set here instead. 
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then From 9a1be7794bd3b1b06a89183a800f42f77cd1b1b9 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 8 Dec 2022 20:24:46 -0600 Subject: [PATCH 1650/1936] Pin tox<4.0.0 for <=stable/zed branch testing Tox 4.0.0 has some incompatible changes, epecially more strict on allowlist_externals. Tempest recently changed allowlist_externals not to be *[1] causing the failure on jobs where lib/tempest failing to run the tempest as command in virtual env. ---------- venv: commands[0]> tempest verify-config -uro /tmp/tmp.qH5KgJHTF4 venv: failed with tempest is not allowed, use allowlist_externals to allow it ------ We do not need to test/fix the <=stable/zed branches with tox 4.0.0 and pinning them with the compatible tox version of the time stable brnaches were releaased is better way. This commit proposes: 1. Pinning the tox<4.0.0 for <=stable/ze branches testing 2. Workaround to unblock the master gate by pinning it <4.0.0 but we should make our testing compatible with tox 4.0.0 soon. Depends-On: https://review.opendev.org/c/openstack/devstack/+/867066 Related-Bug: #1999183 [1] https://review.opendev.org/c/openstack/tempest/+/865314 devstack based job started failing to run tempest command on venv. Change-Id: I9a138af94dedc0d8ce5a0d519d75779415d3c30b --- lib/tempest | 7 ++++++- playbooks/tox/pre.yaml | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index b232f24540..ec2949ac38 100644 --- a/lib/tempest +++ b/lib/tempest @@ -779,7 +779,12 @@ function configure_tempest { # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - pip_install 'tox!=2.8.0' + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + pip_install 'tox!=2.8.0,<4.0.0' pushd $TEMPEST_DIR # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH # is tag name not master. git_clone would not checkout tag because diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml index d7e4670a80..68d5254251 100644 --- a/playbooks/tox/pre.yaml +++ b/playbooks/tox/pre.yaml @@ -5,4 +5,10 @@ bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. 
We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" From a52041cd3f067156e478e355f5712a60e12ce649 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 18 Nov 2022 11:39:56 +0100 Subject: [PATCH 1651/1936] Drop lib/neutron module Module lib/neutron was introduced long time ago as new module to deploy neutron. It was intended to replace old lib/neutron-legacy module. But since very long time it wasn't really finished and used by anyone and lib/neutron-legacy is defacto standard module used by everyone to deploy neutron with devstack. In [1] unfinished lib/neutron was deprecated and now it's time to remove it from the devstack code. This patch also renames old "lib/neutron-legacy" module to be "lib/neutron" now. Previously "old" lib/neutron-legacy module was accepting neutron services names wit "q-" prefix and "new" lib/neutron module was accepting services with "neutron-" prefix. Now, as there is only one module it accepts both prefixes. For historical reasons and to be consistent with old lib/neutron-legacy which was widely used everywhere, services will be named with "q-" prefix but both prefixes will be accepted to enable or disable services. This patch also moves _configure_neutron_service function to be called at the end of the "configure_neutron" after all agents and service plugins are already configured. [1] https://review.opendev.org/c/openstack/devstack/+/823653 Related-bug: #1996748 Change-Id: Ibf1c8b2ee6b6618f77cd8486e9c687993d7cb4a0 --- clean.sh | 1 - lib/neutron | 1438 ++++++++++++++-------- lib/neutron-legacy | 1097 +---------------- lib/neutron_plugins/README.md | 2 +- lib/neutron_plugins/bigswitch_floodlight | 2 +- lib/neutron_plugins/brocade | 2 +- lib/neutron_plugins/linuxbridge_agent | 2 +- lib/neutron_plugins/ml2 | 2 +- lib/neutron_plugins/openvswitch_agent | 2 +- lib/neutron_plugins/ovn_agent | 18 +- lib/neutron_plugins/services/l3 | 4 +- lib/neutron_plugins/services/metering | 2 +- lib/neutron_plugins/services/qos | 2 +- lib/tempest | 4 +- 14 files changed, 933 insertions(+), 1645 deletions(-) diff --git a/clean.sh b/clean.sh index 870dfd4313..6a31cc624a 100755 --- a/clean.sh +++ b/clean.sh @@ -50,7 +50,6 @@ source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy set -o xtrace diff --git a/lib/neutron b/lib/neutron index b3e3d72e8c..c8ee8c5e76 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,128 +1,311 @@ #!/bin/bash # # lib/neutron -# Install and start **Neutron** network services +# functions - functions specific to neutron # Dependencies: -# # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # -# - is_XXXX_enabled -# - install_XXXX -# - configure_XXXX -# - init_XXXX -# - start_XXXX -# - stop_XXXX -# - cleanup_XXXX +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron -# Save trace 
setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs -# Defaults + +# Neutron Networking +# ------------------ + +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. + +# Settings # -------- + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + + # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient + +NEUTRON_DIR=$DEST/neutron +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi + +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: # - False (default) : Run neutron under Eventlet # - True : Run neutron under uwsgi # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable # enough NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) -NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} -NEUTRON_DIR=$DEST/neutron + +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" # and "enforce_new_defaults" to True in the Neutron's config to enforce usage # of the new RBAC policies and scopes. NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) -NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network -# -# Default is 'dvr_snat' since it can handle both DVR and legacy routers -NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat} - -NEUTRON_BIN_DIR=$(get_python_exec_prefix) -NEUTRON_DHCP_BINARY="neutron-dhcp-agent" - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini -NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} - -NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini -NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini -NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ -NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. 
Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron -NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi -# By default, use the ML2 plugin -NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} -NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN -NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME -NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini} -NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi -NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} -NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} -NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} -NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge +# agent, as described below. 
+# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -# Public facing bits -if is_service_enabled tls-proxy; then - NEUTRON_SERVICE_PROTOCOL="https" +# With the linuxbridge agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then + default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') + die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" + LB_PHYSICAL_INTERFACE=$default_route_dev fi -NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} -NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696} -NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696} -NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} -NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) -NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" -NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi -# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create -# an external network bridge -PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# Agent metering service plugin functions +# ------------------------------------------- -# Network type - default vxlan, however enables vlan based jobs to override -# using the legacy environment variable as well as a new variable in greater -# alignment with the naming scheme of this plugin. 
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan} +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering -NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}} +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 -# Physical network for VLAN network usage. -NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos -# The name of the service in the endpoint URL -NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - NEUTRON_ENDPOINT_SERVICE_NAME="networking" +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False fi +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace -# Additional neutron api config files -declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Functions # --------- @@ -136,310 +319,194 @@ function is_neutron_enabled { } # Test if any Neutron services are enabled -# is_neutron_enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it will not be called from any other Devstack plugins, like e.g. Neutron +# plugin function is_neutron_legacy_enabled { - # first we need to remove all "neutron-" from DISABLED_SERVICES list - disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g') - [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1 - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 + return 0 } -if is_neutron_legacy_enabled; then - source $TOP_DIR/lib/neutron-legacy -fi - -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi fi - - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." 
fi - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) done -} -# configure_root_helper_options() - Configure agent rootwrap helper options -function configure_root_helper_options { - local conffile=$1 - iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD" - iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD" + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" } -# configure_neutron() - Set config files, create data dirs, etc -function configure_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR - - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - configure_neutron_rootwrap - - mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} - # NOTE(yamamoto): A decomposed plugin should prepare the config file in - # its devstack plugin. - if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." fi + echo "$opts" +} - iniset $NEUTRON_CONF database connection `database_connection_url neutron` - iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - +# configure_neutron() +# Set common config for all neutron server and agents. +function configure_neutron { + _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # Neutron API server & Neutron plugin - if is_service_enabled neutron-api; then - local policy_file=$NEUTRON_CONF_DIR/policy.json - # Allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. 
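# For reference, determine_config_files above only assembles the option
# string; with stock settings and no extra plugin config files the server
# launched later is roughly the command below.  The plugin config path is
# whatever the selected plugin script (ml2 by default) sets and may differ:
#
#   neutron-server --config-file /etc/neutron/neutron.conf \
#       --config-file /etc/neutron/plugins/ml2/ml2_conf.ini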
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $policy_file - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file - fi - - cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - - iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN - - iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING - - iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF neutron - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure tenant network type - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE - - local mech_drivers="openvswitch" - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - mech_drivers+=",l2population" - else - mech_drivers+=",linuxbridge" - fi - if [[ "$mech_drivers" == *"linuxbridge"* ]]; then - iniset $NEUTRON_CONF experimental linuxbridge True - fi - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME - if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} - fi - if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then - neutron_ml2_extension_driver_add port_security - fi - configure_rbac_policies - fi - - # Neutron OVS or LB agent - if is_service_enabled neutron-agent; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF - - # Configure the neutron agent - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP - elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP - - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True - iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True - iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True - fi - fi - - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering fi - - # DHCP Agent - if is_service_enabled neutron-dhcp; then - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - - iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True - - configure_root_helper_options $NEUTRON_DHCP_CONF - iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent + fi + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent fi - if is_service_enabled neutron-l3; then - cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_service_plugin_class_add router - configure_root_helper_options $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi - # Configure the neutron agent to serve external network ports - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - else - iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - fi + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin + fi - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos fi fi - # Metadata - if is_service_enabled neutron-metadata-agent; then - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - - iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST - iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS - # TODO(ihrachys) do we really need to set rootwrap for metadata agent? 
- configure_root_helper_options $NEUTRON_META_CONF - - # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service fi - # Format logging - setup_logging $NEUTRON_CONF + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - # Metering - if is_service_enabled neutron-metering; then - cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF - neutron_service_plugin_class_add metering + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" fi } -# configure_neutron_rootwrap() - configure Neutron's rootwrap -function configure_neutron_rootwrap { - # Deploy new rootwrap filters files (owned by root). - # Wipe any existing rootwrap.d files first - if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done fi +} - # Deploy filters to /etc/neutron/rootwrap.d - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf - - # Set up the rootwrap sudoers for Neutron - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap -} - -# Make Neutron-required changes to nova.conf -# Takes a single optional argument which is the config file to update, -# if not passed $NOVA_CONF is used. -function configure_neutron_nova_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
+function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username neutron + iniset $conf neutron username "$Q_ADMIN_USERNAME" iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "Default" - iniset $conf neutron project_name "$SERVICE_TENANT_NAME" - iniset $conf neutron project_domain_name "Default" - iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $conf neutron region_name "$REGION_NAME" # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf - if is_service_enabled neutron-metadata-agent; then + if is_service_enabled q-meta neutron-metadata-agent; then iniset $conf neutron service_metadata_proxy "True" fi + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } +# create_neutron_accounts() - Set up common required neutron accounts + # Tenant User Roles # ------------------------------------------------------------------ # service neutron admin # if enabled -# create_neutron_accounts() - Create required service accounts -function create_neutron_accounts_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" +# Migrated from keystone_data.sh +function create_neutron_accounts { local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ else - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ fi if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi - - if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then + if is_service_enabled q-svc neutron-api; then create_service_user "neutron" - neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ "$REGION_NAME" "$neutron_url" fi } # init_neutron() - Initialize databases, etc. -function init_neutron_new { - - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - recreate_database neutron - +function init_neutron { + recreate_database $Q_DB_NAME time_start "dbsync" # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head time_stop "dbsync" } # install_neutron() - Collect source and prepare -function install_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - +function install_neutron { # Install neutron-lib from git so we make sure we're testing # the latest code. 
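# Sketch of the ``[neutron]`` section that create_nova_conf_neutron above
# writes into nova.conf; the URL, password, project and region values here
# are illustrative placeholders, and service_metadata_proxy is only set when
# the metadata agent is enabled:
#
#   [neutron]
#   auth_type = password
#   auth_url = http://10.0.0.10/identity
#   username = neutron
#   password = secret
#   user_domain_name = Default
#   project_name = service
#   project_domain_name = Default
#   auth_strategy = keystone
#   region_name = RegionOne
#   service_metadata_proxy = True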
if use_library_from_git "neutron-lib"; then @@ -447,17 +514,12 @@ function install_neutron_new { setup_dev_lib "neutron-lib" fi - # L3 service requires radvd - if is_service_enabled neutron-l3; then - install_package radvd - fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR - if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then - #TODO(sc68cal) - kind of ugly - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - neutron_plugin_install_agent_packages + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn fi - } # install_neutronclient() - Collect source and prepare @@ -469,152 +531,256 @@ function install_neutronclient { fi } -# start_neutron_api() - Start the API process ahead of other things -function start_neutron_api { - local service_port=$NEUTRON_SERVICE_PORT - local service_protocol=$NEUTRON_SERVICE_PROTOCOL +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. + if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages + fi +} + +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} + +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + init_ovn + start_ovn + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi + fi +} + +# Start running processes +function start_neutron_service_and_check { + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + if is_service_enabled tls-proxy; then - service_port=$NEUTRON_SERVICE_PORT_INT + service_port=$Q_PORT_INT service_protocol="http" fi - - local opts="" - opts+=" --config-file $NEUTRON_CONF" - opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" - local cfg_file - for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - + # Start the Neutron service if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/ + neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" + run_process neutron-rpc-server 
"$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" else - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/ + run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + neutron_url=$service_protocol://$Q_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT + start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT fi fi if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi + echo "Waiting for Neutron to start..." - if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then - die $LINENO "neutron-api did not start" - fi + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT } -# start_neutron() - Start running processes -function start_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - # Start up the neutron agents if enabled - # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins - # can resolve the $NEUTRON_AGENT_BINARY - if is_service_enabled neutron-agent; then - # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" - fi - if is_service_enabled neutron-dhcp; then - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF" - fi - if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" - fi - if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} + +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE fi fi - if is_service_enabled neutron-metadata-agent; then 
- run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF" - fi +} - if is_service_enabled neutron-metering; then - run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" - fi +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" } -# stop_neutron() - Stop running processes -function stop_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - for serv in neutron-api neutron-agent neutron-l3; do - stop_process $serv - done +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} - if is_service_enabled neutron-rpc-server; then - stop_process neutron-rpc-server - fi +function stop_l2_agent { + stop_process q-agt +} - if is_service_enabled neutron-dhcp; then - stop_process neutron-dhcp +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi - if is_service_enabled neutron-metadata-agent; then - stop_process neutron-metadata-agent + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + stop_process neutron-rpc-server + stop_process neutron-api + else + stop_process q-svc fi -} -# neutron_service_plugin_class_add() - add service plugin class -function neutron_service_plugin_class_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local service_plugin_class=$1 - local plugins="" + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 + fi - plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - if [ $plugins ]; then - plugins+="," + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta + fi + + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop + fi + + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : fi - plugins+="${service_plugin_class}" - iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } -function _neutron_ml2_extension_driver_add { - local driver=$1 - local drivers="" +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_other + stop_l2_agent - drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers) - if [ $drivers ]; then - drivers+="," + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi - drivers+="${driver}" - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers } -function neutron_server_config_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. 
If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. + + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD + fi } -# neutron_deploy_rootwrap_filters() - deploy rootwrap filters -function neutron_deploy_rootwrap_filters_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local srcdir=$1 - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi } -# Dispatch functions -# These are needed for compatibility between the old and new implementations -# where there are function name overlaps. These will be removed when -# neutron-legacy is removed. -# TODO(sc68cal) Remove when neutron-legacy is no more. 
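# The calls in _configure_public_network_connectivity above and in
# cleanup_neutron below pair up as mirror images: stacking moves the public
# interface's addresses (and default route) onto the OVS bridge and adds the
# interface as a bridge port, while cleanup moves them back and removes the
# port again.  For the IPv4 case:
#
#   _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
#   _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"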
+# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api @@ -623,153 +789,367 @@ function cleanup_neutron { sudo rm -f $(apache_site_config_for neutron-api) fi - if is_neutron_legacy_enabled; then - # Call back to old function - cleanup_mutnauq "$@" - else - cleanup_neutron_new "$@" + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi -} -function configure_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - configure_mutnauq "$@" - else - configure_neutron_new "$@" + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + if [[ $Q_AGENT == "linuxbridge" ]]; then + neutron_lb_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn fi } -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. 
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. exiting" + fi + + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + setup_logging $NEUTRON_CONF + + if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then + # Set the service port for a proxy to take the original + iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" + iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True fi + + _neutron_setup_rootwrap } +function _configure_neutron_dhcp_agent { -function configure_neutron_nova { - if is_neutron_legacy_enabled; then - # Call back to old function - create_nova_conf_neutron $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - create_nova_conf_neutron $conf - done - fi - else - configure_neutron_nova_new $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - configure_neutron_nova_new $conf - done + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv 
True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi fi fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function create_neutron_accounts { - if is_neutron_legacy_enabled; then - # Call back to old function - create_mutnauq_accounts "$@" - else - create_neutron_accounts_new "$@" - fi + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE } -function init_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - init_mutnauq "$@" - else - init_neutron_new "$@" - fi +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } -function install_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - install_mutnauq "$@" - else - install_neutron_new "$@" +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. +function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +function _replace_api_paste_composite { + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. 
+function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + + if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + _replace_api_paste_composite fi + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service } +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_service_plugin_class_add "$@" - else - neutron_service_plugin_class_add_new "$@" + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" fi } +# neutron_ml2_extension_driver_add() - add ML2 extension driver function neutron_ml2_extension_driver_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_ml2_extension_driver_add_old "$@" - else - _neutron_ml2_extension_driver_add "$@" + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" fi } -function install_neutron_agent_packages { - if is_neutron_legacy_enabled; then - # Call back to old function - install_neutron_agent_packages_mutnauq "$@" - else - : - fi +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) } -function neutron_server_config_add { - if is_neutron_legacy_enabled; then - # Call back to old function - mutnauq_server_config_add "$@" - else - neutron_server_config_add_new "$@" +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
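# Typical usage of the three helpers above from a devstack plugin or service
# script, e.g. enabling the router service plugin and the port_security ML2
# extension; the extra server config file name is a hypothetical example:
#
#   neutron_service_plugin_class_add router
#   neutron_ml2_extension_driver_add port_security
#   neutron_server_config_add /etc/neutron/plugin_extra.conf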
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } -function start_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - start_mutnauq_l2_agent "$@" - start_mutnauq_other_agents "$@" +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - start_neutron_new "$@" + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF } -function stop_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - stop_mutnauq "$@" - else - stop_neutron_new "$@" +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi } -function neutron_deploy_rootwrap_filters { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_deploy_rootwrap_filters "$@" - else - neutron_deploy_rootwrap_filters_new "$@" +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. 
+ iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} +# Functions for Neutron Exercises +#-------------------------------- + +function delete_probe { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" +} + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec +} + +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index baf67f209e..e90400fec1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1,1097 +1,6 @@ #!/bin/bash -# -# lib/neutron -# functions - functions specific to neutron -# Dependencies: -# ``functions`` file -# ``DEST`` must be defined -# ``STACK_USER`` must be defined +# TODO(slaweq): remove that file when other projects, like e.g. Grenade will +# be using lib/neutron -# ``stack.sh`` calls the entry points in this order: -# -# - install_neutron_agent_packages -# - install_neutronclient -# - install_neutron -# - install_neutron_third_party -# - configure_neutron -# - init_neutron -# - configure_neutron_third_party -# - init_neutron_third_party -# - start_neutron_third_party -# - create_nova_conf_neutron -# - configure_neutron_after_post_config -# - start_neutron_service_and_check -# - check_neutron_third_party_integration -# - start_neutron_agents -# - create_neutron_initial_network -# -# ``unstack.sh`` calls the entry points in this order: -# -# - stop_neutron -# - stop_neutron_third_party -# - cleanup_neutron - -# Functions in lib/neutron are classified into the following categories: -# -# - entry points (called from stack.sh or unstack.sh) -# - internal functions -# - neutron exercises -# - 3rd party programs - - -# Neutron Networking -# ------------------ - -# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want -# to run Neutron on this host, make sure that q-svc is also in -# ``ENABLED_SERVICES``. -# -# See "Neutron Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Neutron. 
- -# Settings -# -------- - - -# Neutron Network Configuration -# ----------------------------- - -if is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - - -# Set up default directories -GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) - -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini - -# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" -# and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# RHEL's support for namespaces requires using veths with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} - -# Allow to skip stopping of OVN services -SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} - -# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. -# /etc/neutron is assumed by many of devstack plugins. Do not change. 
-_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron - -# The name of the service in the endpoint URL -NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - NEUTRON_ENDPOINT_SERVICE_NAME="networking" -fi - -# List of config file names in addition to the main plugin config file -# To add additional plugin config files, use ``neutron_server_config_add`` -# utility function. For example: -# -# ``neutron_server_config_add file1`` -# -# These config files are relative to ``/etc/neutron``. The above -# example would specify ``--config-file /etc/neutron/file1`` for -# neutron server. -declare -a -g Q_PLUGIN_EXTRA_CONF_FILES - -# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. -declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE, VXLAN or GENEVE tunnels for tenant networks, -# specify the range of IDs from which tenant networks are -# allocated. Can be overridden in ``localrc`` if necessary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. -ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. 
The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. -if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then - source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN -fi - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# L3 Service functions -source $TOP_DIR/lib/neutron_plugins/services/l3 - -# Additional Neutron service plugins -source $TOP_DIR/lib/neutron_plugins/services/placement -source $TOP_DIR/lib/neutron_plugins/services/trunk -source $TOP_DIR/lib/neutron_plugins/services/qos - -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Save trace setting -_XTRACE_NEUTRON=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - else - die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - fi - fi - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." - fi - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) - done - - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are passed to the binary. 
-function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." - fi - echo "$opts" -} - -# configure_mutnauq() -# Set common config for all neutron server and agents. -function configure_mutnauq { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - if [[ $Q_AGENT == "ovn" ]]; then - configure_ovn - configure_ovn_plugin - fi - - # Configure Neutron's advanced services - if is_service_enabled q-placement neutron-placement; then - configure_placement_extension - fi - if is_service_enabled q-trunk neutron-trunk; then - configure_trunk_extension - fi - if is_service_enabled q-qos neutron-qos; then - configure_qos - if is_service_enabled q-l3 neutron-l3; then - configure_l3_agent_extension_fip_qos - configure_l3_agent_extension_gateway_ip_qos - fi - fi - - iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" - # devstack is not a tool for running uber scale OpenStack - # clouds, therefore running without a dedicated RPC worker - # for state reports is more than adequate. - iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 -} - -function create_nova_conf_neutron { - local conf=${1:-$NOVA_CONF} - iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username "$Q_ADMIN_USERNAME" - iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" - iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $conf neutron region_name "$REGION_NAME" - - # optionally set options in nova_conf - neutron_plugin_create_nova_conf $conf - - if is_service_enabled q-meta; then - iniset $conf neutron service_metadata_proxy "True" - fi - - iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_mutnauq_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_mutnauq_accounts { - local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi - if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then - neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME - fi - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - get_or_create_service "neutron" "network" "Neutron Service" - get_or_create_endpoint \ - "network" \ - "$REGION_NAME" "$neutron_url" - fi -} - -# init_mutnauq() - Initialize databases, etc. -function init_mutnauq { - recreate_database $Q_DB_NAME - time_start "dbsync" - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - time_stop "dbsync" -} - -# install_mutnauq() - Collect source and prepare -function install_mutnauq { - # Install neutron-lib from git so we make sure we're testing - # the latest code. - if use_library_from_git "neutron-lib"; then - git_clone_by_name "neutron-lib" - setup_dev_lib "neutron-lib" - fi - - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - - if [[ $Q_AGENT == "ovn" ]]; then - install_ovn - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages_mutnauq { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi -} - -# Finish neutron configuration -function configure_neutron_after_post_config { - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - configure_rbac_policies -} - -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True - else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False - fi -} - -# Start running OVN processes -function start_ovn_services { - if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn - if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then - if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then - echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " - echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" - else - create_public_bridge - fi - fi - fi -} - -# Start running processes -function start_neutron_service_and_check { - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - local cfg_file_options - local neutron_url - - cfg_file_options="$(determine_config_files neutron-server)" - - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port/ - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy 
neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi - if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then - neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME - fi - echo "Waiting for Neutron to start..." - - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT -} - -# Control of the l2 agent is separated out to make it easier to test partial -# upgrades (everything upgraded except the L2 agent) -function start_mutnauq_l2_agent { - run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - - if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi -} - -function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - start_mutnauq_l2_agent - start_mutnauq_other_agents -} - -function stop_mutnauq_l2_agent { - stop_process q-agt -} - -# stop_mutnauq_other() - Stop running processes -function stop_mutnauq_other { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-api - else - stop_process q-svc - fi - - if is_service_enabled q-l3; then - sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" - stop_process q-l3 - fi - - if is_service_enabled q-meta; then - stop_process q-meta - fi - - if is_service_enabled q-metering; then - neutron_metering_stop - fi - - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_mutnauq { - stop_mutnauq_other - stop_mutnauq_l2_agent - - if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then - stop_ovn - fi -} - -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup. If no IP is -# configured on the interface, just add it as a port to the OVS bridge. -function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - local del_ovs_port=$4 - local af=$5 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. 
- - local IP_REPLACE="" - local IP_DEL="" - local IP_UP="" - local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") - local ADD_OVS_PORT="" - local DEL_OVS_PORT="" - local ARP_CMD="" - - IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" - fi - - if [[ "$del_ovs_port" == "True" ]]; then - DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" - fi - - if [[ "$IP_BRD" != "" ]]; then - IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" - IP_UP="sudo ip link set $to_intf up" - if [[ "$af" == "inet" ]]; then - IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " - fi - fi - - # The add/del OVS port calls have to happen either before or - # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD - fi -} - -# _configure_public_network_connectivity() - Configures connectivity to the -# external network using $PUBLIC_INTERFACE or NAT on the single interface -# machines -function _configure_public_network_connectivity { - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. 
- if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi -} - -# cleanup_mutnauq() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_mutnauq { - - if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" - - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - # ip(8) wants the prefix length when deleting - local v6_gateway - v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') - sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" - fi - - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done - - if [[ $Q_AGENT == "ovn" ]]; then - cleanup_ovn - fi -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common { - _create_neutron_conf_dir - - # Uses oslo config generator to generate core sample configuration files - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - # allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. - if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE - fi - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. 
exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - # NOTE(hichihara): Some neutron vendor plugins were already decomposed and - # there is no config file in Neutron tree. They should prepare the file in each plugin. - if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then - cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE - elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS - iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - - # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True - iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! 
is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE -} - - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS - iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is enabled. -function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -function _replace_api_paste_composite { - local sep - sep=$(echo -ne "\x01") - # Replace it - $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. -function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - - if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - _replace_api_paste_composite - fi - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME - - # Configuration for neutron notifications to nova. 
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configuration for placement client - configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver -function _neutron_ml2_extension_driver_add_old { - local extension=$1 - if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS=$extension - elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" - fi -} - -# mutnauq_server_config_add() - add server config file -function mutnauq_server_config_add { - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). -function _neutron_deploy_rootwrap_filters { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the 
plugin configuration - # occurs to allow plugins to override the setting. - iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -# ssh check -function _ssh_check_neutron { - local from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" - test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec -} - -function plugin_agent_add_l2_agent_extension { - local l2_agent_extension=$1 - if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then - L2_AGENT_EXTENSIONS=$l2_agent_extension - elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then - L2_AGENT_EXTENSIONS+=",$l2_agent_extension" - fi -} - -# Restore xtrace -$_XTRACE_NEUTRON - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index ed40886fda..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. 
functions --------- -``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : optionally set options in nova_conf diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index d3f5bd5752..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -67,7 +67,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 310b72e5ad..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -72,7 +72,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index bdeaf0f3c6..a392bd0baf 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -97,7 +97,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 46edacdc54..c2e78c65cc 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -67,7 +67,7 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - _neutron_service_plugin_class_add $ML2_L3_PLUGIN + neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service { diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 7fed8bf853..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -68,7 +68,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e64224cbaa..dc8129553c 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -348,7 +348,7 @@ function compile_ovn { # OVN service sanity check function ovn_sanity_check { - if is_service_enabled q-agt neutron-agt; then + if is_service_enabled q-agt neutron-agent; then die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." elif is_service_enabled q-l3 neutron-l3; then die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." 
@@ -461,7 +461,7 @@ function filter_network_api_extensions { function configure_ovn_plugin { echo "Configuring Neutron for OVN" - if is_service_enabled q-svc ; then + if is_service_enabled q-svc neutron-api; then filter_network_api_extensions populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" @@ -485,7 +485,7 @@ function configure_ovn_plugin { inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -506,7 +506,7 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent ; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -539,7 +539,7 @@ function configure_ovn { fi # Metadata - if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 @@ -551,7 +551,7 @@ function configure_ovn { iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH + iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then @@ -680,7 +680,7 @@ function _start_ovn_services { if is_service_enabled ovs-vtep ; then _start_process "devstack@ovs-vtep.service" fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then _start_process "devstack@q-ovn-metadata-agent.service" fi } @@ -743,7 +743,7 @@ function start_ovn { fi fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF @@ -767,7 +767,7 @@ function _stop_process { } function stop_ovn { - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then sudo pkill -9 -f haproxy || : _stop_process "devstack@q-ovn-metadata-agent.service" fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 3dffc33d37..2bf884a8c4 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -323,7 +323,7 @@ function _neutron_configure_router_v4 { openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 
neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then @@ -372,7 +372,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5b32468d21..757a562ee6 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -12,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common { - _neutron_service_plugin_class_add $METERING_PLUGIN + neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent { diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos index af9eb3d5b4..c11c315586 100644 --- a/lib/neutron_plugins/services/qos +++ b/lib/neutron_plugins/services/qos @@ -6,7 +6,7 @@ function configure_qos_service_plugin { function configure_qos_core_plugin { - configure_qos_$NEUTRON_CORE_PLUGIN + configure_qos_$Q_PLUGIN } diff --git a/lib/tempest b/lib/tempest index eaad6d255e..44a9b6f29f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -737,12 +737,12 @@ function configure_tempest { # Neutron API Extensions # disable metering if we didn't enable the service - if ! is_service_enabled q-metering; then + if ! is_service_enabled q-metering neutron-metering; then DISABLE_NETWORK_API_EXTENSIONS+=", metering" fi # disable l3_agent_scheduler if we didn't enable L3 agent - if ! is_service_enabled q-l3; then + if ! is_service_enabled q-l3 neutron-l3; then DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi From aa47cb34ae25b66e46a216e1c9b7b668615b520b Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 27 Dec 2022 06:11:07 +0000 Subject: [PATCH 1652/1936] Add config options for cinder nfs backend Currently the cinder nfs backend leaves out few options in a multi backend deployment. It works in single nfs backend deployment as devstack-plugin-nfs correctly configures all options[1]. We can clearly see the difference between what devstack-plugin-nfs configures[1] and what devstack nfs configures[2]. Following options are missing which are added by this patch. 
* nas_host * nas_share_path * nas_secure_file_operations * nas_secure_file_permissions * nfs_snapshot_support [1] https://github.com/openstack/devstack-plugin-nfs/blob/dd12367f90fc86d42bfebe8a0ebb694dc0308810/devstack/plugin.sh#L60-L68 [2] https://github.com/openstack/devstack/blob/a52041cd3f067156e478e355f5712a60e12ce649/lib/cinder_backends/nfs#L32-L34 Change-Id: I03cad66abb3c6f2ae1d5cf943ac952a30961f783 --- lib/cinder_backends/nfs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs index 89a37a1f02..f3fcbeff19 100644 --- a/lib/cinder_backends/nfs +++ b/lib/cinder_backends/nfs @@ -32,6 +32,15 @@ function configure_cinder_backend_nfs { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. + iniset $CINDER_CONF $be_name nfs_snapshot_support True echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" } From 69d71cfdf9c24d48fbea366714f4595cbd120723 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 10 Jan 2023 20:13:47 -0600 Subject: [PATCH 1653/1936] Option to disable the scope & new defaults enforcement In this release cycle, a few services are enabling the enforce scope and new defaults by default. Example Nova: - https://review.opendev.org/c/openstack/nova/+/866218) Until the new defaults enalbing by default is not released we should keep testing the old defaults in existing jobs and we can add new jobs testing new defautls. To do that we can provide the way in devstack to keep scope/new defaults disable by default which can be enabled by setting enforce_scope variable to true. Once any service release the new defaults enabled by default then we can switch the bhavior, enable the scope/new defaults by default and a single job can disbale them to keep testing the old defaults until service does not remove those. 
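As a rough illustration of how this is meant to be consumed (example values only, not part of this patch): a local.conf or job definition would pick the behaviour through the variables this change reads, either globally or per service:

    [[local|localrc]]
    # keep exercising the old policy defaults (the default after this change)
    ENFORCE_SCOPE=False
    # or opt a single service into scope checks and the new defaults
    NOVA_ENFORCE_SCOPE=True

The global ENFORCE_SCOPE and the per-service *_ENFORCE_SCOPE variables are the ones checked in the hunks below.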
Change-Id: I5c2ec3e1667172a75e06458f16cf3d57947b2c53 --- lib/cinder | 3 +++ lib/glance | 4 ++++ lib/keystone | 7 ++++++- lib/nova | 3 +++ lib/placement | 3 +++ 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index bf2fe50e08..2424f928d1 100644 --- a/lib/cinder +++ b/lib/cinder @@ -411,6 +411,9 @@ function configure_cinder { if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $CINDER_CONF oslo_policy enforce_scope true iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false fi } diff --git a/lib/glance b/lib/glance index ba98f4133e..041acafc92 100644 --- a/lib/glance +++ b/lib/glance @@ -436,6 +436,10 @@ function configure_glance { iniset $GLANCE_API_CONF oslo_policy enforce_scope true iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false fi } diff --git a/lib/keystone b/lib/keystone index 80a136f78d..6cb4aac46a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -265,10 +265,15 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true - iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false fi } diff --git a/lib/nova b/lib/nova index 14eb8fc3da..3aa6b9e3b3 100644 --- a/lib/nova +++ b/lib/nova @@ -490,6 +490,9 @@ function create_nova_conf { if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $NOVA_CONF oslo_policy enforce_new_defaults True iniset $NOVA_CONF oslo_policy enforce_scope True + else + iniset $NOVA_CONF oslo_policy enforce_new_defaults False + iniset $NOVA_CONF oslo_policy enforce_scope False fi if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original diff --git a/lib/placement b/lib/placement index bc22c564f4..c6bf99f868 100644 --- a/lib/placement +++ b/lib/placement @@ -120,6 +120,9 @@ function configure_placement { if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True + else + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False fi } From 7fecba2f135f16204050b627bb850a87aa597bad Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Thu, 12 Jan 2023 17:31:36 +0530 Subject: [PATCH 1654/1936] [OVN] Ensure socket files are absent in init_ovn Just like we remove db files let's also remove socket files when initializing ovn. Those will reappear once service fully restarts along with db files. Without it we see random issue as described in the below bug. 
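A condensed sketch of the resulting cleanup in init_ovn (the paths come from the existing OVS_DATADIR/OVN_DATADIR/OVN_RUNDIR variables; only the last line is new in this patch):

    # wipe stale databases, lock files and, with this change, unix sockets,
    # so ovsdb-server and the OVN daemons recreate them on the next start
    rm -f $OVS_DATADIR/.*.db.~lock~
    sudo rm -f $OVN_DATADIR/*.db
    sudo rm -f $OVN_DATADIR/.*.db.~lock~
    sudo rm -f $OVN_RUNDIR/*.sock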
Closes-Bug: #2002629 Change-Id: I726a9cac9c805d017273aa79e844724f0d00cdf0 --- lib/neutron_plugins/ovn_agent | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index dc8129553c..f27777867d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -587,6 +587,7 @@ function init_ovn { rm -f $OVS_DATADIR/.*.db.~lock~ sudo rm -f $OVN_DATADIR/*.db sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock } function _start_ovs { From 5a6f0bbd4c3c3006a50e9e70c81f31ea1fa409c6 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Sat, 21 Jan 2023 20:21:26 +0100 Subject: [PATCH 1655/1936] Remove the neutron bash completion installation The python-neutronclient CLI code is going to be removed from this repository. Change-Id: I39b3a43a7742481ec6d9501d5459bf0837ba0122 Related-Bug: #2003861 --- lib/neutron | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c8ee8c5e76..9fb09ab9bb 100644 --- a/lib/neutron +++ b/lib/neutron @@ -527,7 +527,6 @@ function install_neutronclient { if use_library_from_git "python-neutronclient"; then git_clone_by_name "python-neutronclient" setup_dev_lib "python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion fi } From 91efe177b170c3874989affc73842dc4ffbe062d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 29 Sep 2022 08:38:24 +0200 Subject: [PATCH 1656/1936] Bump cirros version to 0.6.1 Cirros has made a fresh release, let us use it. Switch the download URLs to https and drop an old example that no longer is available. Depends-On: https://review.opendev.org/c/openstack/tempest/+/871271 Change-Id: I1d391b871fc9bfa825db30db9434922226b94d8a --- doc/source/guides/nova.rst | 2 +- stackrc | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..d0fb274c13 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.1-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index b3130e5f7f..a71d843362 100644 --- a/stackrc +++ b/stackrc @@ -657,20 +657,19 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
# example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -687,11 +686,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) From 71c3c40c269a50303247855319d1d3a5d30f6773 Mon Sep 17 00:00:00 2001 From: Bence Romsics Date: Wed, 21 Dec 2022 13:50:54 +0100 Subject: [PATCH 1657/1936] 'sudo pkill -f' should not match the sudo process pkill already takes care that it does not kill itself, however the same problem may happen with 'sudo pkill -f' killing sudo. Use one of the usual regex tricks to avoid that. 
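To make the regex trick concrete (haproxy is just the example used in the metadata-agent cleanup below): pkill -f matches against whole command lines, so a plain pattern such as "haproxy" also matches the "sudo pkill -9 -f haproxy" command line itself and can kill that parent sudo; writing one character as a bracket expression breaks the self-match while still matching the real processes:

    # matches running haproxy processes, but never its own command line,
    # because that command line contains the literal string "[h]aproxy"
    sudo pkill -9 -f "[h]aproxy" || :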
Change-Id: Ic6a94f516cbc509a2d77699494aa7bcaecf96ebc Closes-Bug: #1999395 --- lib/neutron | 4 +++- lib/neutron_plugins/ovn_agent | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index c8ee8c5e76..0d6a148c8e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -689,7 +689,9 @@ function stop_other { fi if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : fi } diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index f27777867d..34903924b3 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -769,7 +769,9 @@ function _stop_process { function stop_ovn { if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then - sudo pkill -9 -f haproxy || : + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi if is_service_enabled ovn-controller-vtep ; then From 7fe998109bda8cdd5cb5ba4a0e02c6c83cb0566d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 26 Jan 2023 22:28:07 -0600 Subject: [PATCH 1658/1936] Fix setting the tempest virtual env constraints env var Devstack set the env var TOX_CONSTRAINTS_FILE/UPPER_CONSTRAINTS_FILE which are used to use the constraints during Tempest virtual env installation. Those env var are set to non-master constraint when we need to use non-master constraints but when we need to use the master constraints we do not set/reset them point to master constraints. This create the issue when running the grenade job where we run Tempest on the old devstack as well as in the new devstack. When tempest is installed on old devstack then old tempest is used and it sets these env var to stable/ constraints (this is the case when old devstack (the stable branch is in EM phase) uses the old tempest not the master tempest), all good till now. But the problem comes when in the same grenade script run upgrade-tempest install the master tempest (when new devstack branches are in the 'supported' phase and use the master tempest means) and are supposed to use the master constraints. But the TOX_CONSTRAINTS_FILE/UPPER_CONSTRAINTS_FILE env var set by old tempest is used by the tempest and due to a mismatch in constraints it fails. This happened when we tried to pin the stable/wallaby with Tempest 29.0.0 - https://review.opendev.org/c/openstack/devstack/+/871782 and table/xena grenade job failed (stable/xena use master tempest and supposed to use master constraints) - https://zuul.opendev.org/t/openstack/build/fb7b2a8b562c42bab4c741819f5e9732/log/controller/logs/grenade.sh_log.txt#16641 We should set/reset those constraint env var to master constraints if configuration tell devstack to use the master constraints. 
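As a sketch of the interaction (not the full set_tempest_venv_constraints function): the tempest tox environment resolves its constraints from these environment variables, so whichever devstack run exported them last wins; the fix therefore re-exports them whenever the master constraints are selected:

    # an old (stable) devstack may have left behind something like
    #   TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/wallaby
    # reset both variables to master before installing master tempest
    export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
    export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master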
[1] https://github.com/openstack/devstack/blob/71c3c40c269a50303247855319d1d3a5d30f6773/lib/tempest#L124 Closes-Bug: #2003993 Change-Id: I5e938139b47f443a4c358415d0d4dcf6549cd085 --- lib/tempest | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/tempest b/lib/tempest index 44a9b6f29f..c3d3e9ac30 100644 --- a/lib/tempest +++ b/lib/tempest @@ -128,6 +128,13 @@ function set_tempest_venv_constraints { (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt 2>/dev/null || git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. For detail problem, + # refer to the https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From a84b2091cf2c84eb4b81e542233bf446440e02b1 Mon Sep 17 00:00:00 2001 From: elajkat Date: Wed, 17 Nov 2021 11:52:56 +0100 Subject: [PATCH 1659/1936] Rehome functions to enable Neutron's segments integration Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo as plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. Placement integration is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. [1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: I2c26063896ab2679cffd01227a40a3283caa3b17 --- lib/neutron | 5 +++++ lib/neutron_plugins/services/segments | 10 ++++++++++ 2 files changed, 15 insertions(+) create mode 100644 lib/neutron_plugins/services/segments diff --git a/lib/neutron b/lib/neutron index 8708bf43ca..368a1b9c55 100644 --- a/lib/neutron +++ b/lib/neutron @@ -294,6 +294,7 @@ source $TOP_DIR/lib/neutron_plugins/services/l3 source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments # Use security group or not if has_neutron_plugin_security_group; then @@ -416,6 +417,10 @@ function configure_neutron { configure_l3_agent_extension_gateway_ip_qos fi fi + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension + fi # Finally configure Neutron server and core plugin if is_service_enabled q-agt neutron-agent q-svc neutron-api; then diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + From 48af5d4b1bf5332c879ee52fb4686874b212697f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 14 Feb 2023 17:11:24 +0100 Subject: [PATCH 1660/1936] Make rockylinux job non-voting It is currently failing, let's unblock the CI until we have a fix. 
Change-Id: I7f072ceef57c302eb6ce20e108043d2390e9f481 --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8e20f6ed34..30e53976a5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -726,6 +726,7 @@ description: Rocky Linux 9 Blue Onyx platform test nodeset: devstack-single-node-rockylinux-9 timeout: 9000 + voting: false vars: configure_swap_size: 4096 From fcc525f4fc5022a4b1f4e3d961e1b27cfbfa9d71 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Thu, 16 Feb 2023 10:26:32 +0100 Subject: [PATCH 1661/1936] Fix rockylinux and make it voting Some rockylinux deployments have the curl-minimal package installed by default (the latest GenericCloud image still has the curl package), it triggers an error when devstack wants to install the curl package. Fix this issue by swaping curl-minimal with curl before installing base packages. Change-Id: I969e8dc22e7d11c9917a843d9245f33a04fe197d --- .zuul.yaml | 1 - stack.sh | 7 +++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 30e53976a5..8e20f6ed34 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -726,7 +726,6 @@ description: Rocky Linux 9 Blue Onyx platform test nodeset: devstack-single-node-rockylinux-9 timeout: 9000 - voting: false vars: configure_swap_size: 4096 diff --git a/stack.sh b/stack.sh index 28576d1e14..ccd2d16baa 100755 --- a/stack.sh +++ b/stack.sh @@ -394,6 +394,13 @@ elif [[ $DISTRO == "rhel9" ]]; then sudo dnf config-manager --set-enabled crb # rabbitmq and other packages are provided by RDO repositories. _install_rdo + + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # it triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl + fi elif [[ $DISTRO == "openEuler-22.03" ]]; then # There are some problem in openEuler. We should fix it first. Some required # package/action runs before fixup script. So we can't fix there. From ec07b343d25e9964db57ef9c3e2a89deeb5ac56e Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 24 Jan 2023 17:38:45 +0100 Subject: [PATCH 1662/1936] Remove support for opensuse We haven't been testing the distro for a while in CI, e.g. in Tempest, the jobs on opensuse15 haven't been executed for a year now. Therefore the patch removes opensuse support from devstack. 
Closes-Bug: #2002900 Change-Id: I0f5e4c644e2d14d1b8bb5bc0096d1469febe5fcc --- doc/source/index.rst | 3 +- doc/source/plugins.rst | 3 -- files/rpms-suse/baremetal | 1 - files/rpms-suse/ceph | 3 -- files/rpms-suse/cinder | 3 -- files/rpms-suse/dstat | 1 - files/rpms-suse/general | 34 --------------------- files/rpms-suse/horizon | 2 -- files/rpms-suse/keystone | 4 --- files/rpms-suse/ldap | 3 -- files/rpms-suse/n-api | 1 - files/rpms-suse/n-cpu | 10 ------ files/rpms-suse/neutron-agent | 1 - files/rpms-suse/neutron-common | 12 -------- files/rpms-suse/neutron-l3 | 2 -- files/rpms-suse/nova | 21 ------------- files/rpms-suse/openvswitch | 3 -- files/rpms-suse/os-brick | 2 -- files/rpms-suse/q-agt | 1 - files/rpms-suse/q-l3 | 1 - files/rpms-suse/swift | 6 ---- functions-common | 47 ----------------------------- inc/python | 4 --- lib/apache | 25 +++------------ lib/cinder | 9 ++---- lib/databases/mysql | 15 ++------- lib/databases/postgresql | 9 ++---- lib/glance | 4 --- lib/horizon | 2 +- lib/ldap | 14 --------- lib/lvm | 4 +-- lib/neutron_plugins/ovs_base | 13 -------- lib/neutron_plugins/ovs_source | 6 ---- lib/nova | 6 ++-- lib/nova_plugins/functions-libvirt | 6 ++-- lib/nova_plugins/hypervisor-libvirt | 3 -- lib/rpc_backend | 15 +-------- lib/swift | 3 -- lib/tls | 12 -------- stack.sh | 2 +- tests/test_package_ordering.sh | 2 +- tools/fixup_stuff.sh | 40 ------------------------ tools/install_prereqs.sh | 2 -- 43 files changed, 25 insertions(+), 335 deletions(-) delete mode 100644 files/rpms-suse/baremetal delete mode 100644 files/rpms-suse/ceph delete mode 100644 files/rpms-suse/cinder delete mode 100644 files/rpms-suse/dstat delete mode 100644 files/rpms-suse/general delete mode 100644 files/rpms-suse/horizon delete mode 100644 files/rpms-suse/keystone delete mode 100644 files/rpms-suse/ldap delete mode 100644 files/rpms-suse/n-api delete mode 100644 files/rpms-suse/n-cpu delete mode 100644 files/rpms-suse/neutron-agent delete mode 100644 files/rpms-suse/neutron-common delete mode 100644 files/rpms-suse/neutron-l3 delete mode 100644 files/rpms-suse/nova delete mode 100644 files/rpms-suse/openvswitch delete mode 100644 files/rpms-suse/os-brick delete mode 120000 files/rpms-suse/q-agt delete mode 120000 files/rpms-suse/q-l3 delete mode 100644 files/rpms-suse/swift diff --git a/doc/source/index.rst b/doc/source/index.rst index 1e932f88a5..ccd0fef330 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,8 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and -openEuler. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 62dd15bfb1..dd75b5a22d 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -243,9 +243,6 @@ locations in the top-level of the plugin repository: - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running on Red Hat, Fedora, or CentOS. -- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when - running on SUSE Linux or openSUSE. - Although there a no plans to remove this method of installing packages, plugins should consider it deprecated for ``bindep`` support described below. 
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph deleted file mode 100644 index 8c4955df90..0000000000 --- a/files/rpms-suse/ceph +++ /dev/null @@ -1,3 +0,0 @@ -ceph # NOPRIME -lsb -xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index b39cc79a27..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,3 +0,0 @@ -lvm2 -qemu-tools -tgt # NOPRIME diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index f63611025c..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,34 +0,0 @@ -apache2 -apache2-devel -bc -ca-certificates-mozilla -curl -gawk -gcc -gcc-c++ -git-core -graphviz # docs -iputils -libffi-devel # pyOpenSSL -libjpeg8-devel # Pillow 3.0.0 -libopenssl-devel # to rebuild pyOpenSSL if needed -libxslt-devel # lxml -lsof # useful when debugging -make -net-tools -openssh -openssl -pcre-devel # python-pcre -postgresql-devel # psycopg2 -psmisc -python3-systemd -python-cmd2 # dist:opensuse-12.3 -python-devel # pyOpenSSL -python-xml -tar -tcpdump -unzip -util-linux -wget -which -zlib-devel diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index 753ea76e04..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,2 +0,0 @@ -apache2-mod_wsgi # NOPRIME -apache2 # NOPRIME diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 66cfc23423..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,4 +0,0 @@ -cyrus-sasl-devel -memcached -openldap2-devel -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index 0f08daace3..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1 +0,0 @@ -python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 9c724cb9d8..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,10 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -cryptsetup -dosfstools -libosinfo -lvm2 -mkisofs # not:sle12 -open-iscsi -sg3_utils -# Stuff for diablo volumes -sysfsutils diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms-suse/neutron-agent +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common deleted file mode 100644 index e3799a9353..0000000000 --- a/files/rpms-suse/neutron-common +++ /dev/null @@ -1,12 +0,0 @@ -acl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -haproxy # to serve as metadata proxy inside router/dhcp namespaces -iptables -iputils -rabbitmq-server # NOPRIME -radvd # NOPRIME -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms-suse/neutron-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms-suse/nova 
b/files/rpms-suse/nova deleted file mode 100644 index 082b9aca22..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,21 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -conntrack-tools -curl -ebtables -iptables -iputils -kpartx -kvm # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -# mkisofs is required for config_drive -mkisofs # not:sle12 -parted -polkit -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch deleted file mode 100644 index 53f8bb22cf..0000000000 --- a/files/rpms-suse/openvswitch +++ /dev/null @@ -1,3 +0,0 @@ - -openvswitch -openvswitch-switch diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick deleted file mode 100644 index 67b33a9861..0000000000 --- a/files/rpms-suse/os-brick +++ /dev/null @@ -1,2 +0,0 @@ -lsscsi -open-iscsi diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt deleted file mode 120000 index 99fe353094..0000000000 --- a/files/rpms-suse/q-agt +++ /dev/null @@ -1 +0,0 @@ -neutron-agent \ No newline at end of file diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 deleted file mode 120000 index 0a5ca2a45f..0000000000 --- a/files/rpms-suse/q-l3 +++ /dev/null @@ -1 +0,0 @@ -neutron-l3 \ No newline at end of file diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 3663b98545..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,6 +0,0 @@ -curl -liberasurecode-devel -memcached -sqlite3 -xfsprogs -xinetd diff --git a/functions-common b/functions-common index 4eed5d8407..c7a1c6e0bf 100644 --- a/functions-common +++ b/functions-common @@ -454,16 +454,6 @@ function GetDistro { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif is_opensuse; then - DISTRO="opensuse-$os_RELEASE" - # Tumbleweed uses "n/a" as a codename, and the release is a datestring - # like 20180218, so not very useful. Leap however uses a release - # with a "dot", so for example 15.0 - [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ - DISTRO="opensuse-tumbleweed" - elif is_suse_linux_enterprise; then - # just use major release - DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ "$os_VENDOR" =~ (AlmaLinux) || \ @@ -537,37 +527,6 @@ function is_fedora { } -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). 
-# is_suse -function is_suse { - is_opensuse || is_suse_linux_enterprise -} - - -# Determine if current distribution is an openSUSE distribution -# is_opensuse -function is_opensuse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (openSUSE) ]] -} - - -# Determine if current distribution is a SUSE Linux Enterprise (SLE) -# distribution -# is_suse_linux_enterprise -function is_suse_linux_enterprise { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (^SUSE) ]] -} - - # Determine if current distribution is an Ubuntu-based distribution # It will also detect non-Ubuntu but Debian-based distros # is_ubuntu @@ -1168,8 +1127,6 @@ function _get_package_dir { pkg_dir=$base_dir/debs elif is_fedora; then pkg_dir=$base_dir/rpms - elif is_suse; then - pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1444,8 +1401,6 @@ function real_install_package { apt_get install "$@" elif is_fedora; then yum_install "$@" - elif is_suse; then - zypper_install "$@" else exit_distro_not_supported "installing packages" fi @@ -1487,8 +1442,6 @@ function uninstall_package { apt_get purge "$@" elif is_fedora; then sudo dnf remove -y "$@" ||: - elif is_suse; then - sudo zypper remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi diff --git a/inc/python b/inc/python index 3eb3efe80e..a24f4e910a 100644 --- a/inc/python +++ b/inc/python @@ -7,7 +7,6 @@ # External functions used: # - GetOSVersion # - is_fedora -# - is_suse # - safe_chown # Save trace setting @@ -62,7 +61,6 @@ function get_python_exec_prefix { $xtrace local PYTHON_PATH=/usr/local/bin - is_suse && PYTHON_PATH=/usr/bin echo $PYTHON_PATH } @@ -462,8 +460,6 @@ function install_python { function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev - elif is_suse; then - install_package python3-devel python3-dbm elif is_fedora; then if [ "$os_VENDOR" = "Fedora" ]; then install_package python${PYTHON3_VERSION//.} diff --git a/lib/apache b/lib/apache index dd8c9a0f06..4d68b49767 100644 --- a/lib/apache +++ b/lib/apache @@ -44,10 +44,6 @@ elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} - APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi APACHE_LOG_DIR="/var/log/${APACHE_NAME}" @@ -65,11 +61,6 @@ function enable_apache_mod { sudo a2enmod $mod restart_apache_server fi - elif is_suse; then - if ! a2enmod -q $mod ; then - sudo a2enmod $mod - restart_apache_server - fi elif is_fedora; then # pass true @@ -104,10 +95,6 @@ function install_apache_uwsgi { # Thus there is nothing else to do after this install install_package uwsgi \ uwsgi-plugin-python3 - elif [[ $os_VENDOR =~ openSUSE ]]; then - install_package uwsgi \ - uwsgi-python3 \ - apache2-mod_uwsgi else # Compile uwsgi from source. 
local dir @@ -125,7 +112,7 @@ function install_apache_uwsgi { sudo rm -rf $dir fi - if is_ubuntu || is_suse ; then + if is_ubuntu; then # we've got to enable proxy and proxy_uwsgi for this to work sudo a2enmod proxy sudo a2enmod proxy_uwsgi @@ -155,8 +142,6 @@ function install_apache_wsgi { sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf - elif is_suse; then - install_package apache2 apache2-mod_wsgi else exit_distro_not_supported "apache wsgi installation" fi @@ -171,7 +156,7 @@ function install_apache_wsgi { # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' # files are 000-default.conf and default-ssl.conf. # -# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. +# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. # # On RHEL and CentOS, things should hopefully work as in Fedora. # @@ -187,7 +172,7 @@ function apache_site_config_for { if is_ubuntu; then # Ubuntu 14.04 - Apache 2.4 echo $APACHE_CONF_DIR/${site}.conf - elif is_fedora || is_suse; then + elif is_fedora; then # fedora conf.d is only imported if it ends with .conf so this is approx the same local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" if [ -f $enabled_site_file ]; then @@ -205,7 +190,7 @@ function enable_apache_site { enable_apache_mod version if is_ubuntu; then sudo a2ensite ${site} - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if site already enabled or no site config exists if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then @@ -219,7 +204,7 @@ function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} || true - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if no site config exists if [[ -f ${enabled_site_file} ]]; then diff --git a/lib/cinder b/lib/cinder index 2424f928d1..602e8dad0e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -117,8 +117,8 @@ else fi -# EL and SUSE should only use lioadm -if is_fedora || is_suse; then +# EL should only use lioadm +if is_fedora; then if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" fi @@ -595,11 +595,6 @@ function start_cinder { _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_suse; then - # NOTE(dmllr): workaround restart bug - # https://bugzilla.suse.com/show_bug.cgi?id=934642 - stop_service tgtd - start_service tgtd else restart_service tgtd fi diff --git a/lib/databases/mysql b/lib/databases/mysql index fbad44e36a..ed8006e7db 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,12 +20,6 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then - # Older mariadb packages on SLES 12 provided mysql.service. The - # newer ones on SLES 12 and 15 use mariadb.service; they also - # provide a mysql.service symlink for backwards-compatibility, but - # let's not rely on that. 
- MYSQL_SERVICE_NAME=mariadb elif [[ "$DISTRO" == "bullseye" ]]; then MYSQL_SERVICE_NAME=mariadb fi @@ -54,7 +48,7 @@ function cleanup_database_mysql { elif is_oraclelinux; then uninstall_package mysql-community-server sudo rm -rf /var/lib/mysql - elif is_suse || is_fedora; then + elif is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql else @@ -74,7 +68,7 @@ function configure_database_mysql { if is_ubuntu; then my_conf=/etc/mysql/my.cnf - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then my_conf=/etc/my.cnf elif is_fedora; then my_conf=/etc/my.cnf @@ -90,7 +84,7 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" # (Re)Start mysql-server - if is_fedora || is_suse; then + if is_fedora; then # service is not started by default start_service $MYSQL_SERVICE_NAME elif is_ubuntu; then @@ -212,9 +206,6 @@ EOF elif is_fedora; then install_package mariadb-server mariadb-devel mariadb sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then install_package $MYSQL_SERVICE_NAME-server else diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 4f0a5a0a4c..b21418b75e 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -32,7 +32,7 @@ function cleanup_database_postgresql { # Get ruthless with mysql apt_get purge -y postgresql* return - elif is_fedora || is_suse; then + elif is_fedora; then uninstall_package postgresql-server else return @@ -66,11 +66,6 @@ function configure_database_postgresql { pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` pg_hba=$pg_dir/pg_hba.conf pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql else exit_distro_not_supported "postgresql configuration" fi @@ -107,7 +102,7 @@ EOF if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then if is_ubuntu; then install_package postgresql - elif is_fedora || is_suse; then + elif is_fedora; then install_package postgresql-server if is_fedora; then sudo systemctl enable postgresql diff --git a/lib/glance b/lib/glance index 041acafc92..5aeae16c61 100644 --- a/lib/glance +++ b/lib/glance @@ -47,10 +47,6 @@ USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance -# NOTE (abhishekk): For opensuse data files are stored in different directory -if is_opensuse; then - GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance -fi # When Cinder is used as a glance store, you can optionally configure cinder to # optimize bootable volume creation by allowing volumes to be cloned directly # in the backend instead of transferring data via Glance. 
To use this feature, diff --git a/lib/horizon b/lib/horizon index b2bf7bcb49..f76f9e557d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -129,7 +129,7 @@ function configure_horizon { if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf - elif is_fedora || is_suse; then + elif is_fedora; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" diff --git a/lib/ldap b/lib/ldap index ea5faa1fe9..b0195db258 100644 --- a/lib/ldap +++ b/lib/ldap @@ -39,13 +39,6 @@ elif is_fedora; then LDAP_OLCDB_NUMBER=2 LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_OLCDB_TYPE=hdb - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -76,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -126,11 +117,6 @@ function install_ldap { configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" diff --git a/lib/lvm b/lib/lvm index 57ffb967c3..57d2cd4e62 100644 --- a/lib/lvm +++ b/lib/lvm @@ -129,8 +129,8 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then start_service tgtd fi diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index cc41a8cd46..adabc56412 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -80,19 +80,6 @@ function _neutron_ovs_base_install_agent_packages { elif is_fedora; then restart_service openvswitch sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then - restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch - fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } - fi fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index ea71e60e68..288eb1d69b 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -182,12 +182,6 @@ function action_openvswitch { ${action}_service openvswitch-switch elif is_fedora; then ${action}_service openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then - ${action}_service openvswitch-switch - else - ${action}_service openvswitch - fi fi } diff --git a/lib/nova b/lib/nova index 3aa6b9e3b3..f34e823074 100644 --- a/lib/nova +++ b/lib/nova @@ -448,8 +448,8 @@ function create_nova_conf { iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager - if is_fedora || is_suse; then - # nova defaults to /usr/local/bin, but fedora and suse pip like to + if is_fedora; then + # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin iniset 
$NOVA_CONF DEFAULT bindir "/usr/bin" fi @@ -523,7 +523,7 @@ function create_nova_conf { # nova defaults to genisoimage but only mkisofs is available for 15.0+ # rhel provides mkisofs symlink to genisoimage or xorriso appropiately - if is_suse || is_fedora; then + if is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index c0e45ebb85..799230603c 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -74,7 +74,7 @@ function install_libvirt { install_package qemu-efi fi #pip_install_gr - elif is_fedora || is_suse; then + elif is_fedora; then # Optionally enable the virt-preview repo when on Fedora if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then @@ -121,8 +121,8 @@ cgroup_device_acl = [ EOF fi - if is_fedora || is_suse; then - # Starting with fedora 18 and opensuse-12.3 enable stack-user to + if is_fedora; then + # Starting with fedora 18 enable stack-user to # virsh -c qemu:///system by creating a policy-kit rule for # stack-user using the new Javascript syntax rules_dir=/etc/polkit-1/rules.d diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c1cd132548..87c3d3addc 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -114,9 +114,6 @@ function install_nova_hypervisor { sudo dpkg-statoverride --add --update $STAT_OVERRIDE fi done - elif is_suse; then - # Workaround for missing dependencies in python-libguestfs - install_package python-libguestfs guestfs-data augeas augeas-lenses elif is_fedora; then install_package python3-libguestfs fi diff --git a/lib/rpc_backend b/lib/rpc_backend index 743b4ae170..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -52,20 +52,7 @@ function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - if is_suse; then - install_package rabbitmq-server-plugins - # the default systemd socket activation only listens on the loopback interface - # which causes rabbitmq to try to start its own epmd - sudo mkdir -p /etc/systemd/system/epmd.socket.d - cat </dev/null -[Socket] -ListenStream= -ListenStream=[::]:4369 -EOF - sudo systemctl daemon-reload - sudo systemctl restart epmd.socket epmd.service - fi - if is_fedora || is_suse; then + if is_fedora; then # NOTE(jangutter): If rabbitmq is not running (as in a fresh # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with # socket activation. 
This fails the first time and does not get diff --git a/lib/swift b/lib/swift index 251c4625b5..1ebf073318 100644 --- a/lib/swift +++ b/lib/swift @@ -547,9 +547,6 @@ function configure_swift { local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} local swift_log_group=adm - if is_suse; then - swift_log_group=root - fi sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then diff --git a/lib/tls b/lib/tls index b8758cd6d3..e0c7500b20 100644 --- a/lib/tls +++ b/lib/tls @@ -212,9 +212,6 @@ function init_CA { if is_fedora; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem sudo update-ca-trust - elif is_suse; then - sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem - sudo update-ca-certificates elif is_ubuntu; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt @@ -376,9 +373,6 @@ function fix_system_ca_bundle_path { elif is_ubuntu; then sudo rm -f $capath sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath - elif is_suse; then - sudo rm -f $capath - sudo ln -s /etc/ssl/ca-bundle.pem $capath else echo "Don't know how to set the CA bundle, expect the install to fail." fi @@ -441,9 +435,6 @@ function enable_mod_ssl { if is_ubuntu; then sudo a2enmod ssl - elif is_suse; then - sudo a2enmod ssl - sudo a2enflag SSL elif is_fedora; then # Fedora enables mod_ssl by default : @@ -560,9 +551,6 @@ $listen_string CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF - if is_suse ; then - sudo a2enflag SSL - fi for mod in headers ssl proxy proxy_http; do enable_apache_mod $mod done diff --git a/stack.sh b/stack.sh index 28576d1e14..8d450aadc1 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh index bfc2a1954f..f221c821a0 100755 --- a/tests/test_package_ordering.sh +++ b/tests/test_package_ordering.sh @@ -8,7 +8,7 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/tests/unittest.sh export LC_ALL=en_US.UTF-8 -PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f) TMPDIR=$(mktemp -d) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index daa1bc6301..0ec426b601 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -90,45 +90,6 @@ function fixup_fedora { fi } -function fixup_suse { - if ! is_suse; then - return - fi - - # Deactivate and disable apparmor profiles in openSUSE and SLE - # distros to avoid issues with haproxy and dnsmasq. 
In newer - # releases, systemctl stop apparmor is actually a no-op, so we - # have to use aa-teardown to make sure we've deactivated the - # profiles: - # - # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 - # https://gitlab.com/apparmor/apparmor/merge_requests/81 - # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 - if sudo systemctl is-active -q apparmor; then - sudo systemctl stop apparmor - fi - if [ -x /usr/sbin/aa-teardown ]; then - sudo /usr/sbin/aa-teardown - fi - if sudo systemctl is-enabled -q apparmor; then - sudo systemctl disable apparmor - fi - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info - sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info - - # Ensure trusted CA certificates are up to date - # See https://bugzilla.suse.com/show_bug.cgi?id=1154871 - # May be removed once a new opensuse-15 image is available in nodepool - sudo zypper up -y p11-kit ca-certificates-mozilla -} - function fixup_ovn_centos { if [[ $os_VENDOR != "CentOS" ]]; then return @@ -156,5 +117,4 @@ function fixup_ubuntu { function fixup_all { fixup_ubuntu fixup_fedora - fixup_suse } diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a7c03d26cd..f2d57c8451 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -74,8 +74,6 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi From 7567359755a105e7278bbf97541332f28228b87d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 13 Feb 2023 14:41:40 +0000 Subject: [PATCH 1663/1936] Try to reduce mysql memory usage These are a few tweaks I applied to my own memory-constrained cloud instances that seemed to help. I have lower performance requirements so this may make things worse and not better, but it's worth seeing what the impact is. I'll admit to not knowing the full impact of these as they're mostly collected from various tutorials on lowering memory usage. Enable this for now on devstack-multinode Change-Id: I7b223391d3de01e3e81b02076debd01d9d2f097c --- .zuul.yaml | 3 +++ lib/databases/mysql | 10 ++++++++++ stackrc | 5 +++++ 3 files changed, 18 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8e20f6ed34..fa7f180797 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -696,6 +696,9 @@ description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. 
+ vars: + devstack_localrc: + MYSQL_REDUCE_MEMORY: true # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, diff --git a/lib/databases/mysql b/lib/databases/mysql index fbad44e36a..e805b3e73f 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -176,6 +176,16 @@ function configure_database_mysql { count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats fi + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + restart_service $MYSQL_SERVICE_NAME } diff --git a/stackrc b/stackrc index a05d1e5553..442e9a0351 100644 --- a/stackrc +++ b/stackrc @@ -201,6 +201,11 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # performance_schema that are of interest to us MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is # in the format for timeout(1); From 37d11d00e56ec6ff402a13a28ec308c13291a937 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 21 Feb 2023 21:41:40 +0000 Subject: [PATCH 1664/1936] Revert "Bump cirros version to 0.6.1" This reverts commit 91efe177b170c3874989affc73842dc4ffbe062d. Reason for revert: it broke tempest-slow job https://9afe3d390e4175b60a80-89b1085289883615a17bd93ef47f6ca9.ssl.cf5.rackcdn.com/871018/13/gate/tempest-slow-py3/d139ae1/testr_results.html Change-Id: Ib74e51a780d3e8101f4147db9d24eebea4980fb1 --- doc/source/guides/nova.rst | 2 +- stackrc | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index d0fb274c13..5b427972c4 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.1-x86_64-disk --nic none --wait test-server + --image cirros-0.3.5-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index a71d843362..b3130e5f7f 100644 --- a/stackrc +++ b/stackrc @@ -657,19 +657,20 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz +# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
# example: -# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img -# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img +# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image +#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -686,11 +687,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) From 0572d73f8561f3304f897bf9ee2f63c406cc21b7 Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Wed, 22 Feb 2023 10:38:49 +0900 Subject: [PATCH 1665/1936] Disable memory_tracker and file_tracker in unstask.sh properly stop_dstat() calls stop_process() for dstat, memory_tracker and file_tracker respectively. Inside stop_process(), a check for the existence of the service is performed by is_service_enabled(). So even if we apply this seemingly dangerous commit, is_service_enabled() is respected, so it's safe. Closes-Bug: #1998990 Change-Id: Ica58cdb1d60c4c796f582d82ed2cde0be94b1a7e Signed-off-by: Nobuhiro MIKI --- unstack.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/unstack.sh b/unstack.sh index a36af3fb59..33b069b6a3 100755 --- a/unstack.sh +++ b/unstack.sh @@ -168,9 +168,7 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi -if is_service_enabled dstat; then - stop_dstat -fi +stop_dstat # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are From 03c3fd763e301077ecfa0a2d3428a091bedd691d Mon Sep 17 00:00:00 2001 From: Yamato Tanaka Date: Fri, 10 Feb 2023 19:44:20 +0900 Subject: [PATCH 1666/1936] Support RHEL 9 This patch includes changes required to run devstack on RHEL 9. 
- en_US.utf8 is provided by glibc-langpack-en - iptables command is provided by iptables-nft - Use /etc/os-release to identify the distro in RHEL 9 as it doesn't provide lsb_release command. - CRB repository name is different from CentOS 9 Change-Id: I8f6d9263b24f9c2cf82e09258e2d14d7766ad337 --- files/rpms/general | 2 ++ functions-common | 5 +++-- stack.sh | 3 +++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index b6866de62d..8a5755cc37 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -6,9 +6,11 @@ gcc gcc-c++ gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 graphviz # needed only for docs httpd httpd-devel +iptables-nft # dist:rhel9 iptables-services java-1.8.0-openjdk-headless libffi-devel diff --git a/functions-common b/functions-common index 4eed5d8407..3e07a49e22 100644 --- a/functions-common +++ b/functions-common @@ -412,9 +412,9 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # CentOS Stream 9 does not provide lsb_release + # CentOS Stream 9 and RHEL 9 do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" == "centos9" ]]; then + if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') @@ -530,6 +530,7 @@ function is_fedora { [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ [ "$os_VENDOR" = "Rocky" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ diff --git a/stack.sh b/stack.sh index ccd2d16baa..6e7b11a0bc 100755 --- a/stack.sh +++ b/stack.sh @@ -391,7 +391,10 @@ if [[ $DISTRO == "rhel8" ]]; then # Patch: https://github.com/rpm-software-management/dnf/pull/1448 echo "[]" | sudo tee /var/cache/dnf/expired_repos.json elif [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms # rabbitmq and other packages are provided by RDO repositories. _install_rdo From f834f9adaf9c228ff4ec6a5e24e6d4cf3ca6a992 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Mon, 6 Mar 2023 18:47:03 +0000 Subject: [PATCH 1667/1936] Fix NotImplementedError in dbcounter on SQLA 2.x This patch fixes a NotImplementedError raised in the dbcounter plugin when using SQLAlchemy 2.x. The plugin signature has changed and now requires an "update_url" method as part of the plugin[1]. This patch also updates the do_incr() explicit SQL string to use a TextClause and the new requirement for named bound parameters[2]. 
Closes-Bug: #2009521 [1] https://docs.sqlalchemy.org/en/20/changelog/migration_14.html#changes-to-createengineplugin [2] https://docs.sqlalchemy.org/en/20/changelog/migration_20.html#execute-method-more-strict-execution-options-are-more-prominent Change-Id: Ie5484597057a3306757cc46b657446ad61ac2098 --- ...ementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml | 5 +++++ tools/dbcounter/dbcounter.py | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml new file mode 100644 index 0000000000..f815e14ccb --- /dev/null +++ b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on + SQLAlchemy 2.x. diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py index 5057f0f393..0ed7bb813a 100644 --- a/tools/dbcounter/dbcounter.py +++ b/tools/dbcounter/dbcounter.py @@ -40,6 +40,9 @@ def __init__(self, url, kwargs): self.queue = queue.Queue() self.thread = None + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + def engine_created(self, engine): """Hook the engine creation process. @@ -77,12 +80,12 @@ def _log_event(self, conn, cursor, statement, parameters, context, def do_incr(self, db, op, count): """Increment the counter for (db,op) by count.""" - query = ('INSERT INTO queries (db, op, count) ' - ' VALUES (%s, %s, %s) ' - ' ON DUPLICATE KEY UPDATE count=count+%s') + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') try: with self.engine.begin() as conn: - r = conn.execute(query, (db, op, count, count)) + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) except Exception as e: LOG.error('Failed to account for access to database %r: %s', db, e) From 07a7293721736e1184ae7dc22da33b2ce7babf61 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 10 Mar 2023 20:30:53 -0600 Subject: [PATCH 1668/1936] Update DEVSTACK_SERIES to 2023.2 stable/2023.1 branch has been created now and current master is for 2023.2. Change-Id: Ibd499ac35a38a5c1818c1df6009c5273ef3e90f7 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 442e9a0351..b7ce238366 100644 --- a/stackrc +++ b/stackrc @@ -248,7 +248,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2023.1" +DEVSTACK_SERIES="2023.2" ############## # From 1898a683be78622445e48f1f071cf7188ab19450 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 14 Mar 2023 05:35:33 +0000 Subject: [PATCH 1669/1936] Create multiattach volume type for tempest Creating a multiattach volume is a non-admin operation but creating a multiattach volume type is an admin operation. Previously cinder allowed creating multiattach volumes without a volume type but that support is being removed with [1]. The change requires updating tempest tests [2], but some tempest tests are non-admin, and creating the multiattach volume type requires admin privileges.
Based on the last discussion with tempest team[3], the proposed solution is to create a multiattach volume type in devstack, if ENABLE_VOLUME_MULTIATTACH is True, and use it in tempest tests. Similar to how admins create multiattach volume types for non-admin users. This patch creates a multiattach volume type if ENABLE_VOLUME_MULTIATTACH is True. Also we set the multiattach type name as a tempest config option 'volume_type_multiattach'. [1] https://review.opendev.org/c/openstack/cinder/+/874865 [2] https://review.opendev.org/c/openstack/tempest/+/875372 [3] https://meetings.opendev.org/irclogs/%23openstack-cinder/%23openstack-cinder.2023-03-13.log.html#t2023-03-13T18:47:56 Change-Id: Icd3690565bf7b27898cd206641e612da3993703d --- lib/cinder | 34 +++++++++++++++++++++++----------- lib/tempest | 4 ++++ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2424f928d1..c50a205f20 100644 --- a/lib/cinder +++ b/lib/cinder @@ -95,6 +95,7 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} if [[ -n "$CINDER_ISCSI_HELPER" ]]; then if [[ -z "$CINDER_TARGET_HELPER" ]]; then @@ -649,6 +650,23 @@ function stop_cinder { stop_process c-vol } +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" + fi +} + # create_volume_types() - Create Cinder's configured volume types function create_volume_types { # Create volume types @@ -656,19 +674,13 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode - if is_service_enabled keystone; then - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} - else - # TODO (e0ne): use openstack client once it will support cinder in noauth mode: - # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 - local cinder_url - cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name} - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} - fi + create_one_type $be_name "volume_backend_name" $be_name done + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH " True" + fi + # 
Increase quota for the service project if glance is using cinder, # since it's likely to occasionally go above the default 10 in parallel # test execution. diff --git a/lib/tempest b/lib/tempest index c3d3e9ac30..7da9f17052 100644 --- a/lib/tempest +++ b/lib/tempest @@ -604,6 +604,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi + # Placement Features # Set the microversion range for placement. # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. From 80c3ffe154fd79e03d8c4258b500b77a26efa008 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 27 Mar 2023 20:56:20 +0000 Subject: [PATCH 1670/1936] Fix reboot on fedora like nodes This change enables httpd in systemd so that it starts after a reboot and updates how selinux is disabled to use /etc/selinux/config in addtion to setenforce. Change-Id: I5ea8693c0b967937483bd921b1d9984ea14bc723 --- lib/apache | 2 ++ tools/fixup_stuff.sh | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/lib/apache b/lib/apache index dd8c9a0f06..771a7d7ec0 100644 --- a/lib/apache +++ b/lib/apache @@ -150,6 +150,8 @@ function install_apache_wsgi { elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd python3-mod_wsgi + # rpm distros dont enable httpd by default so enable it to support reboots. + sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index daa1bc6301..fef47263de 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -36,6 +36,12 @@ function fixup_fedora { # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime sudo setenforce 0 fi From fa42b3ca7bbac7746644693241ea1dd58a4939f0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 14 Apr 2023 02:16:59 +0000 Subject: [PATCH 1671/1936] Updated from generate-devstack-plugins-list Change-Id: I84015f860155e5c8ec3bcf54353d91405a13e549 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2e8e8f53d7..b244ca5dd8 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -88,7 +88,6 @@ openstack/openstacksdk `https://opendev.org/openstack/openstac openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/patrole `https://opendev.org/openstack/patrole `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ From 42517968ff7bdced07c5bc08b6cb2b8d10d246cc Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 14 Apr 2023 19:06:03 +0530 Subject: [PATCH 1672/1936] [ovs] Reload ovs kernel module always Irrespective of build_modules is True or False 
reload the ovs kernel modules always. If ovs was installed from a package before (like with the multi-node-bridge role), then installing ovs from source requires the openvswitch kernel module to be reloaded. The issue was not seen before jammy, as the module was reloaded there when build_modules was set to True. Closes-Bug: #2015364 Change-Id: I1785b49b2ef72ca1f817f504d5ea56021410c052 --- lib/neutron_plugins/ovs_source | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index ea71e60e68..d0ca75334e 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -164,10 +164,8 @@ function compile_ovs { sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install - reload_ovs_kernel_modules - else - load_ovs_kernel_modules fi + reload_ovs_kernel_modules cd $_pwd } From 15b2e429685fc753759ef8f3773ac559424e028f Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Tue, 24 Jan 2023 14:44:13 +0100 Subject: [PATCH 1673/1936] Modify devstack-base to allow for fips devstack-base is changed to descend from openstack-multinode-fips which is defined in project-config. This allows jobs to execute the enable_fips playbook to enable FIPS mode on the node, but only if they opt in by setting enable_fips to True. Otherwise, this is a no-op. Change-Id: I5631281662dbd18056ffba291290ed0978ab937e --- .zuul.yaml | 2 +- functions-common | 5 +++++ lib/databases/mysql | 11 ++++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fa7f180797..37625f3d11 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -370,7 +370,7 @@ - job: name: devstack-base - parent: multinode + parent: openstack-multinode-fips abstract: true description: | Base abstract Devstack job. diff --git a/functions-common b/functions-common index 4eed5d8407..844fffac37 100644 --- a/functions-common +++ b/functions-common @@ -2545,6 +2545,11 @@ function clean_pyc_files { fi } +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/databases/mysql b/lib/databases/mysql index e805b3e73f..bc6ce3d5c2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -69,7 +69,7 @@ function recreate_database_mysql { } function configure_database_mysql { - local my_conf mysql slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then @@ -86,6 +86,15 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + # Change bind-address from localhost (127.0.0.1) to any (::) iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" From 991a2794a3d6424f3b25cde471342846f9876470 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 17 Apr 2023 13:00:21 +0530 Subject: [PATCH 1674/1936] Fix name for neutron tempest uwsgi job This was renamed long back in [1].
[1] https://review.opendev.org/c/openstack/neutron/+/797051 Change-Id: If11e975fd890f55f99efc2c7d8122256ff831ad8 --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fa7f180797..98f3353319 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1047,7 +1047,7 @@ # * neutron-functional-with-uwsgi: maintained by neutron for functional # test. Next cycle we can remove this one if things turn out to be # stable engouh with uwsgi. - # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. + # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and currently non-voting in the # check queue for nova changes but relies on devstack configuration @@ -1062,7 +1062,7 @@ - nova-next - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi - - neutron-tempest-with-uwsgi + - neutron-ovn-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ From e8915786e1e007742f47fee507b1b6288b6cedae Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 19 Apr 2023 16:57:44 -0400 Subject: [PATCH 1675/1936] git: support git checkout for a commit hash git_clone assumes a branch or a tag is passed as the last argument, and it fails when a commit hash is passed, as in: timeout -s SIGINT 0 git clone https://github.com/ovn-org/ovn.git /opt/stack/ovn --branch 36e3ab9b47e93af0599a818e9d6b2930e49473f0 Cloning into '/opt/stack/ovn'... fatal: Remote branch 36e3ab9b47e93af0599a818e9d6b2930e49473f0 not found in upstream origin Change-Id: Id1328d7cba418fa7c227ae9db4fe83c09fd06035 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index c7a1c6e0bf..d8b15024a8 100644 --- a/functions-common +++ b/functions-common @@ -609,8 +609,9 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - # '--branch' can also take tags - git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref + git_timed clone $git_clone_flags $git_remote $git_dest + cd $git_dest + git checkout $git_ref elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest From b8f228620f6ad038ab8f31db861580f5e664a280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Ribaud?= Date: Mon, 24 Apr 2023 14:22:01 +0200 Subject: [PATCH 1676/1936] Add manila service to configuration Manila is a service in OpenStack that enables shared filesystems. The modifications add the manila section in nova configuration files enabling the virtiofs feature. Implements: blueprint libvirt-virtiofs-attach-manila-shares Change-Id: Ia17c7a136cbe83efa1ef4e302d1c404034a50cda --- lib/nova | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/nova b/lib/nova index f34e823074..f5f002dd10 100644 --- a/lib/nova +++ b/lib/nova @@ -507,6 +507,10 @@ function create_nova_conf { configure_cinder_access fi + if is_service_enabled manila; then + configure_manila_access + fi + if [ -n "$NOVA_STATE_PATH" ]; then iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH" @@ -652,6 +656,18 @@ function configure_cinder_access { fi } +# Configure access to manila. 
+function configure_manila_access { + iniset $NOVA_CONF manila os_region_name "$REGION_NAME" + iniset $NOVA_CONF manila auth_type "password" + iniset $NOVA_CONF manila auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF manila username nova + iniset $NOVA_CONF manila password "$SERVICE_PASSWORD" + iniset $NOVA_CONF manila user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF manila project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF manila project_domain_name "$SERVICE_DOMAIN_NAME" +} + function configure_console_compute { # If we are running multiple cells (and thus multiple console proxies) on a # single host, we offset the ports to avoid collisions. We need to From 6764eab2644b2f76769e4492ab136ff65763d2ef Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 12 May 2023 16:34:08 -0400 Subject: [PATCH 1677/1936] Remove usage of neutron-debug since it has been removed The neutron-debug command was deprecated and finally removed, so tools/ping_neutron.sh can no longer rely on it to create a probe namespace. Instead, just try and use any namespace with the network ID in it, since it's either the DHCP (ML2/OVS) or Metadata (OVN) namespace, which should work just as well. As this code is rarely (never?) used, this best-effort attempt is good enough. Change-Id: I98c992a2a774ef1fb22cee2e90ee342ab2d537ac Depends-on: https://review.opendev.org/c/openstack/neutron/+/883081 --- lib/neutron | 18 ------------------ tools/ping_neutron.sh | 16 +++++++++++----- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/lib/neutron b/lib/neutron index 368a1b9c55..a6de7222db 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1112,24 +1112,6 @@ function _neutron_setup_interface_driver { # Functions for Neutron Exercises #-------------------------------- -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - # ssh check function _ssh_check_neutron { local from_net=$1 diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index 73fe3f3bdf..ab8e8dfca8 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -30,7 +30,8 @@ ping_neutron.sh [ping args] This provides a wrapper to ping neutron guests that are on isolated tenant networks that the caller can't normally reach. It does so by -creating a network namespace probe. +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. It takes arguments like ping, except the first arg must be the network name. @@ -44,6 +45,12 @@ EOF exit 1 } +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. 
+function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + NET_NAME=$1 if [[ -z "$NET_NAME" ]]; then @@ -53,12 +60,11 @@ fi REMAINING_ARGS="${@:2}" -# BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(openstack network show -f value -c id "$NET_NAME") -PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) # This runs a command inside the specific netns -NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" +NET_NS_CMD="ip netns exec $NET_NS" PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" From bfa43975bca48bb021fb266a206885c5b09f5f45 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 18 May 2023 12:54:19 -0500 Subject: [PATCH 1678/1936] Enable NOVA_ENFORCE_SCOPE to True by default Nova antelope release has enabled the RBAC new defaults by default - https://review.opendev.org/c/openstack/nova/+/866218 With the latest release of Nova have new defaults enable, we should test the same by default in devstack. This change make NOVA_ENFORCE_SCOPE flag to True by default so that every job will run with Nova new defaults. As old defaults are still supported (in deprecated way), we will keep NOVA_ENFORCE_SCOPE flag so that we can have a single job can disable the new defaults and continue testing the old defaults. Change-Id: Id56819f03c19a5b7fe30adf799ecd3b8aeb67695 --- lib/nova | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index f34e823074..21067f302b 100644 --- a/lib/nova +++ b/lib/nova @@ -98,10 +98,10 @@ METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} # Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. -# This is used to switch the compute API policies enable the scope and new defaults. -# By Default, these flag are False. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE) +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) if [[ $SERVICE_IP_VERSION == 6 ]]; then NOVA_MY_IP="$HOST_IPV6" From cb1ec1834de0b1eaddb02b7847b21d1d617efb6e Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 18 May 2023 19:58:41 -0500 Subject: [PATCH 1679/1936] Enable GLANCE_ENFORCE_SCOPE to True by default Glance antelope release has enabled the RBAC new defaults by default - https://review.opendev.org/c/openstack/glance/+/872522 With the latest release of Glance have new defaults enable, we should test the same by default in devstack. This change make GLANCE_ENFORCE_SCOPE flag to True by default so that every job will run with Glance new defaults. As old defaults are still supported (in deprecated way), we will keep GLANCE_ENFORCE_SCOPE flag so that we can have a single job can disable the new defaults and continue testing the old defaults. 
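As an illustrative sketch (not part of this change), a job that still
needs the old policy defaults only has to flip the flag back, e.g. via
its devstack configuration:

    GLANCE_ENFORCE_SCOPE=False

in local.conf (or the equivalent devstack_localrc entry in the job
definition), while everything else keeps testing the new defaults.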
Depends-On: https://review.opendev.org/c/openstack/tempest/+/883701 Change-Id: Idde6f3cb766597575ca822f21b4bb3a465e5e753 --- lib/glance | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/glance b/lib/glance index 5aeae16c61..430d94d3a4 100644 --- a/lib/glance +++ b/lib/glance @@ -95,10 +95,10 @@ GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) # Flag to set the oslo_policy.enforce_scope. This is used to switch -# the Image API policies to start checking the scope of token. By Default, -# this flag is False. +# This is used to disable the Image API policies scope and new defaults. +# By Default, it is True. # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE) +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs From a2943894031e5b1c7662512d54ffb75a3cd3ca9d Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 25 Apr 2023 21:50:31 +0200 Subject: [PATCH 1680/1936] Set dhcp_client based on cirros version This change allows us to bump the default cirros version in devstack. Since cirros version 0.6.0 dhcpcd is the default dhcp client. The older cirros images used udhcpc client (the only available client at that time) which is also the default client in Tempest. This patch makes devstack configure dhcpcd client in tempest.conf if cirros >= 0.6.0 is going to be used in scenario tests. The commit also introduces a new SCENARIO_IMAGE_TYPE option. It is now a trigger for cirros specific settings, later it might be used for any other image's settings. Closes-Bug: #2007973 Change-Id: I2738c3b1d302c6656ce2c209671ea954fbc1b05b --- lib/tempest | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/tempest b/lib/tempest index 7da9f17052..9fa989a2f6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -517,8 +517,19 @@ function configure_tempest { # Scenario SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi + # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME if is_provider_network; then From b5f4b1148a3f646a82a759f1dde3da1f74eb803c Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 25 Apr 2023 20:01:42 +0000 Subject: [PATCH 1681/1936] Revert "Revert "Bump cirros version to 0.6.1"" This reverts commit 37d11d00e56ec6ff402a13a28ec308c13291a937. Reason for revert: reverting this revert as the issue caused by the original patch (before the first revert) is fixed by: https://review.opendev.org/c/openstack/devstack/+/881504 Therefore we can proceed with the cirros version bump. 
Change-Id: I43e2b04a0142c19fb1a79da5a33cc444149e18f1 --- doc/source/guides/nova.rst | 2 +- stackrc | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..d0fb274c13 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.1-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index b7ce238366..a17d88ecbd 100644 --- a/stackrc +++ b/stackrc @@ -662,20 +662,19 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -692,11 +691,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; 
vsphere) From 814e659e32a919ea68c29451753aa49c993ce5ed Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 22 May 2023 10:25:38 -0700 Subject: [PATCH 1682/1936] Default MYSQL_REDUCE_MEMORY=True We have lots of evidence that this is a net benefit, so enable it by default instead of everyone having to opt-in. Change-Id: I66fa1799ff5177c3667630a89e15c072a8bf975a --- .zuul.yaml | 3 --- stackrc | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 316e89ae32..9cad5d4084 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -696,9 +696,6 @@ description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. - vars: - devstack_localrc: - MYSQL_REDUCE_MEMORY: true # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, diff --git a/stackrc b/stackrc index b7ce238366..672679e0b2 100644 --- a/stackrc +++ b/stackrc @@ -204,7 +204,7 @@ MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) # This can be used to reduce the amount of memory mysqld uses while running. # These are unscientifically determined, and could reduce performance or # cause other issues. -MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY) +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is From a37b6abc8ecab1a32593aecdf8f74d54f3c4adb1 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 26 May 2023 13:46:42 +0200 Subject: [PATCH 1683/1936] Resolve distutils deprecation warning The distutils package is deprecated and slated for removal in Python 3.12. Let's use shutil.which which is also recomended by PEP 632: https://peps.python.org/pep-0632/#migration-advice Closes-Bug: #2009229 Change-Id: Ibb2a9731449e765c4a56952a9f02679e9618778b --- tools/worlddump.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index e2921737db..aadd33b634 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -19,7 +19,6 @@ import argparse import datetime -from distutils import spawn import fnmatch import io import os @@ -76,7 +75,7 @@ def _dump_cmd(cmd): def _find_cmd(cmd): - if not spawn.find_executable(cmd): + if not shutil.which(cmd): print("*** %s not found: skipping" % cmd) return False return True From b2ad00cb66bd38ec6179d3bd1bf41556b966dc8c Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Wed, 24 May 2023 21:03:28 +0200 Subject: [PATCH 1684/1936] Use RDO official CloudSIG mirrors for C9S deployments Instead of using RDO Trunk repo server, CentOS official mirrors provide a most reliable infrastructure and supports EMS which is required when enabling FIPS in C9S. In order to install the rdo-release rpm from repo.fedoraproject.org, which does not support EMS, I'm using a workaround to wget, which works with non-EMS servers because it uses gnutls instead of openssl, and install it locally with rpm. This is also consistent to CentOS 8 implementatioin. 
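In short, the workaround boils down to the following sequence (shown
here for the master branch case, mirroring the change below):

    TEMPRDODIR=$(mktemp -d)
    wget -P $TEMPRDODIR https://rdoproject.org/repos/rdo-release.el9.rpm
    sudo rpm -ivh $TEMPRDODIR/rdo-release*rpm
    rm -rf $TEMPRDODIR

so the TLS connection to rdoproject.org is made by wget (gnutls)
instead of dnf (openssl).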
Closes-Bug: #2020661 Closes-Bug: #2020434 Change-Id: Icd99f467d47aaafaaf3ee8f2a3c4da08842cb672 --- stack.sh | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index be3c9dda11..1d32ed8156 100755 --- a/stack.sh +++ b/stack.sh @@ -311,7 +311,22 @@ function _install_rdo { sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then - sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo + install_package wget + # We need to download rdo-release package using wget as installing with dnf from repo.fedoraproject.org fails in + # FIPS enabled systems after https://bugzilla.redhat.com/show_bug.cgi?id=2157951 + # Until we can pull rdo-release from a server which supports EMS, this workaround is doing wget, which does + # not relies on openssl but on gnutls, and then install it locally using rpm + TEMPRDODIR=$(mktemp -d) + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el9.rpm points to latest RDO release, use that for master + wget -P $TEMPRDODIR https://rdoproject.org/repos/rdo-release.el9.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + wget -P $TEMPRDODIR https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm + fi + sudo rpm -ivh $TEMPRDODIR/rdo-release*rpm + rm -rf $TEMPRDODIR fi sudo dnf -y update } From a13201646d7ca50d92c44b73ba3f20bbf0f3f1d3 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 30 May 2023 13:31:05 -0400 Subject: [PATCH 1685/1936] Install systemd-coredump on Debian-based distros On Debian-based distros, the 'coredumpctl' command is provided by the systemd-coredump package, which is not installed by default. On failure, when "post" commands are executed this error is seen: controller | /bin/bash: line 1: coredumpctl: command not found Install it along with other libvirt packages to avoid the error. On Fedora distros it is in the systemd package, so the problem is not seen since it is always installed. Change-Id: I6012bd3240d68736a5db8ae49dc32098a086f320 --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 799230603c..ba2e98e304 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -69,7 +69,7 @@ function install_libvirt { $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python if is_ubuntu; then - install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump if is_arch "aarch64"; then install_package qemu-efi fi From fbc1865dc4e5b84ebafaf1d30cffc582ae3f0c0f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:19:37 +0200 Subject: [PATCH 1686/1936] Drop Fedora support Fedora 36 is EOL, also opendev is dropping support for Fedora images completely since interest in running jobs on that platform is no longer existing. CentOS 9 Stream has evolved as replacement platform for new features. Only drop the Zuul configuration and the tag in stack.sh for now plus update some docs. Cleanup of the deployment code will be done in a second step. 
Change-Id: Ica483fde27346e3939b5fc0d7e0a6dfeae0e8d1e --- .zuul.yaml | 33 --------------------------------- README.rst | 8 ++++---- doc/source/index.rst | 6 +++--- doc/source/overview.rst | 5 ++--- stack.sh | 4 ++-- 5 files changed, 11 insertions(+), 45 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9cad5d4084..a7be67153b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -78,16 +78,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-fedora-latest - nodes: - - name: controller - label: fedora-36 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-debian-bullseye nodes: @@ -854,23 +844,6 @@ devstack_services: tls-proxy: false -- job: - name: devstack-platform-fedora-latest - parent: tempest-full-py3 - description: Fedora latest platform test - nodeset: devstack-single-node-fedora-latest - voting: false - -- job: - name: devstack-platform-fedora-latest-virt-preview - parent: tempest-full-py3 - description: Fedora latest platform test using the virt-preview repo. - nodeset: devstack-single-node-fedora-latest - voting: false - vars: - devstack_localrc: - ENABLE_FEDORA_VIRT_PREVIEW_REPO: true - - job: name: devstack-tox-base parent: devstack @@ -944,7 +917,6 @@ - devstack - devstack-ipv6 - devstack-enforce-scope - - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx @@ -1048,10 +1020,6 @@ # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and currently non-voting in the # check queue for nova changes but relies on devstack configuration - # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood - # for Nova to allow early testing of the latest versions of Libvirt and - # QEMU. Should only graduate out of experimental if it ever moves into - # the check queue for Nova. experimental: jobs: @@ -1080,7 +1048,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - devstack-platform-fedora-latest-virt-preview - devstack-no-tls-proxy periodic: jobs: diff --git a/README.rst b/README.rst index f3a585a926..86b85da956 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ from git source trees. Goals ===== -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) @@ -28,9 +28,9 @@ Versions The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the -following to create a Pike OpenStack cloud:: +following to create a Zed OpenStack cloud:: - git checkout stable/pike + git checkout stable/zed ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -55,7 +55,7 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost/identity/v2.0/ +* Keystone: http://myhost/identity/v3/ We also provide an environment file that you can use to interact with your cloud via CLI:: diff --git a/doc/source/index.rst b/doc/source/index.rst index ccd0fef330..a5a11e251b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -37,8 +37,8 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. DevStack -attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and openEuler. +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. @@ -113,7 +113,7 @@ Start the install $ ./stack.sh -This will take a 15 - 20 minutes, largely depending on the speed of +This will take 15 - 30 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be installed during this process. diff --git a/doc/source/overview.rst b/doc/source/overview.rst index a609333289..4384081769 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -23,13 +23,12 @@ strategy to include the latest Ubuntu release and the latest RHEL release.* - Ubuntu: current LTS release plus current development release -- Fedora: current release plus previous release -- RHEL/CentOS: current major release +- RHEL/CentOS/RockyLinux: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly increase its chance of being well-maintained. -- Patches for Ubuntu and/or Fedora will not be held up due to +- Patches for Ubuntu and/or RockyLinux will not be held up due to side-effects on other OS platforms. Databases diff --git a/stack.sh b/stack.sh index 1d32ed8156..e9617eee78 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From f1c5442becad6fcdfb16676e8bc99835d4a75b22 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 10 Jun 2023 03:07:59 +0000 Subject: [PATCH 1687/1936] Updated from generate-devstack-plugins-list Change-Id: Icc3aa69d7bbfa217676402682454cd4b37fb6c29 --- doc/source/plugin-registry.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b244ca5dd8..ec502ea252 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,10 +69,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ -openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ -openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ From 39228451b6542ff63f288affbda13897089eb16d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 15 Jun 2023 10:46:51 +0200 Subject: [PATCH 1688/1936] Bump default cirros version to 0.6.2 Cirros has made a new release, including a newer kernel that should fix some issues when using nested virtualization. Related-Bug: 2023559 Change-Id: I63469371b13801094a3ee1baae6e343999fbefa5 --- doc/source/guides/nova.rst | 2 +- stackrc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index d0fb274c13..705d427e68 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.1-x86_64-disk --nic none --wait test-server + --image cirros-0.6.2-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index 8820c621e5..7160d0a390 100644 --- a/stackrc +++ b/stackrc @@ -674,7 +674,7 @@ esac #IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.2"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 7288df34f8513caf6f3985c75855feb572f6b004 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 16 Jun 2023 14:25:33 +0530 Subject: [PATCH 1689/1936] Add 10 second buffer for uwsgi service stop Default for systemd TimeoutStopSec is 90 seconds and that is same for default graceful shutdown of uwsgi service(WORKER_TIMEOUT). Due to the Related-Bug graceful stop attempt fails and there is no room for force shutdown. 
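(For illustration: with both values at 90 seconds the graceful-shutdown
window and systemd's kill deadline expire together, so a hung worker is
never force-stopped; the effective stop timeout of a unit can be checked
with, for example, systemctl show devstack@keystone -p TimeoutStopUSec.)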
This patch reduces default for WORKER_TIMEOUT by 10 seconds so there is a buffer to force stop the service. Closes-Bug: #2020643 Related-Bug: #2015065 Change-Id: I6aacac94f9697088338b3d2f99d8eaa22c2be67b --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8820c621e5..7465f54112 100644 --- a/stackrc +++ b/stackrc @@ -804,7 +804,7 @@ NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout -WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} # Common Configuration # -------------------- From ad029c0e8b66d81889c80d4a68b4654dd169fecf Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 26 Jun 2023 10:57:49 +0100 Subject: [PATCH 1690/1936] The AZ filter is deprecated and planned for removal this cycle To facilitate that this change removes it form the default filter list. By default nova has used placement for AZs so this filter has not been requried since xena. Change-Id: Ie5e216dd8c2a7ecf43cc6954ec4f73d4d67b5b3b --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 21067f302b..abf4eee465 100644 --- a/lib/nova +++ b/lib/nova @@ -115,7 +115,7 @@ FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. -NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf From 4a1b2808af68ab50a15a9c16bfe217fac50bf309 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Mon, 26 Jun 2023 12:23:58 +0200 Subject: [PATCH 1691/1936] Remove unused file This was forgotten in [0] [0] I20501fec140998b91c9ddfd84b7b10168624430a Change-Id: Iacd86e3953f573a0fc38dc4898aafefccb3a9a79 --- files/dnsmasq-for-baremetal-from-nova-network.conf | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 files/dnsmasq-for-baremetal-from-nova-network.conf diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 From 27568ea33460b9ea4635a7d0a0bb06d32654150b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 27 Jun 2023 02:19:53 +0000 Subject: [PATCH 1692/1936] Updated from generate-devstack-plugins-list Change-Id: I6fd6a718ce39d849342b30970ca39477ce285374 --- doc/source/plugin-registry.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ec502ea252..f54fca92e6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,6 +69,9 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ +openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ From e32715b2515fdae523a3d113a881f0a57fff9410 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 22 Jun 2023 21:10:31 -0500 Subject: [PATCH 1693/1936] Set two different image in tempest irespective of DEFAULT_IMAGE_NAME In current logic to set two different image in Tempest in config option image_ref and image_ref_alt, we consider if DEFAULT_IMAGE_NAME is found in glance then set the same image in tempest for those two config option. This means even we have two different image available in glance, still we set same image in image_ref as well as image_ref_alt and all the rebuild tests are rebuilt on the same image. I could not find any reason why we set same image if DEFAULT_IMAGE_NAME exist, below are the original change added this logic - https://review.opendev.org/c/openstack/devstack/+/17553 We had a requirement of test to run on two different images - https://review.opendev.org/c/openstack/tempest/+/831018 and for that we need to set DEFAULT_IMAGE_NAME to non exist image name but that broke the Ironic which was reply on the valid name in DEFAULT_IMAGE_NAME - https://review.opendev.org/c/openstack/ironic/+/886790 As we do not have any reason not to set two different image if DEFAULT_IMAGE_NAME is set, I am removing the condition of DEFAULT_IMAGE_NAME from lib/tempest logic and always set two different images if they are available. 
Depends-On: https://review.opendev.org/c/openstack/tempest/+/886796 Change-Id: I9d215f48d4440f2fa6dcc0d222a10896caf01215 --- lib/tempest | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index 9fa989a2f6..4f72a6d174 100644 --- a/lib/tempest +++ b/lib/tempest @@ -149,11 +149,10 @@ function set_tempest_venv_constraints { # ramdisk and kernel images. Takes 3 arguments, an array and two # variables. The array will contain the list of active image UUIDs; # if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be -# set as the value of *both* other parameters. +# set as the value img_id ($2) parameters. function get_active_images { declare -n img_array=$1 declare -n img_id=$2 - declare -n img_id_alt=$3 # start with a fresh array in case we are called multiple times img_array=() @@ -161,7 +160,6 @@ function get_active_images { while read -r IMAGE_NAME IMAGE_UUID; do if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then img_id="$IMAGE_UUID" - img_id_alt="$IMAGE_UUID" fi img_array+=($IMAGE_UUID) done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') @@ -170,13 +168,12 @@ function get_active_images { function poll_glance_images { declare -n image_array=$1 declare -n image_id=$2 - declare -n image_id_alt=$3 local -i poll_count poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT while (( poll_count-- > 0 )) ; do sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL - get_active_images image_array image_id image_id_alt + get_active_images image_array image_id if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then return fi @@ -228,7 +225,7 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - get_active_images images image_uuid image_uuid_alt + get_active_images images image_uuid if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then # Glance image import is asynchronous and may be configured @@ -236,7 +233,7 @@ function configure_tempest { # it's possible that this code is being executed before the # import has completed and there may be no active images yet. if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then - poll_glance_images images image_uuid image_uuid_alt + poll_glance_images images image_uuid if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" exit 1 @@ -258,7 +255,15 @@ function configure_tempest { *) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[1]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in $images; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + fi + done fi ;; esac From 58c80b2424623096e4a1f7a901f424be0ce6cb3f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 27 Jun 2023 12:16:32 -0700 Subject: [PATCH 1694/1936] nova: Bump timeout-per-gb for BFV rebuild ops This increases the timeout we use to wait for cinder to perform a volume reimage. Since devstack is often running on a single machine with non-production IO performance, we should bump this limit to avoid hitting it before the rebuild completes. Change-Id: Ie2663b951acb0c1a65597a39e032948764e6ae6a --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index 21067f302b..e60b71c36d 100644 --- a/lib/nova +++ b/lib/nova @@ -1036,6 +1036,10 @@ function start_nova_compute { # by the compute process. 
configure_console_compute + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 60 + # Configure the OVSDB connection for os-vif if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" From 931b45defd07991890707b434638166800ec948a Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 19 Jul 2023 12:15:52 +0530 Subject: [PATCH 1695/1936] Handle more than 1 image while configuring tempest [1] caused a regression causing failures when more than 1 images are setup. Fixing it by correctly using the array variable. Also add a break in the for loop once if condition is met. [1] https://review.opendev.org/c/openstack/devstack/+/886795 Closes-Bug: #2028123 Change-Id: I4f13c1239312bbcca8c65e875d65d03702161c18 --- lib/tempest | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4f72a6d174..4ba101f218 100644 --- a/lib/tempest +++ b/lib/tempest @@ -259,9 +259,10 @@ function configure_tempest { image_uuid_alt=${images[1]} fi elif [ -z "$image_uuid_alt" ]; then - for image in $images; do + for image in ${images[@]}; do if [[ "$image" != "$image_uuid" ]]; then image_uuid_alt=$image + break fi done fi From e261bd809e81c01c153cdcdb50be47ed3c89c46a Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 19 Jul 2023 16:04:12 -0400 Subject: [PATCH 1696/1936] Always set image_uuid_alt in configure_tempest() When there is only a single image, configure_tempest() needs to always set image_uuid_alt the same as image_uuid, else it will fail trying to determine the size of the flavor to use for it later in the function. Introduced by [0], and subsequent change did not fix it. [0] https://review.opendev.org/c/openstack/devstack/+/886795 Change-Id: Ibfe99ff732570dbd415772c5625f43e35b68c871 Related-bug: #2028123 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4ba101f218..2f62f6ea62 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,8 +249,8 @@ function configure_tempest { 1) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[0]} fi + image_uuid_alt=$image_uuid ;; *) if [ -z "$image_uuid" ]; then From 770352beb05f63fb9192ad488b6b8344fd57c985 Mon Sep 17 00:00:00 2001 From: jskunda Date: Tue, 18 Jul 2023 09:32:05 +0200 Subject: [PATCH 1697/1936] git: git checkout for a commit hash combinated with depth argument This patch: https://review.opendev.org/c/openstack/devstack/+/882299 provides functionality, that commit hash can be passed as last arugment, however when GIT_DEPTH is set, it fails, as in: timeout -s SIGINT 0 git clone https://github.com/ovn-org/ovn.git ./ovn --depth 1 --branch 03b95a4566a15f7544f4cdf35629dacede4dcf55 fatal: Remote branch 03b95a4566a15f7544f4cdf35629dacede4dcf55 not found in upstream origin Closes-Bug: #2023020 Change-Id: I748354964a133e028e12458cc9014d6d014cbdb9 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 63144d6ed7..a668d55b8d 100644 --- a/functions-common +++ b/functions-common @@ -609,9 +609,10 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." 
die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - git_timed clone $git_clone_flags $git_remote $git_dest + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest cd $git_dest - git checkout $git_ref + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest From d115bfd72a61f23bba0eb5d2d82c2ad94eac15e2 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 31 Jul 2023 07:04:34 -0700 Subject: [PATCH 1698/1936] Reduce the flush frequency of dbcounter plugin This relaxes the limits for dbcounter to make it flush stats to the database less often. Currently every thirty seconds or 100 hits, we write a stats line to the database. In some services (like keystone) this can trigger more than one write per second because of the massive number of SELECT calls that service makes. This removes the hit limit and decreases the mandatory flush interval to once a minute. Hopefully this will manifest as lower load on the database triggered by what would be readonly operations. Change-Id: I43a58532c0541075a2d36408abc50a41f7994bda --- tools/dbcounter/dbcounter.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py index 0ed7bb813a..86e5529c97 100644 --- a/tools/dbcounter/dbcounter.py +++ b/tools/dbcounter/dbcounter.py @@ -96,20 +96,18 @@ def stat_writer(self): This reads "hists" from from a queue fed by _log_event() and writes (db,op)+=count stats to the database after ten seconds of no activity to avoid triggering a write for every SELECT - call. Write no less often than every thirty seconds and/or 100 - pending hits to avoid being starved by constant activity. + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. """ LOG.debug('[%i] Writer thread running' % os.getpid()) while True: to_write = {} - total = 0 last = time.time() - while time.time() - last < 30 and total < 100: + while time.time() - last < 60: try: item = self.queue.get(timeout=10) to_write.setdefault(item, 0) to_write[item] += 1 - total += 1 except queue.Empty: break From 7a2021dfa01368a69e1e43785419ac68b62a9b5f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:30:02 +0200 Subject: [PATCH 1699/1936] Add rocky job to gate It was made voting some time ago, but we missed also running it in gate. With that RHEL platform test in place, we can keep c9s permanently non-voting, which is better suited to match its instability. 
Change-Id: I6712ac6dc64e4fe2203b2a5f6a381f6d2150ba0f --- .zuul.yaml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a7be67153b..948a9af8ca 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -697,8 +697,6 @@ description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream timeout: 9000 - # TODO(kopecmartin) n-v until the following is resolved: - # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false - job: @@ -968,11 +966,9 @@ jobs: - devstack - devstack-ipv6 - # TODO(kopecmartin) n-v until the following is resolved: - # https://bugs.launchpad.net/neutron/+bug/1979047 - # - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-ubuntu-focal + - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From a40f9cb91fbedddec89f0ffd6c7dd4b3828a232e Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 4 Apr 2018 14:02:30 -0700 Subject: [PATCH 1700/1936] Add option to install everything in global venvs Since we are python3 only for openstack we create a single python3 virtualenv to install all the packages into. This gives us the benefits of installing into a virtualenv while still ensuring coinstallability. This is a major change and will likely break many things. There are several reasons for this. The change that started this effort was pip stopped uninstalling packages which used distutils to generate their package installation. Many distro packages do this which meant that pip installed packages and distro packages could not coexist in the global install space. More recently git has made pip installing repos as root more difficult due to file ownership concerns. Currently the switch to the global venv is optional, but if we go down this path we should very quickly remove the old global installation method as it has only caused us problems. Major hurdles we have to get over are convincing rootwrap to trust binaries in the virtualenvs (so you'll notice we update rootwrap configs). Some distros still have issues, keep them using the old setup for now. Depends-On: https://review.opendev.org/c/openstack/grenade/+/880266 Co-Authored-By: Dr. 
Jens Harbott Change-Id: If9bc7ba45522189d03f19b86cb681bb150ee2f25 --- .zuul.yaml | 8 ++++++++ files/apache-horizon.template | 1 + functions-common | 5 +++++ inc/python | 34 +++++++++++++++++++++++++++++++--- inc/rootwrap | 5 +++++ lib/glance | 3 +++ lib/horizon | 6 ++++++ lib/tls | 7 +++++-- stack.sh | 12 ++++++++++++ stackrc | 8 ++++++++ tools/install_prereqs.sh | 2 ++ tools/memory_tracker.sh | 7 ++++++- 12 files changed, 92 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a7be67153b..803db3a3fa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -700,6 +700,9 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false + vars: + devstack_localrc: + GLOBAL_VENV: false - job: name: devstack-platform-debian-bullseye @@ -709,6 +712,9 @@ timeout: 9000 vars: configure_swap_size: 4096 + devstack_localrc: + # TODO(frickler): drop this once wheel build is fixed + MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-rocky-blue-onyx @@ -718,6 +724,8 @@ timeout: 9000 vars: configure_swap_size: 4096 + devstack_localrc: + GLOBAL_VENV: false - job: name: devstack-platform-ubuntu-focal diff --git a/files/apache-horizon.template b/files/apache-horizon.template index efcfc0360b..da7a7d26c3 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -39,4 +39,5 @@ CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/functions-common b/functions-common index 5e1aa34279..f752271976 100644 --- a/functions-common +++ b/functions-common @@ -1522,6 +1522,7 @@ function write_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "KillMode" "process" @@ -1549,6 +1550,7 @@ function write_uwsgi_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" @@ -1614,6 +1616,9 @@ function _run_under_systemd { fi local env_vars="$5" if [[ "$command" =~ "uwsgi" ]] ; then + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" diff --git a/inc/python b/inc/python index a24f4e910a..cc6e01fede 100644 --- a/inc/python +++ b/inc/python @@ -32,6 +32,23 @@ function join_extras { # Python Functions # ================ +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. + # This package is currently installed via the distro and not + # available on pypi. 
+ python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV + pip_install -U pip + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + # Get the path to the pip command. # get_pip_command function get_pip_command { @@ -60,8 +77,11 @@ function get_python_exec_prefix { fi $xtrace - local PYTHON_PATH=/usr/local/bin - echo $PYTHON_PATH + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi } # Wrapper for ``pip install`` that only installs versions of libraries @@ -166,6 +186,14 @@ function pip_install { if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack boostrapping needs to operate in a system context + # too bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" # See @@ -439,7 +467,7 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist - if [[ "$flags" == "-e" ]]; then + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then safe_chown -R $STACK_USER $1/*.egg-info fi } diff --git a/inc/rootwrap b/inc/rootwrap index 2a6e4b648f..4c65440a4e 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -60,6 +60,11 @@ function configure_rootwrap { sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + # Set up the rootwrap sudoers local tempfile tempfile=$(mktemp) diff --git a/lib/glance b/lib/glance index 430d94d3a4..e64f00027e 100644 --- a/lib/glance +++ b/lib/glance @@ -47,6 +47,9 @@ USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance +fi # When Cinder is used as a glance store, you can optionally configure cinder to # optimize bootable volume creation by allowing volumes to be cloned directly # in the backend instead of transferring data via Glance. 
To use this feature, diff --git a/lib/horizon b/lib/horizon index f76f9e557d..611329d619 100644 --- a/lib/horizon +++ b/lib/horizon @@ -115,6 +115,11 @@ function configure_horizon { local horizon_conf horizon_conf=$(apache_site_config_for horizon) + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + # Configure apache to run horizon # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" @@ -124,6 +129,7 @@ function configure_horizon { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then diff --git a/lib/tls b/lib/tls index a1e162d2e2..d35e9e2cee 100644 --- a/lib/tls +++ b/lib/tls @@ -364,8 +364,11 @@ function deploy_int_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') - + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then sudo rm -f $capath diff --git a/stack.sh b/stack.sh index ad88eab9d5..c8f7c9d79e 100755 --- a/stack.sh +++ b/stack.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash + # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Cinder**, **Glance**, # **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** @@ -824,6 +825,17 @@ fi source $TOP_DIR/tools/fixup_stuff.sh fixup_all +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + + setup_devstack_virtualenv +fi + # Install subunit for the subunit output stream pip_install -U os-testr diff --git a/stackrc b/stackrc index dcc0ce45e0..0d1880cec9 100644 --- a/stackrc +++ b/stackrc @@ -183,6 +183,14 @@ IDENTITY_API_VERSION=3 # each services ${SERVICE}_ENFORCE_SCOPE variables ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. 
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index f2d57c8451..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -79,6 +79,8 @@ if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then fi fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index 6c36534f01..2f404c26fb 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -14,7 +14,12 @@ set -o errexit -PYTHON=${PYTHON:-python3} +# TODO(frickler): make this use stackrc variables +if [ -x /opt/stack/data/venv/bin/python ]; then + PYTHON=/opt/stack/data/venv/bin/python +else + PYTHON=${PYTHON:-python3} +fi # time to sleep between checks SLEEP_TIME=20 From 0b79f6f7690773701a37921f626782e528fa9c36 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 4 Jul 2023 07:18:01 +0200 Subject: [PATCH 1701/1936] Add debian-bookworm job Change-Id: Id5e54775e2be38a75db0bd1f55d1d3b5ae7ef71f --- .zuul.yaml | 24 ++++++++++++++++++++++++ lib/databases/mysql | 5 +++-- stack.sh | 2 +- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 803db3a3fa..9cc95b607e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -78,6 +78,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-debian-bullseye nodes: @@ -704,6 +714,19 @@ devstack_localrc: GLOBAL_VENV: false +- job: + name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(frickler): drop this once wheel build is fixed + MYSQL_GATHER_PERFORMANCE: false + - job: name: devstack-platform-debian-bullseye parent: tempest-full-py3 @@ -926,6 +949,7 @@ - devstack-ipv6 - devstack-enforce-scope - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-focal diff --git a/lib/databases/mysql b/lib/databases/mysql index 27d1ec600f..e069e128e9 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif [[ "$DISTRO" == "bullseye" ]]; then + elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -122,7 +122,8 @@ function configure_database_mysql { # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! 
"$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then if [[ "$DISTRO" == "jammy" ]]; then # For Ubuntu 22.04 (jammy) we follow the model outlined in # https://mariadb.org/authentication-in-mariadb-10-4/ diff --git a/stack.sh b/stack.sh index c8f7c9d79e..0434001b7a 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 113689ee4694de20c019735fdace447225aa18f7 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 2 Aug 2023 12:58:45 +0530 Subject: [PATCH 1702/1936] Woraround systemd issue on CentOS 9-stream systemd-252-16.el9 introduced a regression where libvirtd process exits after 120s of inactivity. Add a workaround to unset 120s timeout for libvirtd, the workaround can be removed once the fix is available in systemd rpm. Related-Bug: #2029335 Change-Id: Id6db6c17518b54d5fef7c381c509066a569aff6d --- tools/fixup_stuff.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..80a83bb128 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -94,6 +94,11 @@ EOF if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' fi + # TODO(ykarel): Workaround for systemd issue, remove once fix is + # included in systemd rpm https://bugs.launchpad.net/devstack/+bug/2029335 + if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 9 ]]; then + echo 'LIBVIRTD_ARGS=""' | sudo tee /etc/sysconfig/libvirtd + fi } function fixup_ovn_centos { From 3832ff52b4445324b58a5da123ef4e3880df1591 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 3 Aug 2023 09:16:55 -0700 Subject: [PATCH 1703/1936] Add SERVICE_REPORT_INTERVAL knob Heavily-loaded workers in CI consistently fail to complete the service checkin task, which is configured for every ten seconds in nova and cinder. This generates additional load on the database server as well as consumes a threadpool worker. If we're not making the deadline, there's really no point in having it be so high. Further, since the workers must remain up for all the tempest tests we're running against them, there's really no benefit to a fast-fail detection. This sets the report_interval to 120s for nova and cinder, and sets service_down_time to 6x that value, which is consistent with the default scale. Depends-On: https://review.opendev.org/c/openstack/tempest/+/890448 Change-Id: Idd7aa1daf354256b143a3778f161cfc72b318ea5 --- lib/cinder | 8 ++++++++ lib/nova | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/lib/cinder b/lib/cinder index e37eff4019..f8682d5a71 100644 --- a/lib/cinder +++ b/lib/cinder @@ -76,6 +76,11 @@ CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. 
+CINDER_SERVICE_REPORT_INTERVAL=120 + # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh # volume group, otherwise it will do thick. The other valid choices are @@ -325,6 +330,9 @@ function configure_cinder { # details and example failures. iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" diff --git a/lib/nova b/lib/nova index 888a2e2b25..905788f28f 100644 --- a/lib/nova +++ b/lib/nova @@ -75,6 +75,11 @@ NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # mean "use uwsgi" because we'll be always using uwsgi. NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +NOVA_SERVICE_REPORT_INTERVAL=120 + if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" fi @@ -448,6 +453,9 @@ function create_nova_conf { iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager + iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL + iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6)) + if is_fedora; then # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin From c3b0b9034e6b35187a125283e55056ae90cbbc4a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 4 Aug 2023 06:41:30 -0700 Subject: [PATCH 1704/1936] Disable waiting forever for connpool workers This will cause apache to no longer wait forever for a connection pool member to become available before returning 503 to the client. This may help us determine if some of the timeouts we see when talking to the services come from an overloaded apache. 
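For illustration, the directives rewritten below end up carrying one extra parameter. In mod_proxy, acquire is the time to wait for a free pooled connection, in milliseconds, so acquire=1 makes Apache answer 503 almost immediately instead of queueing. A sketch of one generated line, with a placeholder URL path and port:

    ProxyPass "/image" "http://127.0.0.1:60999" retry=0 acquire=1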
Change-Id: Ibc19fc9a53e2330f9aca45f5a10a59c576cb22e6 --- lib/apache | 6 +++--- lib/tls | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/apache b/lib/apache index 76eae9c057..cf7215bef2 100644 --- a/lib/apache +++ b/lib/apache @@ -290,7 +290,7 @@ function write_uwsgi_config { apache_conf=$(apache_site_config_for $name) iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi @@ -351,7 +351,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } @@ -370,7 +370,7 @@ function write_local_proxy_http_config { echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } diff --git a/lib/tls b/lib/tls index a1e162d2e2..48e5929a2b 100644 --- a/lib/tls +++ b/lib/tls @@ -541,9 +541,11 @@ $listen_string # Avoid races (at the cost of performance) to re-use a pooled connection # where the connection is closed (bug 1807518). + # Set acquire=1 to disable waiting for connection pool members so that + # we can determine when apache is overloaded (returns 503). SetEnv proxy-initial-not-pooled - ProxyPass http://$b_host:$b_port/ retry=0 nocanon + ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1 ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log From 0da88c4af096ab95ccf438960433bb113278181e Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Mon, 7 Aug 2023 14:13:41 +0200 Subject: [PATCH 1705/1936] Remove wget + rpm workaround to manage repos install in CentOS RDO has moved rdo-release packages to a new infra which supports EMS so we do not need to wget it and install it using local rpm install. This partially reverts [1]. 
[1] https://review.opendev.org/c/openstack/devstack/+/884277/ Change-Id: I189d0c3da0e7b017e2568022c14e6c8fb28251f1 --- stack.sh | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/stack.sh b/stack.sh index ad88eab9d5..b03c3cda1f 100755 --- a/stack.sh +++ b/stack.sh @@ -311,22 +311,14 @@ function _install_rdo { sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then - install_package wget - # We need to download rdo-release package using wget as installing with dnf from repo.fedoraproject.org fails in - # FIPS enabled systems after https://bugzilla.redhat.com/show_bug.cgi?id=2157951 - # Until we can pull rdo-release from a server which supports EMS, this workaround is doing wget, which does - # not relies on openssl but on gnutls, and then install it locally using rpm - TEMPRDODIR=$(mktemp -d) if [[ "$TARGET_BRANCH" == "master" ]]; then # rdo-release.el9.rpm points to latest RDO release, use that for master - wget -P $TEMPRDODIR https://rdoproject.org/repos/rdo-release.el9.rpm + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm else # For stable branches use corresponding release rpm rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") - wget -P $TEMPRDODIR https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm fi - sudo rpm -ivh $TEMPRDODIR/rdo-release*rpm - rm -rf $TEMPRDODIR fi sudo dnf -y update } From 4363b0bd84aad8984ee148b3b4868b311e5d855b Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 8 Aug 2023 08:38:00 -0400 Subject: [PATCH 1706/1936] Fix $LOGDIR owner to be stack.stack I have seen this failure in the gate a few times: [ERROR] /opt/stack/devstack/functions-common:2334 Neutron did not start /opt/stack/devstack/functions-common: line 310: /opt/stack/logs/error.log: Permission denied So whatever was trying to be written to error.log never happened. Change to be like other directories in this file and make the $LOGDIR owner stack.stack. Change-Id: I673011aba10c8d03234100503ccc5876e75baff2 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ad88eab9d5..e0caafafa6 100755 --- a/stack.sh +++ b/stack.sh @@ -349,7 +349,9 @@ fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then - mkdir -p $LOGDIR + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR fi # Destination path for service data From 26b5eddeaaeb3e142d483c12d9a501fdc6abaf10 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 11 Aug 2023 21:51:05 +0200 Subject: [PATCH 1707/1936] GLOBAL_VENV: add nova to linked binaries This is being used in some nova jobs, so we need to add it. Also order the list of linked binaries to allow easier maintenance. 
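As an aside, the growing list of ln calls in the hunk below could also be expressed as a loop. This is only a sketch using the same venv path and binary names as the patch, not what the change itself does:

    for bin in cinder-rtstool nova nova-manage openstack privsep-helper tox; do
        sudo ln -sf /opt/stack/data/venv/bin/$bin /usr/local/bin
    done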
Change-Id: Ief012f7842d6e14380c9575740d1856bc1f2355e --- stack.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c8f7c9d79e..641c3c3b5a 100755 --- a/stack.sh +++ b/stack.sh @@ -827,11 +827,12 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this - sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin - sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin setup_devstack_virtualenv fi From 4c45bec6ebb965202d8d7d7832c093f47ecc2910 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sat, 12 Aug 2023 11:35:08 +0200 Subject: [PATCH 1708/1936] GLOBAL_VENV: add more binaries glance and rally binaries are also needed. Also make sure the cinder-rtstool is only called when cinder is actually enabled. Change-Id: I18113eabf2fa83e36bace276883775303f6a1e9a --- lib/lvm | 20 +++++++++++--------- stack.sh | 2 ++ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/lib/lvm b/lib/lvm index 57d2cd4e62..162c491f22 100644 --- a/lib/lvm +++ b/lib/lvm @@ -137,15 +137,17 @@ function init_lvm_volume_group { # Start with a clean volume group _create_lvm_volume_group $vg $size - # Remove iscsi targets - if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then - sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then - sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete - elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then - # If we don't disconnect everything vgremove will block - sudo nvme disconnect-all - sudo nvmetcli clear + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi fi _clean_lvm_volume_group $vg } diff --git a/stack.sh b/stack.sh index 94d586e812..d8b70a2b39 100755 --- a/stack.sh +++ b/stack.sh @@ -828,10 +828,12 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin setup_devstack_virtualenv From 08b434e5b06a0f28a1779159df494d27db95704c Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Mon, 14 Aug 2023 17:05:45 +0000 Subject: [PATCH 1709/1936] Revert "GLOBAL_VENV: add nova to linked binaries" This reverts commit 
26b5eddeaaeb3e142d483c12d9a501fdc6abaf10. Reason for revert: nova changed to use osc - https://review.opendev.org/c/openstack/nova/+/891247/2 Resolving conflict due to - https://review.opendev.org/c/openstack/devstack/+/891248 Change-Id: I69e179a90a241946b3f426a41c38ae72a66ba6dc --- stack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/stack.sh b/stack.sh index d8b70a2b39..a8f46bfeb9 100755 --- a/stack.sh +++ b/stack.sh @@ -829,7 +829,6 @@ if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin - sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin From 220004fb5c529d84e2e8d909db71cf17a00c0815 Mon Sep 17 00:00:00 2001 From: Artom Lifshitz Date: Wed, 16 Aug 2023 14:08:15 -0400 Subject: [PATCH 1710/1936] Allow others to override NOVA_SERVICE_REPORT_INTERVAL While the patch where this was first introduced and set to 120 [1] is sensible for the vast majority of jobs, it's conceivable that some jobs might want a different value. Specifically, the whitebox-tempest-plugin changes configurations and restarts Nova services, and to do so it waits for the service status to update in the API before continuing with the tests. With the report interval set to 120 and the down time threshold set to 720, the service would continue showing 'up' in the API long after it was actually down, causing the wait to time out. Whitebox is a low-traffic project with only a couple of devstack jobs that run tempest tests sequentially (concurrency=1). Its CI is also pretty stable. It seems legitimate for it to keep the old default values of report_interval and service_down_time. This patch keeps the 120 default for NOVA_SERVICE_REPORT_INTERVAL, but makes it configurable by individual jobs. Since the original patch also introduced CINDER_SERVICE_REPORT_INTERVAL as a constant, make that configurable as well. [1] https://review.opendev.org/c/openstack/devstack/+/890439 Needed-by: https://review.opendev.org/c/openstack/whitebox-tempest-plugin/+/891612 Change-Id: I64fa2059537ea072a38fb4900d3c7d2d8f0ce429 --- lib/cinder | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index f8682d5a71..768a069a12 100644 --- a/lib/cinder +++ b/lib/cinder @@ -79,7 +79,7 @@ CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $S # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. -CINDER_SERVICE_REPORT_INTERVAL=120 +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh diff --git a/lib/nova b/lib/nova index 905788f28f..da3118f4cd 100644 --- a/lib/nova +++ b/lib/nova @@ -78,7 +78,7 @@ NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. 
-NOVA_SERVICE_REPORT_INTERVAL=120 +NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" From 7c4a955c52ead024ef50f448b3894b5ef362508d Mon Sep 17 00:00:00 2001 From: yatin Date: Mon, 21 Aug 2023 06:28:30 +0000 Subject: [PATCH 1711/1936] Revert "Woraround systemd issue on CentOS 9-stream" This reverts commit 113689ee4694de20c019735fdace447225aa18f7. Reason for revert: systemd-252-17.el9 which includes the fix is now available in CentOS 9-stream repos. Change-Id: I6fe19838a75a30fd5d2434c03b7f403f1c7e4b50 --- tools/fixup_stuff.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 80a83bb128..faea44f1e0 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -94,11 +94,6 @@ EOF if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' fi - # TODO(ykarel): Workaround for systemd issue, remove once fix is - # included in systemd rpm https://bugs.launchpad.net/devstack/+bug/2029335 - if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 9 ]]; then - echo 'LIBVIRTD_ARGS=""' | sudo tee /etc/sysconfig/libvirtd - fi } function fixup_ovn_centos { From 3a7a3cd8c5a5ac3f1655d6ff17974f8623fb3330 Mon Sep 17 00:00:00 2001 From: Jan Gutter Date: Mon, 14 Aug 2023 21:02:04 +0100 Subject: [PATCH 1712/1936] Update etcd version to 3.4.27 * etcd 3.3 is no longer maintained. * etcd 3.4 removes deprecated interfaces, and clients may need updated configs. * The cinder backend coordination URL needs to explicitly specify the version, until tooz can be updated https://review.opendev.org/c/openstack/tooz/+/891355 * etcd only supports in-place upgrades between minor versions, so any jobs testing upgrades could fail if they skip from 3.2 directly to 3.4 Change-Id: Ifcecdffa17a3a2b1075aa503978c44545c4a2a3c --- lib/cinder | 4 +++- stackrc | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index f8682d5a71..dad17980bc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -414,7 +414,9 @@ function configure_cinder { if [[ ! 
-z "$CINDER_COORDINATION_URL" ]]; then iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" elif is_service_enabled etcd3; then - iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" fi if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then diff --git a/stackrc b/stackrc index 0d1880cec9..2d25e379fd 100644 --- a/stackrc +++ b/stackrc @@ -728,11 +728,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.3.12} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"} -# etcd v3.2.x doesn't have anything for s390x +ETCD_VERSION=${ETCD_VERSION:-v3.4.27} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"} +# etcd v3.2.x and later doesn't have anything for s390x ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then From 7cd3a8eebe1830f94c02bb6ec010c0365f6ab6f1 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 22 Aug 2023 20:40:20 +0200 Subject: [PATCH 1713/1936] Set GLOBAL_VENV to false for centos and rocky As a temporary workaround, let's set the GLOBAL_VENV to false specifically for centos 9 stream and rocky distros where we encountered issues after changing the default value of GLOBAL_VENV to True in Devstack: https://review.opendev.org/c/openstack/devstack/+/558930 Related-Bug: #2031639 Change-Id: I708b5a81c32b0bd650dcd63a51e16346863a6fc0 --- .zuul.yaml | 5 ----- stackrc | 11 ++++++++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e65dc5b7cf..8b60fc9936 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -708,9 +708,6 @@ nodeset: devstack-single-node-centos-9-stream timeout: 9000 voting: false - vars: - devstack_localrc: - GLOBAL_VENV: false - job: name: devstack-platform-debian-bookworm @@ -745,8 +742,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - GLOBAL_VENV: false - job: name: devstack-platform-ubuntu-focal diff --git a/stackrc b/stackrc index 0d1880cec9..bd4e2f17a2 100644 --- a/stackrc +++ b/stackrc @@ -188,9 +188,18 @@ ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) # Note that the DATA_DIR is selected because grenade testing uses a shared # DATA_DIR but different DEST dirs and we don't want two sets of venvs, # instead we want one global set. -GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. 
certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +fi + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. From 5a51aa524c1f955a4650099c344756acc6c6b507 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 23 Aug 2023 10:43:32 -0700 Subject: [PATCH 1714/1936] Fix glance-remote with global venv The base systemd unit file setup now writes an Environment= line to the file for the venv. The glance-remote code was setting that to point at the alternate config location, using iniset which was clobbering the venv one. Switch to iniadd to fix. Also, we need to explicitly put the --venv flag into the command since we write our unit file ourselves. This probably needs a cleanup at this point, but since the glance gate is blocked, do this for now. Change-Id: I2bd33de45c41b18ed7d4270a7301b1e322134987 --- lib/glance | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index e64f00027e..3cf8230f41 100644 --- a/lib/glance +++ b/lib/glance @@ -584,9 +584,10 @@ function start_glance_remote_clone { write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ - --ini $glance_remote_uwsgi" \ + --ini $glance_remote_uwsgi \ + --venv $DEVSTACK_VENV" \ "" "$STACK_USER" - iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ "Service" "Environment" \ "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" From ef53db76d029382dd8b3566224e51351b9d36280 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 24 Aug 2023 00:42:19 +0000 Subject: [PATCH 1715/1936] Fix configuration of LVM global_filter As far as I could tell, the global_filter config added in change I5d5c48e188cbb9b4208096736807f082bce524e8 wasn't actually making it into the lvm.conf. Given the volume (or rather LVM volume) related issues we've been seeing in the gate recently, we can give this a try to see if the global_filter setting has any positive effect. This also adds the contents of /etc/lvm/* to the logs collected by the jobs, so that we can see the LVM config. 
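The corrected sed expression in the hunk below matches the commented "# global_filter = [...]" template line that ships in lvm.conf and appends the computed filter directly after it. A rough sketch of the resulting line in /etc/lvm/lvm.conf, with example device names only:

    global_filter = [ "a|loop4|", "a|loop5|", "r|.*|" ]   # accept devstack's backing devices, reject everything else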
Change-Id: I2b39acd352669231d16b5cb2e151f290648355c0 --- .zuul.yaml | 1 + lib/lvm | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8b60fc9936..46e1e45e39 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -430,6 +430,7 @@ /var/log/mysql: logs /var/log/libvirt: logs /etc/libvirt: logs + /etc/lvm: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs diff --git a/lib/lvm b/lib/lvm index 162c491f22..b7e84d9505 100644 --- a/lib/lvm +++ b/lib/lvm @@ -200,7 +200,7 @@ function set_lvm_filter { filter_string=$filter_string$filter_suffix clean_lvm_filter - sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } From ffc1b76f64341e18b5a6e60783f1e33297623f99 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 28 Aug 2023 10:52:26 +0530 Subject: [PATCH 1716/1936] [neutron] Rely on PATH env set by devstack This was missed as part of [1], neutron sets exec_dirs in rootwrap.conf differently so that also needs to be fixed. Without it neutron openvswitch jobs relying on neutron-keepalived-state-change scripts were failing when deployed with GLOBAL_VENV=True as binaries no longer found at /usr/local/bin. [1] https://review.opendev.org/c/openstack/devstack/+/558930 Closes-Bug: #2031415 Change-Id: I9aa56bff02594f253381ffe47a70949079f4c240 --- lib/neutron | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index a6de7222db..e90ada8929 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1075,7 +1075,10 @@ function _neutron_setup_rootwrap { sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" From 427a4e1a9b7f20a8be0ad5091f2229945ce711a8 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:26:07 +0200 Subject: [PATCH 1717/1936] Drop focal platform job and support This was dropped in tempest, too[0], and we want to focus on getting and keeping the jammy job stable. Still retaining the nodeset definitions until we are sure they are not needed in other projects. 
[0] https://review.opendev.org/c/openstack/tempest/+/884952 Change-Id: Iafb5a939a650b763935d8b7ce7069ac4c6d9a95b --- .zuul.yaml | 9 --------- stack.sh | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 46e1e45e39..356acec479 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -744,13 +744,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-platform-ubuntu-focal - parent: tempest-full-py3 - description: Ubuntu 20.04 LTS (focal) platform test - nodeset: openstack-single-node-focal - timeout: 9000 - - job: name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy @@ -946,7 +939,6 @@ - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-platform-openEuler-22.03-ovn-source @@ -995,7 +987,6 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bullseye - - devstack-platform-ubuntu-focal - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode diff --git a/stack.sh b/stack.sh index a8f46bfeb9..c8810cd2f0 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 16ac21f0da4f1b83963c4beb876f8494d9594b7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 31 Aug 2023 15:06:52 +0200 Subject: [PATCH 1718/1936] Add OVN_BRIDGE_MAPPINGS - support extra bridge Add's the OVN_BRIDGE_MAPPINGS variable to ovn_agent. Uses the same format as OVS_BRIDGE_MAPPINGS, it defaults to "$PYSICAL_NETWORK:$PUBLIC_BRIDGE". This enables use of providernet for public network and setting up additional bridges, for example a for baremetal. Example: Q_USE_PROVIDER_NETWORKING="True" OVS_PHYSICAL_BRIDGE="brbm" PHYSICAL_NETWORK="mynetwork" PUBLIC_PHYSICAL_NETWORK="public" PUBLIC_BRIDGE="br-ex" OVN_BRIDGE_MAPPINGS="public:br-ex,mynetwork:brbm" Change-Id: I37317251bbe95d64de06d6232c2d472a98c0ee4d --- lib/neutron | 5 +++++ lib/neutron_plugins/ovn_agent | 2 +- lib/neutron_plugins/services/l3 | 8 +++++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index e90ada8929..ca9b788b2e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -303,6 +303,11 @@ else Q_USE_SECGROUP=False fi +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initialize PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} + # Save trace setting _XTRACE_NEUTRON=$(set +o | grep xtrace) set +o xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 3526ccd354..c51b708130 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -288,7 +288,7 @@ function clone_repository { function create_public_bridge { # Create the public bridge that OVN will use sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} _configure_public_network_connectivity } diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2bf884a8c4..c6d4663114 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -47,7 +47,8 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # used for the network. In case of ofagent, you should add the # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. # For openvswitch agent, you should add the corresponding entry to -# your OVS_BRIDGE_MAPPINGS. +# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry +# to your OVN_BRIDGE_MAPPINGS. # # eg. (ofagent) # Q_USE_PROVIDERNET_FOR_PUBLIC=True @@ -60,6 +61,11 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex # +# eg. (ovn agent) +# Q_USER_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVN_BRIDGE_MAPPINGS=public:br-ex +# # The provider-network-type defaults to flat, however, the values # PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could # be set to specify the parameters for an alternate network type. From a389128dba4ce7d7051b86f3ac7db4164d24b95f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 18 Jul 2023 16:31:28 +0100 Subject: [PATCH 1719/1936] OVN: Let ironic manage the OVN startup in it's case. In order for Ironic perform full testing with devstack, it uses virtual machines attached to a ovs bridge network to simulate bare metal machines. This worked great for OVS because often OVS was already running on the nodes due to the package, and we could just apply configuration and be done with it when Ironic's devstack plugin was applying initial configuration and setting up the test environment. With OVN, and the requirement of a specific co-installed OVS version, Ironic has discovered that we cannot perform this same configuration without having already started OVN during the initial system setup. Which is fine, but we can't initialize and start OVN twice. It just doesn't work. The original form of this patch was proposed by lucasgnomes in order to validate that we, did, indeed, need to do this to enable Ironic to successfully test an OVN based configuration, and is now being revised to handle that case automatically when Ironic is the selected virt plugin. Co-Authored-By: Julia Kreger Change-Id: Ifbfdaaa97fdbe75ede49dc47235e92a8035d1de6 --- lib/neutron | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index e90ada8929..5407f8a7b8 100644 --- a/lib/neutron +++ b/lib/neutron @@ -570,8 +570,15 @@ function configure_rbac_policies { # Start running OVN processes function start_ovn_services { if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to setup a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. 
+ init_ovn + start_ovn + fi if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " From e1297193dcb93acc1f7b89f5fe91babbcc6dda49 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Sun, 10 Sep 2023 16:24:38 -0700 Subject: [PATCH 1720/1936] [nova][ironic] Support configuring 1 shard on n-cpu Allows for testing of basic sharding configuration. Change-Id: Idfb2bd1822898d95af8643d69d97d9a76b4d64cc Needed-By: https://review.opendev.org/c/openstack/ironic/+/894460 --- functions-common | 6 ++++++ lib/nova_plugins/hypervisor-ironic | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/functions-common b/functions-common index f752271976..c57c4cc054 100644 --- a/functions-common +++ b/functions-common @@ -1114,6 +1114,12 @@ function is_ironic_enforce_scope { return 1 } +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index f058e9bb53..9a39c798a8 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -53,6 +53,10 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic project_name demo fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic region_name $REGION_NAME From 290a02d1f80b4de1bdeaaddaef7f59402a767d02 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 12 Sep 2023 17:32:03 +0200 Subject: [PATCH 1721/1936] Remove openeuler job from periodic and check queue The openeuler job running version 22.03 fails due to old libvirt. Nova requires version 7.0.0 or greater. 
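For anyone reproducing this locally, the platform libvirt can be checked with the standard client and daemon tools (nothing devstack specific):

    libvirtd --version    # daemon version; current Nova needs libvirt >= 7.0.0
    virsh version         # also reports the library and API versions in use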
Related-Bug: #2035224 Change-Id: I4ad6151c3d8555de059c9228253d287aecf9f953 --- .zuul.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 356acec479..5a7edd6c93 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -941,8 +941,6 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1067,7 +1065,3 @@ periodic: jobs: - devstack-no-tls-proxy - periodic-weekly: - jobs: - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs From d3953db76641e825565390acc6f68501777c0f53 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 19 Sep 2023 02:15:19 +0000 Subject: [PATCH 1722/1936] Updated from generate-devstack-plugins-list Change-Id: I18a47f5d604bbb83173151fb0b129deee2fcbe62 --- doc/source/plugin-registry.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f54fca92e6..03c7469c8f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,7 +69,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ @@ -85,7 +84,6 @@ openstack/octavia-dashboard `https://opendev.org/openstack/octavia- openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ -openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ From f73d3127832798db8d7830d1456bdedd1a6a6903 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 20 Sep 2023 07:04:37 +0200 Subject: [PATCH 1723/1936] CI: Make bookworm platform job voting It has been very stable for some time and it is going to be a major platform for the next cycle. Signed-off-by: Dr. 
Jens Harbott Change-Id: Id2df9514b41eda0798179157282a8486b1e9ae23 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a7edd6c93..1d1e3c9807 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -716,7 +716,6 @@ description: Debian Bookworm platform test nodeset: devstack-single-node-debian-bookworm timeout: 9000 - voting: false vars: configure_swap_size: 4096 devstack_localrc: @@ -984,6 +983,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-enforce-scope From 5441b3df6e534101e66f8187ac9ff2bba2533fb5 Mon Sep 17 00:00:00 2001 From: Jake Yip Date: Sat, 10 Jun 2023 00:17:53 +1000 Subject: [PATCH 1724/1936] Use OS_CLOUD in sample local.sh local.sh, if present, will be executed at the end of stack.sh. The sample file here is meant to be copied to devstack root if desired. Unfortunately, due to Change I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 changing to use OS_CLOUD in stack.sh, sourcing openrc here will cause both OS_CLOUD and traditional OS_* env vars to be set, which causes a conflict. Change-Id: Id80b46acab7d600ad7394ab5bc1984304825a672 --- samples/local.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/local.sh b/samples/local.sh index a1c5c8143b..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -31,7 +31,7 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do From 25cd7eb67286ba39060d05b3f3f9e785d125195a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 21 Sep 2023 07:12:15 -0700 Subject: [PATCH 1725/1936] Fix g-api-r for non-global venv This makes the glance-api-remote setup honor the GLOBAL_VENV flag, and not pass the --venv stuff to uwsgi if it is disabled. This should fix the glance-multistore-cinder-import-fips job. Change-Id: I2005da5ced027d273e1f25f47b644fecafffc6c1 --- lib/glance | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index 3cf8230f41..796ebdb68d 100644 --- a/lib/glance +++ b/lib/glance @@ -543,7 +543,7 @@ function glance_remote_conf { # start_glance_remote_clone() - Clone the regular glance api worker function start_glance_remote_clone { local glance_remote_conf_dir glance_remote_port remote_data - local glance_remote_uwsgi + local glance_remote_uwsgi venv glance_remote_conf_dir="$(glance_remote_conf "")" glance_remote_port=$(get_random_port) @@ -581,11 +581,14 @@ function start_glance_remote_clone { # We need to create the systemd service for the clone, but then # change it to include an Environment line to point the WSGI app # at the alternate config directory. 
+ if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ --ini $glance_remote_uwsgi \ - --venv $DEVSTACK_VENV" \ + $venv" \ "" "$STACK_USER" iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ "Service" "Environment" \ From 3d37d13ee7aacd5594b351e324d8780e6d64d61b Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 28 Sep 2023 11:36:07 -0700 Subject: [PATCH 1726/1936] Update DEVSTACK_SERIES to 2024.1 stable/2023.2 branch has been created now and current master is for 2024.1 Change-Id: I67eee1ba721a1ad99b3503312acc2f94a52c5552 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index ff30d37721..464e935839 100644 --- a/stackrc +++ b/stackrc @@ -265,7 +265,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2023.2" +DEVSTACK_SERIES="2024.1" ############## # From 8c25a8586122d5f00bdcec9b6c4826309891ba62 Mon Sep 17 00:00:00 2001 From: Lukas Piwowarski Date: Thu, 5 Oct 2023 08:11:05 +0000 Subject: [PATCH 1727/1936] Add support volume backup_driver config option The depends-on patch adds a new backup_driver option to tempest. The goal of this change is to be able to do a proper cleanup of containers when swift is used as a backup driver. Thich change makes sure that the new option is properly set to "swift" when Swift is used as the driver. Depends-On: https://review.opendev.org/c/openstack/tempest/+/896011/13 Change-Id: I76e7fd712ee352051f8aa2f2912a29abad9ad017 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 2f62f6ea62..7b5fde170e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -29,6 +29,7 @@ # - ``DEFAULT_INSTANCE_USER`` # - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: @@ -571,6 +572,9 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} if [ "$tempest_volume_min_microversion" == "None" ]; then From ca4d5132e63752878620c4e4f374d98d433b3f52 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 10 Oct 2023 09:22:16 +0200 Subject: [PATCH 1728/1936] zuul: Drop neutron-linuxbridge-tempest job Neutron has deprecated linuxbridge support and is only doing reduced testing for the neutron-linuxbridge-tempest job, so we need no longer run it in devstack, even less gate on it. Signed-off-by: Dr. 
Jens Harbott Change-Id: Ie1a8f978efe7fc9b037cf6a6b70b67d539d76fd6 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 1d1e3c9807..6ee8177a6d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -953,10 +953,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - neutron-ovn-tempest-ovs-release: voting: false irrelevant-files: @@ -994,10 +990,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: From 72cf4e60060d8024a9fb79c845babc621f35dd2f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 12 Oct 2023 11:08:30 -0700 Subject: [PATCH 1729/1936] Allow forcing nova compute_id Developers that need to stack and re-stack non-AIO compute-only environments will want to be able to keep the compute node uuid the same across runs. This mimics the behavior of a deployment tool that pre-creates the uuids, so it matches pretty well. Default to the current behavior of create-on-start, but allow forcing it ahead of time to something specific. Change-Id: Icab0b783e2233cad9a93c04758a5bccac0832203 --- lib/nova | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/nova b/lib/nova index da3118f4cd..b04f94beef 100644 --- a/lib/nova +++ b/lib/nova @@ -58,6 +58,14 @@ NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + # The total number of cells we expect. Must be greater than one and doesn't # count cell0. NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} @@ -1058,6 +1066,10 @@ function start_nova_compute { iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True fi + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. 
# ``sg`` is used in run_process to execute nova-compute as a member of the From eb9b08a8833884b7c7c5b55813d7621715fe7adf Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 14 Oct 2023 02:26:11 +0000 Subject: [PATCH 1730/1936] Updated from generate-devstack-plugins-list Change-Id: Ieecc17159ac36b65124598c36fc92b77c2a75399 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 03c7469c8f..b2e733337a 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -78,6 +78,7 @@ openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron- openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova `https://opendev.org/openstack/nova `__ openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ From d2acd60870c63b486d4802cc3af0fdb27bd506c7 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 16 Oct 2023 15:02:08 +0900 Subject: [PATCH 1731/1936] Horizon: Install pymemcached ... so that we can use PyMemcacheCache backend. The MemcachedCache backend, which has been used previously, has been removed in recent Django, and we are switching the default backend in [1]. [1] https://review.opendev.org/c/openstack/horizon/+/891828 Change-Id: Ie1da8970628e34c41721198cdada8c7bb3b26ec0 --- lib/horizon | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/horizon b/lib/horizon index 611329d619..6f753f546f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -169,6 +169,10 @@ function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } From cace4044316befbbef9bcb7af2003f3045350830 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 23 Oct 2023 11:21:24 -0700 Subject: [PATCH 1732/1936] Fix performance stats gathering for global VENV Change-Id: I113c571ffddb241b29b1394e181ed0145b3c1e04 --- roles/capture-performance-data/tasks/main.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml index f9bb0f7851..51a11b60bc 100644 --- a/roles/capture-performance-data/tasks/main.yaml +++ b/roles/capture-performance-data/tasks/main.yaml @@ -3,7 +3,9 @@ executable: /bin/bash cmd: | source {{ devstack_conf_dir }}/stackrc - python3 {{ devstack_conf_dir }}/tools/get-stats.py \ + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ --db-user="$DATABASE_USER" \ --db-pass="$DATABASE_PASSWORD" \ --db-host="$DATABASE_HOST" \ From 29e73a215557b2d20d0d9611e0d5317e08cf9538 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Tue, 24 Oct 2023 06:18:22 +0200 Subject: [PATCH 1733/1936] Enable performance collection on Debian Change-Id: I84f1432262138cc9ff0942e1a2b2abe7447afe34 --- .zuul.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ee8177a6d..12bef3bff1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -718,9 +718,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - # TODO(frickler): drop this once wheel build is fixed - MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-debian-bullseye @@ -730,9 +727,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - # TODO(frickler): drop this once wheel build is fixed - MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-rocky-blue-onyx From bacb8400942b2ed6b724bdd3d28797896e1054c6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 25 Oct 2023 12:52:28 -0700 Subject: [PATCH 1734/1936] Enable NEUTRON_ENFORCE_SCOPE to True by default Neutron bobcat release has enabled the RBAC new defaults by default. With the latest release of Neutron have new defaults enable, we should configure the same by default in devstack. This change make NEUTRON_ENFORCE_SCOPE flag to True by default so that every job will run with Neutron new defaults. As old defaults are still supported (in deprecated way), we will keep this flag so that we can have one job disable it and test the old defaults. Change-Id: I3361d33885b2e3af7cad0141f9b799b2723ee8a1 --- lib/neutron | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 808043cebe..3628bfc25e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -92,8 +92,9 @@ NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" # and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ From 67630d4c52aef5ddcb15cff4f3b6594d447e8992 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Sun, 18 Jun 2023 14:46:06 +0200 Subject: [PATCH 1735/1936] Enable keystone token caching by OSC SDK uses python keyring library to enable token caching. Normally this is requiring a proper desktop (interactive) session, but there are some backend plugins working in non-interactive mode. Store cache in an unencrypted file on FS (this is not worse than storing passwords in plaintext). 
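Concretely, the change below amounts to two small pieces of per-user configuration written out by the scripts. A sketch of the end state, using the paths from the diff:

    # ~/.config/openstack/clouds.yaml (excerpt): enable the SDK's auth/token cache
    cache:
      auth: true

    # ~/.config/python_keyring/keyringrc.cfg: plaintext, file-based keyring backend
    [backend]
    default-keyring=keyrings.alt.file.PlaintextKeyring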
Change-Id: I42d698f15db5918443073fff8f27b926126d1d0f --- functions-common | 10 +++++++++- lib/libraries | 4 ++++ tools/update_clouds_yaml.py | 27 ++++++++++++++++++++++----- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index c57c4cc054..03d7c96417 100644 --- a/functions-common +++ b/functions-common @@ -1047,6 +1047,8 @@ function get_or_create_service { --description="$3" \ -f value -c id ) + # Drop cached token to invalidate catalog info in the token + remove_token_cache echo $service_id } @@ -1064,7 +1066,6 @@ function _get_or_create_endpoint_with_interface { endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi - echo $endpoint_id } @@ -1088,6 +1089,8 @@ function get_or_create_endpoint { if [[ -n "$5" ]]; then _get_or_create_endpoint_with_interface $1 internal $5 $2 fi + # Drop cached token to invalidate catalog info in the token + remove_token_cache # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } @@ -2517,6 +2520,11 @@ function is_fips_enabled { [ "$fips" == "1" ] } +function remove_token_cache { + # Remove Keyring cache file + rm ~/.local/share/python_keyring/keyring_pass.cfg +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..146434e2b9 100755 --- a/lib/libraries +++ b/lib/libraries @@ -138,6 +138,10 @@ function install_libs { # doesn't pull in etcd3. pip_install etcd3 pip_install etcd3gw + + # Add libraries required for token caching by OpenStackSDK/CLI + pip_install keyring + pip_install keyrings.alt } # Restore xtrace diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 74dcdb2a07..918988245b 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -30,7 +30,9 @@ def __init__(self, args): self._clouds_path = os.path.expanduser( '~/.config/openstack/clouds.yaml') self._create_directory = True - self._clouds = {} + self._keyringrc_path = os.path.expanduser( + '~/.config/python_keyring/keyringrc.cfg') + self._config = {} self._cloud = args.os_cloud self._cloud_data = { @@ -65,14 +67,17 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._clouds = yaml.safe_load(clouds_file) + self._config = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") - self._clouds = {} + if "cache" not in self._config: + # Enable auth (and only auth) caching. Currently caching into the + # file on FS is configured in `_write_clouds` function. + self._config["cache"] = {"auth": True} def _update_clouds(self): - self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data + self._config.setdefault('clouds', {})[self._cloud] = self._cloud_data def _write_clouds(self): @@ -81,7 +86,19 @@ def _write_clouds(self): os.makedirs(clouds_dir) with open(self._clouds_path, 'w') as clouds_file: - yaml.dump(self._clouds, clouds_file, default_flow_style=False) + yaml.dump(self._config, clouds_file, default_flow_style=False) + + # Enable keyring token caching + keyringrc_dir = os.path.dirname(self._keyringrc_path) + os.makedirs(keyringrc_dir, exist_ok=True) + + # Configure auth caching into the file on FS. We do not bother of any + # expiration since SDK is smart enough to reauth once the token becomes + # invalid. 
+ with open(self._keyringrc_path, 'w') as keyringrc_file: + keyringrc_file.write("[backend]\n") + keyringrc_file.write( + "default-keyring=keyrings.alt.file.PlaintextKeyring\n") def main(): From 5123700ea6fe25164bd51e967ce85aaefb5c364c Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 19 Oct 2023 17:04:56 +0000 Subject: [PATCH 1736/1936] ignore dbcounter sub dirs currently id you run devstack with the dbcounter service enabled the created subdirs show up in git status this change justs add them to .gitgnore Change-Id: Iee48eb4e12ac22734c8a2c1dcbe0b92a0a387eaa --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 8fe56ad6ab..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,5 @@ stack-screenrc userrc_early AUTHORS ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ From 0f402b8327cc3e501df93c735c1b049361ed3dbb Mon Sep 17 00:00:00 2001 From: tzing Date: Mon, 6 Nov 2023 02:24:14 +0000 Subject: [PATCH 1737/1936] Fix openEuler support openEuler 22.03 LTS support was removed from devstack in last few months due to its libvirt version is too old and the CI job always fail. This Patch add a yum repository for libvirt7.2.0, and add the related CI job to make sure its works well. Change-Id: Ic507f165cfa117451283360854c4776a968bbb10 --- .zuul.yaml | 2 ++ stack.sh | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 12bef3bff1..75930112ca 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -934,6 +934,8 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/stack.sh b/stack.sh index 530fda48aa..dce15ac01c 100755 --- a/stack.sh +++ b/stack.sh @@ -421,8 +421,12 @@ elif [[ $DISTRO == "openEuler-22.03" ]]; then # 1. the hostname package is not installed by default # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. - install_package hostname openstack-release-wallaby + # 4. Ensure wget installation before use + install_package hostname openstack-release-wallaby wget uninstall_package python3-pip + + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo fi # Ensure python is installed From e7c12616e27ad2987c2dead1e1a413aaa2c632ee Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 14 Nov 2023 16:27:56 +0100 Subject: [PATCH 1738/1936] Add periodic-weekly pipeline with platform jobs Originally we only had the openeuler jobs there, but the other platforms could also do with some regular testing. 
Change-Id: I93526a4c592d85acd4debf72eb59e306ab8e6382 --- .zuul.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 75930112ca..47466cb3eb 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1053,3 +1053,13 @@ periodic: jobs: - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-bullseye + - devstack-platform-rocky-blue-onyx + - devstack-platform-ubuntu-jammy-ovn-source + - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs From 82c30cd82ee00012d21bee94dad2bcbc2c047f78 Mon Sep 17 00:00:00 2001 From: yatin Date: Wed, 15 Nov 2023 12:44:50 +0000 Subject: [PATCH 1739/1936] Revert "Enable keystone token caching by OSC" This reverts commit 67630d4c52aef5ddcb15cff4f3b6594d447e8992. Reason for revert: Seeing random failures across jobs as sometimes 'keyring_pass.cfg' gets duplicated keys and that makes executions of any openstackclient command to fail until the file is removed. This should be handled before re enabling the token caching again. Change-Id: I3d2fe53a2e7552ac6304c30aa2fe5be33d77df53 Related-Bug: #2042943 --- functions-common | 10 +--------- lib/libraries | 4 ---- tools/update_clouds_yaml.py | 27 +++++---------------------- 3 files changed, 6 insertions(+), 35 deletions(-) diff --git a/functions-common b/functions-common index 03d7c96417..c57c4cc054 100644 --- a/functions-common +++ b/functions-common @@ -1047,8 +1047,6 @@ function get_or_create_service { --description="$3" \ -f value -c id ) - # Drop cached token to invalidate catalog info in the token - remove_token_cache echo $service_id } @@ -1066,6 +1064,7 @@ function _get_or_create_endpoint_with_interface { endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi + echo $endpoint_id } @@ -1089,8 +1088,6 @@ function get_or_create_endpoint { if [[ -n "$5" ]]; then _get_or_create_endpoint_with_interface $1 internal $5 $2 fi - # Drop cached token to invalidate catalog info in the token - remove_token_cache # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } @@ -2520,11 +2517,6 @@ function is_fips_enabled { [ "$fips" == "1" ] } -function remove_token_cache { - # Remove Keyring cache file - rm ~/.local/share/python_keyring/keyring_pass.cfg -} - # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/libraries b/lib/libraries index 146434e2b9..9ea32304fc 100755 --- a/lib/libraries +++ b/lib/libraries @@ -138,10 +138,6 @@ function install_libs { # doesn't pull in etcd3. 
pip_install etcd3 pip_install etcd3gw - - # Add libraries required for token caching by OpenStackSDK/CLI - pip_install keyring - pip_install keyrings.alt } # Restore xtrace diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 918988245b..74dcdb2a07 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -30,9 +30,7 @@ def __init__(self, args): self._clouds_path = os.path.expanduser( '~/.config/openstack/clouds.yaml') self._create_directory = True - self._keyringrc_path = os.path.expanduser( - '~/.config/python_keyring/keyringrc.cfg') - self._config = {} + self._clouds = {} self._cloud = args.os_cloud self._cloud_data = { @@ -67,17 +65,14 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._config = yaml.safe_load(clouds_file) + self._clouds = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") - if "cache" not in self._config: - # Enable auth (and only auth) caching. Currently caching into the - # file on FS is configured in `_write_clouds` function. - self._config["cache"] = {"auth": True} + self._clouds = {} def _update_clouds(self): - self._config.setdefault('clouds', {})[self._cloud] = self._cloud_data + self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data def _write_clouds(self): @@ -86,19 +81,7 @@ def _write_clouds(self): os.makedirs(clouds_dir) with open(self._clouds_path, 'w') as clouds_file: - yaml.dump(self._config, clouds_file, default_flow_style=False) - - # Enable keyring token caching - keyringrc_dir = os.path.dirname(self._keyringrc_path) - os.makedirs(keyringrc_dir, exist_ok=True) - - # Configure auth caching into the file on FS. We do not bother of any - # expiration since SDK is smart enough to reauth once the token becomes - # invalid. - with open(self._keyringrc_path, 'w') as keyringrc_file: - keyringrc_file.write("[backend]\n") - keyringrc_file.write( - "default-keyring=keyrings.alt.file.PlaintextKeyring\n") + yaml.dump(self._clouds, clouds_file, default_flow_style=False) def main(): From bb0c273697bf54dd569ad38e459cd161b62f96cb Mon Sep 17 00:00:00 2001 From: elajkat Date: Thu, 16 Nov 2023 11:30:04 +0100 Subject: [PATCH 1740/1936] Option for SQLAlchemy and alembic git source Change-Id: If7ff0075834a1e9cee01713676166e56b797debd Closes-Bug: #2042941 --- lib/neutron | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/neutron b/lib/neutron index 3628bfc25e..bc77f161d7 100644 --- a/lib/neutron +++ b/lib/neutron @@ -158,6 +158,14 @@ if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME NEUTRON_ENDPOINT_SERVICE_NAME="networking" fi +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + # List of config file names in addition to the main plugin config file # To add additional plugin config files, use ``neutron_server_config_add`` # utility function. 
For example: @@ -525,6 +533,17 @@ function install_neutron { setup_dev_lib "neutron-lib" fi + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR + fi + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR + fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR From 2211c778db0e18702c7177f7750571cba3697509 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Wed, 22 Nov 2023 06:21:55 +0000 Subject: [PATCH 1741/1936] Allow devstack to set cache driver for glance Added new devstack variable `GLANCE_CACHE_DRIVER` default to `sqlite` to set the cache driver for glance service. Related blueprint centralized-cache-db Change-Id: I76d064590356e2d65bfc6a3f57d1bdaeeb83a74a --- lib/glance | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/glance b/lib/glance index 796ebdb68d..4ff9a34ca8 100644 --- a/lib/glance +++ b/lib/glance @@ -75,6 +75,7 @@ GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-sqlite} # Full Glance functionality requires running in standalone mode. If we are # not in uwsgi mode, then we are standalone, otherwise allow separate control. @@ -329,6 +330,7 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_keystone_authtoken_middleware $GLANCE_API_CONF glance @@ -392,6 +394,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance From 2e14add0fdbc749f40caf075e42221d85ff2f27e Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 29 Nov 2023 09:22:10 -0500 Subject: [PATCH 1742/1936] Add cinder-manage to /usr/local/bin/ This is useful in a dev environment. 
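A small usage sketch, assuming a GLOBAL_VENV=True install (the subcommand shown is only an example, not something this change adds):

    which cinder-manage        # -> /usr/local/bin/cinder-manage, a symlink into /opt/stack/data/venv/bin
    cinder-manage db version   # usable without activating the venv first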
Change-Id: I247eb4aea23a906d0e667ec6c5ac79f932bdca24 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 530fda48aa..4d649f6cec 100755 --- a/stack.sh +++ b/stack.sh @@ -821,6 +821,7 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin From d126330efebb98b7fe8ce74d8da333e13782576d Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 09:58:18 +0000 Subject: [PATCH 1743/1936] lib/apache: Rename variable This is a little more meaningful, IMO. Change-Id: Ib9d3fdc54b1cdbd822c2a4eca0a3310ca3f6324c Signed-off-by: Stephen Finucane --- lib/apache | 84 +++++++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/lib/apache b/lib/apache index cf7215bef2..9017e0a38a 100644 --- a/lib/apache +++ b/lib/apache @@ -238,7 +238,7 @@ function restart_apache_server { } function write_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 local http=$4 @@ -258,38 +258,38 @@ function write_uwsgi_config { local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" - iniset "$file" uwsgi processes $API_WORKERS + rm -rf $conf + iniset "$conf" uwsgi wsgi-file "$wsgi" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true + iniset "$conf" uwsgi lazy-apps true # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then - iniset "$file" uwsgi http $http + iniset "$conf" uwsgi http $http else local apache_conf="" apache_conf=$(apache_site_config_for $name) - iniset "$file" uwsgi socket "$socket" - iniset "$file" uwsgi chmod-socket 666 + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server @@ -303,7 +303,7 @@ function write_uwsgi_config { # but that involves having apache buffer the request before sending it to # uwsgi. function write_local_uwsgi_http_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 name=$(basename $wsgi) @@ -312,38 +312,38 @@ function write_local_uwsgi_http_config { # a private view of it on some platforms. # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" + rm -rf $conf + iniset "$conf" uwsgi wsgi-file "$wsgi" port=$(get_random_port) - iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" - iniset "$file" uwsgi processes $API_WORKERS + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 - iniset "$file" uwsgi http-raw-body true - iniset "$file" uwsgi http-chunked-input true - iniset "$file" uwsgi http-auto-chunked true - iniset "$file" uwsgi http-keepalive false + iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads - iniset "$file" uwsgi socket-timeout 30 + iniset "$conf" uwsgi socket-timeout 30 enable_apache_mod proxy enable_apache_mod proxy_http @@ -376,12 +376,12 @@ function write_local_proxy_http_config { } function remove_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local name="" name=$(basename $wsgi) - rm -rf $file + rm -rf $conf disable_apache_site $name } From 6b0f055b4ed407f8a190f768d0e654235ac015dd Mon Sep 17 00:00:00 2001 From: Yadnesh Kulkarni Date: Thu, 23 Nov 2023 11:59:49 +0530 Subject: [PATCH 1744/1936] Make multiple attempts to download image Downloading an image can fail due to network issues, so let's retry 5 times before giving up. We have seen issues in CI due to network issues as described below and in the Related-Bug:- Often times fetching Fedora image in FIPS jobs fails due to "GnuTLS: One of the involved algorithms has insufficient security level." This occurs when request to pull image is redirected to a mirror that's incompatible with FIPS enabled system. Making multiple attempts to download images could provide better chance of pulling images from different mirrors and avoid failure of the job. This will also save a few rechecks. Related-Bug: #2045725 Change-Id: I7163aea4d121cb27620e4f2a083a543abfc286bf --- functions | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 7ada0feba7..01e1d259ad 100644 --- a/functions +++ b/functions @@ -133,17 +133,28 @@ function upload_image { local image image_fname image_name + local max_attempts=5 + # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then - wget --progress=dot:giga -c $image_url -O $FILES/$image_fname - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + return + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi image="$FILES/${image_fname}" else From 5e98509eaad724bb68d1a457bd690a387c51a114 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 12 Dec 2023 12:40:58 +0000 Subject: [PATCH 1745/1936] Increase timeout for reimage operation Looking at the recent failures in the tempest-integrated-compute job, the reimage operation seems to be taking longer than our expected time of 60 seconds (which was increased because of a similar failure in the past, default is 20 seconds). The main culprit for this failure is the image conversion from qcow2 to raw which is taking ~159 seconds. 
Dec 05 13:29:59.709129 np0035951188 cinder-volume[77000]: DEBUG oslo_concurrency.processutils [req-5113eccb-05ba-486a-8130-a58898c8ad35 req-0edf972a-109a-465f-a771-ceb87ecbda3e tempest-ServerActionsV293TestJSON-1780705112 None] CMD "sudo cinder-rootwrap /etc/cinder/rootwrap.conf qemu-img convert -O raw -t none -f qcow2 /opt/stack/data/cinder/conversion/image_download_dbe01f18-1c90-4536-a09a-b49f0811c7a0_copod3cm /dev/mapper/stack--volumes--lvmdriver--1-volume--073a98e8--3c89--4734--9ae5--59af25f8914a" returned: 0 in 159.272s {{(pid=77000) execute /opt/stack/data/venv/lib/python3.10/site-packages/oslo_concurrency/processutils.py:422}} The recent run took ~165 seconds on the cinder side but it failed early since the nova operation timed out in 60 seconds hence deleting the volume. To be on the safer side, 180 seconds seems to be a sane time for the operation to complete which this patch configures. Closes-Bug: 2046252 Change-Id: I8a9628216038f6d363cab5dd8177274c9cfc17c2 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index b04f94beef..17c90dfe26 100644 --- a/lib/nova +++ b/lib/nova @@ -1054,7 +1054,7 @@ function start_nova_compute { # Set rebuild timeout longer for BFV instances because we likely have # slower disk than expected. Default is 20s/GB - iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 60 + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 # Configure the OVSDB connection for os-vif if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then From 6fc0e74aa7369ed1503e2d0f12d7543d4835212e Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Tue, 19 Dec 2023 11:24:02 +0100 Subject: [PATCH 1746/1936] Fix spelling of `ADITIONAL_VENV_PACKAGES` This preserved `ADITIONAL_VENV_PACKAGES` as an input for backwards compatiblity, but takes `ADDITIONAL_VENV_PACKAGES` with priority. Fixes spelling in comment. Related-Bug: #2046936 Change-Id: I84151d8f71b12da134e8fb9dbf3ae30f2a171fe2 --- stackrc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 464e935839..6cbadf9915 100644 --- a/stackrc +++ b/stackrc @@ -207,8 +207,9 @@ fi USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any -# requirmenets files here, in a comma-separated list -ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} +# requirements files here, in a comma-separated list. 
+# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) From 7699ce2d5c465f86f2aace7af6b150dceb0e6e1c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 25 Dec 2023 03:14:20 +0000 Subject: [PATCH 1747/1936] Updated from generate-devstack-plugins-list Change-Id: Ie5cbd87269a10d6abdf1d24f7e6224d9aac3bf5d --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b2e733337a..f70041162b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -85,6 +85,7 @@ openstack/octavia-dashboard `https://opendev.org/openstack/octavia- openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ @@ -185,6 +186,7 @@ x/trio2o `https://opendev.org/x/trio2o `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ ======================================== === From a2da805f8107703e5f6738399ce5f5e358190fdc Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Mon, 8 Jan 2024 10:18:28 +0100 Subject: [PATCH 1748/1936] Fixup of 'Fix spelling of `ADITIONAL_VENV_PACKAGES`' Introduced a dangling } in the environment variable. This removes it. Change-Id: If9413dc1751399e5b9c9a0094772394252e5a81c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 6cbadf9915..59ba9074f2 100644 --- a/stackrc +++ b/stackrc @@ -209,7 +209,7 @@ USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any # requirements files here, in a comma-separated list. # Currently only used when USE_VENV is true (individual project venvs) -ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}} +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) From 6091df25a39c9b17883d86ccb091bf2b9c39aa15 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 20 Dec 2023 23:06:18 +0000 Subject: [PATCH 1749/1936] [OVN] Add support for the Neutron OVN agent service The Neutron OVN agent is a service that could run in any node. The functionality will depend on the extensions configured. This new agent is meant to be the replacement for the Neutron OVN metadata agent once the "metadata" extension is implemented in this service [1]. 
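A hedged local.conf sketch for exercising the new agent as the metadata provider; the service and variable names come from the diff below, while using it as a straight swap for the metadata agent is only an assumption here:

    [[local|localrc]]
    # run the generic OVN agent with the metadata extension instead of the
    # dedicated OVN metadata agent
    disable_service q-ovn-metadata-agent
    enable_service q-ovn-agent
    OVN_AGENT_EXTENSIONS=metadata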
[1]https://review.opendev.org/c/openstack/neutron/+/898238 Related-Bug: #2017871 Change-Id: I8f82f0047e89aac122a67f59db84f03e1a6bf519 --- lib/neutron_plugins/ovn_agent | 62 ++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index c51b708130..e646258651 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -91,9 +91,14 @@ OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} # http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} +# OVN metadata agent configuration OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# OVN agent configuration +OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-} + # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) @@ -132,6 +137,7 @@ OVN_RUNDIR=$OVS_PREFIX/var/run/ovn NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" @@ -487,6 +493,8 @@ function configure_ovn_plugin { if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False fi @@ -508,6 +516,8 @@ function configure_ovn_plugin { if is_service_enabled n-api-meta ; then if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + iniset $NOVA_CONF neutron service_metadata_proxy True fi fi } @@ -539,29 +549,42 @@ function configure_ovn { fi # Metadata - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + local sample_file="" + local config_file="" + if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n ${config_file} ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF - configure_root_helper_options $OVN_META_CONF + cp $sample_file $config_file + configure_root_helper_options $config_file - iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST - iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT 
state_path $DATA_DIR/neutron - iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 - iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi fi } @@ -684,6 +707,9 @@ function _start_ovn_services { if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then _start_process "devstack@q-ovn-metadata-agent.service" fi + if is_service_enabled q-ovn-agent neutron-ovn-agent ; then + _start_process "devstack@q-ovn-agent.service" + fi } # start_ovn() - Start running processes, including screen @@ -750,6 +776,12 @@ function start_ovn { setup_logging $OVN_META_CONF fi + if is_service_enabled q-ovn-agent neutron-ovn-agent; then + run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" + # Format logging + setup_logging $OVN_AGENT_CONF + fi + _start_ovn_services } @@ -774,6 +806,12 @@ function stop_ovn { sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi + if is_service_enabled q-ovn-agent neutron-ovn-agent; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-agent.service" + fi if is_service_enabled ovn-controller-vtep ; then _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" fi From 224fe1b09adb3adcdd02d680a46eeed5b271f7e4 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 7 Aug 2023 19:30:31 +0000 Subject: [PATCH 1750/1936] add support for zswap and ksmtuned This change add a new lib/host-mem file and moves the existing ksm support to a new configure_ksm function. Additional support for ksmtuned is added with a new flag "ENABLE_KSMTUNED" which defaults to true. This change also adds support for zswap. zswap is disabled by default. When enabled on ubuntu lz4 will be used as the default compressor and z3fold as the zpool. On non debian distros the compressor and zpool are not set. The default values should result in very low overhead although the zstd compressor may provide better overall performance in ci or with slow io due to the higher compression ratio. Additionally memory and network sysctl tunings are optionally applied to defer writes, prefer swapping and optimise tcp connection startup and keepalive. The sysctl tunings are disabled by default The base devstack job has been modifed to enable zram and sysctl tuning. Both ksm and zswap are wrapped by a tune_host function which is now called very early in devstack to ensure they are configured before any memory/network intensive operations are executed. 
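For a standalone (non-CI) environment, a minimal sketch of the equivalent opt-in via local.conf follows; the variable names are taken from the new lib/host file, and the zstd choice is only a suggestion drawn from the text above, not a tested recommendation:

    [[local|localrc]]
    ENABLE_ZSWAP=True
    ENABLE_SYSCTL_MEM_TUNING=True
    ENABLE_SYSCTL_NET_TUNING=True
    # optional: defaults are lz4/z3fold; zstd compresses better at higher latency
    ZSWAP_COMPRESSOR=zstd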
The ci jobs do not enable this functionality by default. To use this functionaltiy define ENABLE_SYSCTL_MEM_TUNING: true ENABLE_SYSCTL_NET_TUNING: true ENABLE_ZSWAP: true in the devstack_localrc section of the job vars. Change-Id: Ia5202d5a9903492a4c18b50ea8d12bd91cc9f135 --- functions-common | 21 +++++++++++ lib/host | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 22 +++-------- stackrc | 9 ----- 4 files changed, 125 insertions(+), 25 deletions(-) create mode 100644 lib/host diff --git a/functions-common b/functions-common index c57c4cc054..5238dff30a 100644 --- a/functions-common +++ b/functions-common @@ -236,6 +236,27 @@ function trueorfalse { $xtrace } +# bool_to_int +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. +function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + function isset { [[ -v "$1" ]] } diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..95c5b9bbcb --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. + +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} +function configure_zswap { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning so dont try to choose better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print curent zswap kernel config + sudo grep -R . 
/sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # assume swap is compressed so on new kernels + # give it equal priority as page cache which is + # uncompressed. on kernels < 5.8 the max is 100 + # not 200 so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reudce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} \ No newline at end of file diff --git a/stack.sh b/stack.sh index dce15ac01c..a816efda22 100755 --- a/stack.sh +++ b/stack.sh @@ -611,6 +611,12 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. +tune_host + # Configure Projects # ================== @@ -1079,22 +1085,6 @@ fi # Save configuration values save_stackenv $LINENO -# Kernel Samepage Merging (KSM) -# ----------------------------- - -# Processes that mark their memory as mergeable can share identical memory -# pages if KSM is enabled. This is particularly useful for nova + libvirt -# backends but any other setup that marks its memory as mergeable can take -# advantage. The drawback is there is higher cpu load; however, we tend to -# be memory bound not cpu bound so enable KSM by default but allow people -# to opt out if the CPU time is more important to them. - -if [[ $ENABLE_KSM == "True" ]] ; then - if [[ -f /sys/kernel/mm/ksm/run ]] ; then - sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" - fi -fi - # Start Services # ============== diff --git a/stackrc b/stackrc index 59ba9074f2..097913a4e9 100644 --- a/stackrc +++ b/stackrc @@ -121,15 +121,6 @@ else SYSTEMCTL="sudo systemctl" fi - -# Whether or not to enable Kernel Samepage Merging (KSM) if available. -# This allows programs that mark their memory as mergeable to share -# memory pages if they are identical. This is particularly useful with -# libvirt backends. This reduces memory usage at the cost of CPU overhead -# to scan memory. We default to enabling it because we tend to be more -# memory constrained than CPU bound. 
-ENABLE_KSM=$(trueorfalse True ENABLE_KSM) - # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password From 5c1736b78256f5da86a91c4489f43f8ba1bce224 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 24 Jan 2024 10:53:12 +0000 Subject: [PATCH 1751/1936] fix zswap enable flag zswap should only be enabled if ENABLE_ZSWAP is true. The if condition was checking ENABLE_KSMTUNED. That is now fixed. Change-Id: I76ba139de69fb1710bcb96cc9f638260463e2032 --- lib/host | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/host b/lib/host index 95c5b9bbcb..2fa22e24ea 100644 --- a/lib/host +++ b/lib/host @@ -37,7 +37,7 @@ ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} function configure_zswap { - if [[ $ENABLE_KSMTUNED == "True" ]] ; then + if [[ $ENABLE_ZSWAP == "True" ]] ; then # Centos 9 stream seems to only support enabling but not run time # tuning so dont try to choose better default on centos if is_ubuntu; then From b485549efc9851bfb2cabd1fce40cf39c403c24e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 27 Jan 2024 18:58:11 +0900 Subject: [PATCH 1752/1936] Uncap bashate The bashate tool has been very stable for a while and we rarely expect changes which may break existing scripts. This removes the current capping to avoid updating the upper limit when when a new release is created in bashate. Change-Id: Iae94811aebf58b491d6b2b2773db88ac50fdd737 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ec764abc87..26cd68c031 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ basepython = python3 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your # modified bashate tree deps = - {env:BASHATE_INSTALL_PATH:bashate==2.0.0} + {env:BASHATE_INSTALL_PATH:bashate} allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ From d251d12d71ebca758e8584204a0ba14d3c6bab6c Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 29 Jan 2024 18:20:06 +0000 Subject: [PATCH 1753/1936] Make `centralized_db` driver as default cache driver Making newly introduced `centralized_db` driver as default cache driver for glance so that it can be tested in available CI jobs. New cache driver `centralized_db` needs `worker_self_reference_url` in glance-api.conf file otherwise glance api service will fail to start. Related blueprint centralized-cache-db Depends-On: https://review.opendev.org/c/openstack/glance/+/899871 Change-Id: I75267988b1c80ac9daa5843ce8462bbac49ffe27 --- lib/glance | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 4ff9a34ca8..e4bfc8f5a3 100644 --- a/lib/glance +++ b/lib/glance @@ -75,7 +75,7 @@ GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} -GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-sqlite} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} # Full Glance functionality requires running in standalone mode. If we are # not in uwsgi mode, then we are standalone, otherwise allow separate control. 
@@ -432,6 +432,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL fi if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then From 4ddd456dd3e71bcdf9a02a12dd5914b82ec48e91 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 9 Feb 2024 14:11:44 +0100 Subject: [PATCH 1754/1936] Add support for the pyproject.toml file in setup with constraints In the _setup_package_with_constraints_edit name of the package was always discovered from the setup.cfg file. But as some projects implements PEP-621 (see [1] for the SQLAlchemy for example) it is not enough now. This patch adds parsing pyproject.toml file also if name is not found in the setup.cfg file. [1] https://github.com/sqlalchemy/sqlalchemy/commit/a8dbf8763a8fa2ca53cc01033f06681a421bf60b Closes-Bug: #2052509 Change-Id: Iee9262079d09a8bd22cd05a8f17950a41a0d1f9d --- inc/python | 3 +++ 1 file changed, 3 insertions(+) diff --git a/inc/python b/inc/python index cc6e01fede..43b06eb520 100644 --- a/inc/python +++ b/inc/python @@ -405,6 +405,9 @@ function _setup_package_with_constraints_edit { # source we are about to do. local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + if [ -z $name ]; then + name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml) + fi $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi From 402b7e89b60035b39b40e8886dee82487c54de97 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 23 Feb 2024 11:46:03 +0100 Subject: [PATCH 1755/1936] Drop nodesets with ubuntu-xenial The ubuntu-xenial labels are going to disappear from opendev as that image is EOL and will we deleted. Clean up our zuul config. Update some example reference as well. 
Change-Id: Id04110f7c871caa1739ff2b62e9796be4fb9aa00 --- .zuul.yaml | 80 ------------------------------------------------ functions-common | 4 +-- 2 files changed, 2 insertions(+), 82 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 47466cb3eb..13b4633e13 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,13 +1,3 @@ -- nodeset: - name: openstack-single-node - nodes: - - name: controller - label: ubuntu-xenial - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-single-node-jammy nodes: @@ -38,16 +28,6 @@ nodes: - controller -- nodeset: - name: openstack-single-node-xenial - nodes: - - name: controller - label: ubuntu-xenial - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-7 nodes: @@ -118,36 +98,6 @@ nodes: - controller -- nodeset: - name: openstack-two-node - nodes: - - name: controller - label: ubuntu-xenial - - name: compute1 - label: ubuntu-xenial - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -268,36 +218,6 @@ nodes: - compute1 -- nodeset: - name: openstack-two-node-xenial - nodes: - - name: controller - label: ubuntu-xenial - - name: compute1 - label: ubuntu-xenial - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-three-node-focal nodes: diff --git a/functions-common b/functions-common index 5238dff30a..8ea6df7c1d 100644 --- a/functions-common +++ b/functions-common @@ -401,9 +401,9 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora) +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``xenial`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME From 50c791c0ae3bd75335c89312a5595f0ad2864945 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 29 Feb 2024 17:02:55 +0900 Subject: [PATCH 1756/1936] Drop unused environments for TripleO and heat agents TripleO was already retired. These environments are not actually used by heat jobs. 
Change-Id: I63b7413a1575a620f9d2cbd56e93be78816639e0 --- stackrc | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/stackrc b/stackrc index 464e935839..966e5ed69d 100644 --- a/stackrc +++ b/stackrc @@ -588,28 +588,6 @@ GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} GITDIR["os-ken"]=$DEST/os-ken -################## -# -# TripleO / Heat Agent Components -# -################## - -# run-parts script required by os-refresh-config -DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH} - -# os-apply-config configuration template tool -OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-collect-config configuration agent -OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-refresh-config configuration run-parts tool -ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH} - ################# # From af57c0b778bb13a9b5ffd784fc456a21614e67b1 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Mon, 4 Mar 2024 18:24:24 +0000 Subject: [PATCH 1757/1936] Drop the devstack-single-node-centos-7 nodeset OpenDev is preparing to remove centos-7 nodes on March 15[*]. This change drops one nodeset definition which is the last remaining reference on DevStack's master branch. [*] https://lists.openstack.org/archives/list/openstack-discuss@lists.openstack.org/message/A2YIY5L7MVYSQMTVZU3L3OM7GLVVZPLK/ Change-Id: Icd487e1012263a9b0bc13b529d31ff2025108adf --- .zuul.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 13b4633e13..8bc082364c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -28,16 +28,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-7 - nodes: - - name: controller - label: centos-7 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-9-stream nodes: From 1fe7707cf04852d024b64f695e40568696851b15 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 5 Mar 2024 08:30:19 -0800 Subject: [PATCH 1758/1936] Ignore 500 status code in generate plugin script Due to various reasons, this script may encounter the 500 status code from some repo (x/fuel-plugin-onos in current case[1]) If that happen then it return failure status code to the propose-updates job and fail that job - https://zuul.openstack.org/builds?job_name=propose-updates&project=openstack%2Fdevstack&skip=0 It is better not to raise the 500 error in this script and just ignore those repo to process further to detect the plugin. 
[1] https://zuul.openstack.org/build/dba0aa41d145472397916dfcd13948de/log/job-output.txt#2442 Change-Id: Ibca0a2aac404161340e8fc00170018eecf5c8326 --- tools/generate-devstack-plugins-list.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 1cacd06bf8..bc28515a26 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -73,8 +73,11 @@ def has_devstack_plugin(session, proj): s = requests.Session() # sometimes gitea gives us a 500 error; retry sanely # https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. retries = Retry(total=3, backoff_factor=1, - status_forcelist=[ 500 ]) + status_forcelist=[ 500 ], + raise_on_status=False) s.mount('https://', HTTPAdapter(max_retries=retries)) found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) From 5e837d1f0d9078c58bc634474a1adf311bc2b491 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 6 Mar 2024 03:13:36 +0000 Subject: [PATCH 1759/1936] Updated from generate-devstack-plugins-list Change-Id: Ic99b518ddf1045893991accaa089f44d0d4f4b0d --- doc/source/plugin-registry.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f70041162b..2d2a92c4a9 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -28,8 +28,6 @@ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/blazar `https://opendev.org/openstack/blazar `__ openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ -openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ -openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ @@ -69,7 +67,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ @@ -79,7 +76,6 @@ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron- openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ openstack/nova `https://opendev.org/openstack/nova `__ -openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ From 5f5255bc011ef885f254c659009662870499de5a Mon Sep 17 00:00:00 2001 From: huicoffee <784657156@qq.com> Date: Fri, 15 Mar 2024 17:15:33 +0800 Subject: [PATCH 1760/1936] 
Remove Glance uWSGI config in clean.sh Updated clean.sh to remove Glance's Apache uWSGI config files in APACHE_CONF_DIR, including /etc/apache2/sites-enabled/ on Ubuntu. Test Plan: - Run clean.sh. - Confirm Glance uWSGI configs are removed from APACHE_CONF_DIR. Closes-Bug: #2057999 Change-Id: I44475b8e084c4b20d7b7cb7f28574f797dbda7a2 --- lib/glance | 1 + lib/host | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index e4bfc8f5a3..8ee842625f 100644 --- a/lib/glance +++ b/lib/glance @@ -168,6 +168,7 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" } # Set multiple cinder store related config options for each of the cinder store diff --git a/lib/host b/lib/host index 2fa22e24ea..a812c39612 100644 --- a/lib/host +++ b/lib/host @@ -95,4 +95,4 @@ function configure_host_net { function tune_host { configure_host_mem configure_host_net -} \ No newline at end of file +} From e1b7cc0ef8db3f7363cd478effa8f7292b61b9bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Douglas=20Mendiz=C3=A1bal?= Date: Mon, 25 Mar 2024 12:09:04 -0400 Subject: [PATCH 1761/1936] Do not configure system-scope admin for keystone This patch removes a couple of tempest.conf settings that are being overwrriten when Keystone is set to enforce scope. These settings are already being set by the keystone devstack plugin [1] and do not need to be overwritten here. Keystone is changing the default admin credentials to be project-admin instead of system-admin to address some failing tests in services that require project-scoped admin for their admin APIs. [2] These overrides are preventing that change from taking effect. [1] https://opendev.org/openstack/keystone/src/branch/stable/2024.1/devstack/lib/scope.sh#L24-L25 [2] https://review.opendev.org/c/openstack/keystone/+/913999 Change-Id: I48edbcbaa993f2d1f35160c415986d21a15a4999 --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 7b5fde170e..6bd203e6f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -702,8 +702,6 @@ function configure_tempest { # test can be run with scoped token. if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope keystone true - iniset $TEMPEST_CONFIG auth admin_system 'all' - iniset $TEMPEST_CONFIG auth admin_project_name '' fi if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then From 99a96288eb14e66723d85e6ca24ff51babac7ec8 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Thu, 28 Mar 2024 23:38:19 +0100 Subject: [PATCH 1762/1936] Update DEVSTACK_SERIES to 2024.2 stable/2024.1 branch has been created now and current master is for 2024.2. Change-Id: I4af9e87318ef9cbfede7df7c23872a1a7e38c820 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 4e49b461d4..de81f01f38 100644 --- a/stackrc +++ b/stackrc @@ -257,7 +257,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. 
-DEVSTACK_SERIES="2024.1" +DEVSTACK_SERIES="2024.2" ############## # From c336b873421c954921ee26c22bd9bfe65b330d0c Mon Sep 17 00:00:00 2001 From: Jaromir Wysoglad Date: Wed, 27 Mar 2024 11:36:26 +0100 Subject: [PATCH 1763/1936] Fix neutron empty string check The variable should be in quotes for the check to work Testing the behavior in bash: current behavior: $ config_file="" $ if [ -n ${config_file} ]; then echo a; fi a $ config_file="abc" $ if [ -n ${config_file} ]; then echo a; fi a behavior with quotes: $ config_file="" $ if [ -n "$config_file" ]; then echo a; fi $ config_file="abc" $ if [ -n "$config_file" ]; then echo a; fi a Change-Id: Iba956d9d4f43b925848174a632aabe58999be74b --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e646258651..699bd54f4e 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -558,7 +558,7 @@ function configure_ovn { sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample config_file=$OVN_META_CONF fi - if [ -n ${config_file} ]; then + if [ -n "$config_file" ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 From b6613b1e71fb6a0efb63ec9346bd2e67131657e0 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 10:22:30 +0000 Subject: [PATCH 1764/1936] lib/apache: Use module paths instead of WSGI scripts pbr's 'wsgi_scripts' entrypoint functionality is not long for this world so we need to start working towards an alternative. We could start packaging our own WSGI scripts in DevStack but using module paths seems like a better option, particularly when it's supported by other WSGI servers like gunicorn. Currently only nova is migrated. We should switch additional projects as they migrate and eventually remove the support for WSGI scripts entirely. Change-Id: I057dc635c01e54740ee04dfe7b39ef83db5dc180 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/nova/+/902687/ --- lib/apache | 33 ++++++++++++++++++++++++++++----- lib/nova | 8 ++++---- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/lib/apache b/lib/apache index 9017e0a38a..a314b76fb7 100644 --- a/lib/apache +++ b/lib/apache @@ -237,13 +237,17 @@ function restart_apache_server { restart_service $APACHE_NAME } +# write_uwsgi_config() - Create a new uWSGI config file function write_uwsgi_config { local conf=$1 local wsgi=$2 local url=$3 local http=$4 - local name="" - name=$(basename $wsgi) + local name=$5 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. @@ -259,7 +263,15 @@ function write_uwsgi_config { # always cleanup given that we are using iniset here rm -rf $conf - iniset "$conf" uwsgi wsgi-file "$wsgi" + # Set either the module path or wsgi script path depending on what we've + # been given. 
Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone iniset "$conf" uwsgi master true @@ -306,14 +318,25 @@ function write_local_uwsgi_http_config { local conf=$1 local wsgi=$2 local url=$3 - name=$(basename $wsgi) + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. # always cleanup given that we are using iniset here rm -rf $conf - iniset "$conf" uwsgi wsgi-file "$wsgi" + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi port=$(get_random_port) iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" iniset "$conf" uwsgi processes $API_WORKERS diff --git a/lib/nova b/lib/nova index 17c90dfe26..a261fac8f6 100644 --- a/lib/nova +++ b/lib/nova @@ -53,8 +53,8 @@ NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_API_DB=${NOVA_API_DB:-nova_api} -NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi -NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini @@ -549,11 +549,11 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" if is_service_enabled n-api; then - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api" fi if is_service_enabled n-api-meta; then - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata" fi if is_service_enabled ceilometer; then From 9be4ceeaa10f6ed92291e77ec52794acfb67c147 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 23 Apr 2024 15:37:37 -0400 Subject: [PATCH 1765/1936] Fix datetime.utcnow() deprecation warning Running stack.sh on a python 3.12 system generates this warning from worlddump.py: DeprecationWarning: datetime.datetime.utcnow() is deprecated Use datetime.now(timezone.utc) instead, which should be backwards-compatible with older python versions. 
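For illustration, a minimal before/after (the strftime format shown mirrors the one used in worlddump.py; the variable name is only for the example):
  # deprecated as of Python 3.12, returns a naive datetime
  now = datetime.datetime.utcnow()
  # replacement, also works on older Python versions and returns an aware datetime
  now = datetime.datetime.now(datetime.timezone.utc)
  now.strftime("worlddump-%Y-%m-%d-%H%M%S")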
TrivialFix Change-Id: I11fe60f6b04842412045c6cb97f493f7fef66e1a --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index aadd33b634..edbfa268db 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -51,7 +51,7 @@ def get_options(): def filename(dirname, name=""): - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc) fmt = "worlddump-%Y-%m-%d-%H%M%S" if name: fmt += "-" + name From aee9b0ff9e68f9306d9a55bca5304366fb85e91b Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 9 May 2024 10:29:43 -0700 Subject: [PATCH 1766/1936] Make rocky 9 job non-voting This job is currently failing with mirror or repo issues. Change-Id: Ie0f862f933cd99cc9fe698d5a178b952e6e93ac4 --- .zuul.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8bc082364c..294dd48f4d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -644,6 +644,11 @@ description: Rocky Linux 9 Blue Onyx platform test nodeset: devstack-single-node-rockylinux-9 timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 + voting: false vars: configure_swap_size: 4096 @@ -887,7 +892,9 @@ - devstack-ipv6 - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - - devstack-platform-rocky-blue-onyx + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From 769adbd69daf89f05c96d877519efc81a25fd3c1 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 8 May 2024 16:58:46 +0000 Subject: [PATCH 1767/1936] Upload images with --file instead of stdin This is more likely how people will actually upload their images, but it also prevents the "osc as a service" feature from working because stdin isn't proxied (of course). So just convert our uses of "image create" to use --file instead of stdin. Change-Id: I7205eb0100ba7406650ed609cf517cba2c8d30aa --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 01e1d259ad..f81e8f0a08 100644 --- a/functions +++ b/functions @@ -118,7 +118,7 @@ function _upload_image { useimport="--import" fi - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") } # Retrieve an image from a URL and upload into Glance. @@ -425,10 +425,10 @@ function upload_image { # kernel for use when uploading the root filesystem. 
local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi From c80b9f4fc16997631696100a8e468d907a177f7d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 13 May 2024 11:04:45 +0200 Subject: [PATCH 1768/1936] Drop reno devstack doesn't do releases, so there should be no release notes, either. Drop the one that was accidentally created to avoid confusion. Change-Id: I75a295e50c36925a0137a5458444fb48bd5d9f8a --- ...NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml deleted file mode 100644 index f815e14ccb..0000000000 --- a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on - SQLAlchemy 2.x. From d5182ce3fcf5caf8f7dca84217b2c3cb70993df7 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 19 Apr 2024 12:27:14 +0100 Subject: [PATCH 1769/1936] lib/apache: Pass name, not path, to remove_uwsgi_config We'd like to move from configuring uWSGI with '.wsgi' files to configuring with module paths. Do this for all in-tree services and log a deprecation warning for anyone still passing a path. Note that since 'basename foo' returns 'foo', this is effectively a no-op for the services being converted here.
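As a quick illustration of the no-op claim (plain coreutils behaviour; the names below are just examples):
  $ basename nova-api
  nova-api
  $ basename /usr/local/bin/nova-api-wsgi
  nova-api-wsgi
so callers that already pass a bare application name resolve to the same site name as before, while anyone still passing a script path now sees the deprecation warning.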
Change-Id: Ia1ad5ff160a9821ceab97ff1c24bc48cd4bf1d6f Signed-off-by: Stephen Finucane --- lib/apache | 6 ++++++ lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/nova | 4 ++-- lib/placement | 2 +- 7 files changed, 13 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index a314b76fb7..48438da6a1 100644 --- a/lib/apache +++ b/lib/apache @@ -402,8 +402,14 @@ function remove_uwsgi_config { local conf=$1 local wsgi=$2 local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration name=$(basename $wsgi) + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + rm -rf $conf disable_apache_site $name } diff --git a/lib/cinder b/lib/cinder index f7824eb6f4..ae898e9522 100644 --- a/lib/cinder +++ b/lib/cinder @@ -275,7 +275,7 @@ function cleanup_cinder { fi stop_process "c-api" - remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc diff --git a/lib/glance b/lib/glance index 8ee842625f..274687112e 100644 --- a/lib/glance +++ b/lib/glance @@ -168,7 +168,7 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi - remove_uwsgi_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" } # Set multiple cinder store related config options for each of the cinder store diff --git a/lib/keystone b/lib/keystone index 6cb4aac46a..7d6b05fd41 100644 --- a/lib/keystone +++ b/lib/keystone @@ -150,7 +150,7 @@ function cleanup_keystone { sudo rm -f $(apache_site_config_for keystone) else stop_process "keystone" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" sudo rm -f $(apache_site_config_for keystone-wsgi-public) fi } diff --git a/lib/neutron b/lib/neutron index bc77f161d7..ed854fdd66 100644 --- a/lib/neutron +++ b/lib/neutron @@ -823,7 +823,7 @@ function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api stop_process neutron-rpc-server - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi diff --git a/lib/nova b/lib/nova index a261fac8f6..ee3f29eebf 100644 --- a/lib/nova +++ b/lib/nova @@ -248,8 +248,8 @@ function cleanup_nova { stop_process "n-api" stop_process "n-api-meta" - remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" - remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" if [[ "$NOVA_BACKEND" == "LVM" ]]; then clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME diff --git a/lib/placement b/lib/placement index c6bf99f868..63fdfb6c1a 100644 --- a/lib/placement +++ b/lib/placement @@ -68,7 +68,7 @@ function is_placement_enabled { # runs that a clean run would need to clean up function cleanup_placement { sudo rm -f $(apache_site_config_for placement-api) - remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } # _config_placement_apache_wsgi() - Set WSGI config files From 
a6f3901a4bf81f3fe9f6132629bc552e179dd8c9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 17:20:37 +0000 Subject: [PATCH 1770/1936] lib/apache: Reshuffle lines Make it a little more obvious what the difference between the two helper functions is. Change-Id: I07ec34ecfcd2b7925485145c4b4bf68eda385a32 Signed-off-by: Stephen Finucane --- lib/apache | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/apache b/lib/apache index 48438da6a1..1420f76ff2 100644 --- a/lib/apache +++ b/lib/apache @@ -345,15 +345,15 @@ function write_local_uwsgi_http_config { # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$conf" uwsgi die-on-term true iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$conf" uwsgi enable-threads true iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" - # Set worker-reload-mercy so that worker will not exit till the time - # configured after graceful shutdown - iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT # Override the default size for headers from the 4k default. iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. From 9a97326c3f3b04728cf4484df37ce7260f6367af Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Mon, 12 Aug 2019 20:10:49 +0000 Subject: [PATCH 1771/1936] Use OSCaaS to speed up devstack runs OpenStackClient has a significant amount of startup overhead, which adds a non-trivial amount of time to each devstack run because it makes a lot of OSC calls. This change uses the OSC service from [0] to run a persistent process that handles openstack calls. This removes most of the startup overhead and in my local testing removes about three minutes per devstack run. Currently this is implemented as an opt-in feature. There are likely a lot of edge cases in projects that use a devstack plugin so turning it on universally is going to require boiling the ocean. I think getting this in and enabled for some of the major projects should give us a lot of the benefit without the enormous effort of making it 100% compatible across all of OpenStack. Depends-On: https://review.opendev.org/c/openstack/nova/+/918689 Depends-On: https://review.opendev.org/c/openstack/ironic/+/918690 Change-Id: I28e6159944746abe2d320369249b87f1c4b9e24e 0: http://lists.openstack.org/pipermail/openstack-dev/2016-April/092546.html --- files/openstack-cli-server/openstack | 119 ++++++++++++++++++ .../openstack-cli-server/openstack-cli-server | 118 +++++++++++++++++ functions-common | 5 + stack.sh | 3 + unstack.sh | 4 + 5 files changed, 249 insertions(+) create mode 100755 files/openstack-cli-server/openstack create mode 100755 files/openstack-cli-server/openstack-cli-server diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..ef05f1b841 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_IDENTITY_API_VERSION", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/functions-common b/functions-common index 8ea6df7c1d..84d281b21e 100644 --- a/functions-common +++ b/functions-common @@ -2438,6 +2438,11 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + function oscwrap { local xtrace xtrace=$(set +o | grep xtrace) diff --git a/stack.sh b/stack.sh index c6652e5c6a..0c36e1034e 100755 --- a/stack.sh +++ b/stack.sh @@ -1022,6 +1022,9 @@ if use_library_from_git "python-openstackclient"; then setup_dev_lib "python-openstackclient" else pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server + 
fi fi # Installs alias for osc so that we can collect timing for all diff --git a/unstack.sh b/unstack.sh index 33b069b6a3..1b2d8dd62a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -168,6 +168,10 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi +if is_service_enabled openstack-cli-server; then + stop_service devstack@openstack-cli-server +fi + stop_dstat # NOTE: Cinder automatically installs the lvm2 package, independently of the From 6971ccc49ad97216d97be46c70c241c5473aff92 Mon Sep 17 00:00:00 2001 From: MinhNLH2 Date: Wed, 1 May 2024 21:29:15 +0700 Subject: [PATCH 1772/1936] Display backup dashboard on Horizon when c-bak is enabled Currently, when enabling c-bak service, the backup tab will not be shown on Horizon by default. This patch tells Horizon to display backup dashboard when c-bak is enabled. Closes-Bug: 2064496 Change-Id: I06295706e985bac58de2878c6d24c51f3267c205 Signed-off-by: MinhNLH2 --- lib/horizon | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/horizon b/lib/horizon index 6f753f546f..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -109,6 +109,10 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" fi + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From fadf63e4a962e4922cdf529c17231fbb49f91e89 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 May 2024 02:37:02 +0000 Subject: [PATCH 1773/1936] Updated from generate-devstack-plugins-list Change-Id: Ifa6db2e765f5f15a1d7421eef061377e55b58ec7 --- doc/source/plugin-registry.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2d2a92c4a9..21cf52c736 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -37,7 +37,6 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ @@ -62,7 +61,6 @@ openstack/mistral `https://opendev.org/openstack/mistral openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ @@ -84,12 +82,8 @@ openstack/osprofiler `https://opendev.org/openstack/osprofil openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack 
`https://opendev.org/openstack/rally-openstack `__ -openstack/sahara `https://opendev.org/openstack/sahara `__ -openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ -openstack/solum `https://opendev.org/openstack/solum `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ From 608489cd59b0d0f6f82937abb6a317489ac4d7a4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:27:14 +0100 Subject: [PATCH 1774/1936] openrc: Stop setting OS_TENANT_NAME MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All clients - OSC included - use keystoneauth under the hood which hasn't required this in a very long time. Stop setting it and remove the warning. We also remove references to 'NOVA_*' variables that haven't been a thing since well before *I* started working on OpenStack 😅 Change-Id: I882081040215d8e32932ec5d03be34e467e4fbc2 Signed-off-by: Stephen Finucane --- openrc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/openrc b/openrc index 6d488bb0ba..b72bedbea4 100644 --- a/openrc +++ b/openrc @@ -7,9 +7,6 @@ # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. - if [[ -n "$1" ]]; then OS_USERNAME=$1 fi @@ -36,22 +33,14 @@ fi source $RC_DIR/lib/tls # The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. In some places **tenant** remains -# referenced, but in all cases this just means **project**. We will -# warn if we need to turn on legacy **tenant** support to have a -# working environment. +# entity that owns resources. export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} -echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools." -export OS_TENANT_NAME=$OS_PROJECT_NAME - # In addition to the owning entity (project), nova stores the entity performing # the action as the **user**. export OS_USERNAME=${OS_USERNAME:-demo} # With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} # Region From 9fff87fbc7c972d18b9bf59847b61b0bbd8e4dd9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:33:32 +0100 Subject: [PATCH 1775/1936] openrc: Group auth-related options together Change-Id: I98f283b33c2350cc4388463571013896086b31fa Signed-off-by: Stephen Finucane --- openrc | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/openrc b/openrc index b72bedbea4..e20a5a56b9 100644 --- a/openrc +++ b/openrc @@ -32,18 +32,11 @@ fi # Get some necessary configuration source $RC_DIR/lib/tls -# The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. +# Minimal configuration +export OS_AUTH_TYPE=password export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} - -# In addition to the owning entity (project), nova stores the entity performing -# the action as the **user**. 
export OS_USERNAME=${OS_USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} - -# Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION @@ -65,9 +58,6 @@ fi # Identity API version export OS_IDENTITY_API_VERSION=3 -# Ask keystoneauth1 to use keystone -export OS_AUTH_TYPE=password - # Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/project has access to - including nova, glance, keystone, swift, ... From 5412dbfe7b797149f1f68100de8003b1876398fe Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:35:28 +0100 Subject: [PATCH 1776/1936] stackrc: Remove USE_PYTHON3 This is no longer necessary and any users of this should be updated to remove references. Change-Id: Ice5083d8897376fd2ed6bd509419526e15baaf12 Signed-off-by: Stephen Finucane --- stackrc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/stackrc b/stackrc index de81f01f38..b37959712b 100644 --- a/stackrc +++ b/stackrc @@ -126,10 +126,6 @@ if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used at all. -# TODO(frickler): Drop this when all consumers are fixed -export USE_PYTHON3=True - # Adding the specific version of Python 3 to this variable will install # the app using that version of the interpreter instead of just 3. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" From b500d80c7641583039188baf62c215676e3d81db Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 14 Jun 2024 12:58:58 +0200 Subject: [PATCH 1777/1936] Fix deployment of the neutron with uwsgi After patch [1] deploying neutron with uwsgi was not working correctly due to the fact that there was different paths for the applications set in the api-paste.ini file. Instead of default ones like: /: neutronversions_composite /healthcheck: healthcheck /v2.0: neutronapi_v2_0 it was changing it to something like: /networking/: neutronversions_composite /networking/healthcheck: healthcheck /networking/v2.0: neutronapi_v2_0 where 'networking' can be configured to something else. This patch fixes deployment of neutron with uwsgi by not changing its api-paste.ini file when NEUTRON_DEPLOY_MOD_WSGI=True. [1] https://review.opendev.org/c/openstack/devstack/+/849145 Closes-bug: #2069418 Change-Id: I12b860d4d98442e2b5ac0c9fd854f1226633b518 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 808043cebe..021ffeb11e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1002,7 +1002,7 @@ function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then _replace_api_paste_composite fi From 4d69238383c45c862d588cfe1e0234e6a13a1220 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 21 Jun 2024 18:27:32 +0530 Subject: [PATCH 1778/1936] Fix rdo_release for unmaintained branches Only branches with stable/ as prefix were considered but now we have branches even with different prefix like unmaintained/, fix it to consider such cases by using a generic filter instead of assuming branch name starts with stable. 
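For example, with bash parameter expansion a single generic prefix strip covers both branch layouts (the branch values below are only illustrative):
  $ TARGET_BRANCH=stable/2024.1; echo ${TARGET_BRANCH#*/}
  2024.1
  $ TARGET_BRANCH=unmaintained/zed; echo ${TARGET_BRANCH#*/}
  zed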
Change-Id: I967de13094ff6df46737a22d4e1758f9900dfbc9 --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 0c36e1034e..740682920c 100755 --- a/stack.sh +++ b/stack.sh @@ -307,8 +307,8 @@ function _install_rdo { # rdo-release.el8.rpm points to latest RDO release, use that for master sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + # For stable/unmaintained branches use corresponding release rpm + rdo_release=${TARGET_BRANCH#*/} sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then @@ -316,8 +316,8 @@ function _install_rdo { # rdo-release.el9.rpm points to latest RDO release, use that for master sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + # For stable/unmaintained branches use corresponding release rpm + rdo_release=${TARGET_BRANCH#*/} sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm fi fi From 56368c271d5915af76e8e5d2b0bd873d09ba3a49 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 17 Jun 2024 15:10:40 +0000 Subject: [PATCH 1779/1936] [Neutron] Add a new Neutron service: neutron-periodic-workers This new service is spawned when using the Neutron WSGI module. This new service executes the plugin workers inside a wrapper executor class called ``AllServicesNeutronWorker``. The workers are executed as threads inside the process. Depends-On: https://review.opendev.org/c/openstack/neutron/+/922110 Related-Bug: #2069581 Change-Id: I6b76b7bcee1365c80f76231e0311406831f8ce41 --- lib/neutron | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron b/lib/neutron index 8b65980e90..e0b5d5d68c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -634,7 +634,9 @@ function start_neutron_service_and_check { run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server + enable_service neutron-periodic-workers run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" neutron_url=$service_protocol://$Q_HOST:$service_port/ @@ -706,6 +708,7 @@ function stop_other { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-rpc-server + stop_process neutron-periodic-workers stop_process neutron-api else stop_process q-svc @@ -823,6 +826,7 @@ function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api stop_process neutron-rpc-server + stop_process neutron-periodic-workers remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi From 41d253a6f94c1646f2bd28ac373d6aaf8bfa6089 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 20 Jun 2024 19:03:37 +0100 Subject: [PATCH 1780/1936] add ubuntu noble (24.04) support This change installs setuptools in the requirements and global venv to ensure that distutils is present. This change also adds new single and two node nodesets for noble and a devstack platform job as
nonvoting. Change-Id: Ie1f8ebc5db75d6913239c529ee923395a764e19c --- .zuul.yaml | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ inc/python | 2 +- lib/infra | 2 +- stack.sh | 2 +- 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 294dd48f4d..50a34ae0b3 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,6 +8,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-noble + nodes: + - name: controller + label: ubuntu-noble + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-single-node-focal nodes: @@ -148,6 +158,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-noble + nodes: + - name: controller + label: ubuntu-noble + - name: compute1 + label: ubuntu-noble + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: @@ -652,6 +692,17 @@ vars: configure_swap_size: 4096 + +- job: + name: devstack-platform-ubuntu-noble + parent: tempest-full-py3 + description: Ubuntu 24.04 LTS (noble) platform test + nodeset: openstack-single-node-noble + timeout: 9000 + voting: false + vars: + configure_swap_size: 8192 + - job: name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy @@ -849,6 +900,7 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-ubuntu-noble - devstack-platform-openEuler-22.03-ovn-source - devstack-platform-openEuler-22.03-ovs - devstack-multinode diff --git a/inc/python b/inc/python index 43b06eb520..2339afdd6d 100644 --- a/inc/python +++ b/inc/python @@ -41,7 +41,7 @@ function setup_devstack_virtualenv { # This package is currently installed via the distro and not # available on pypi. python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV - pip_install -U pip + pip_install -U pip setuptools fi if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then export PATH="$DEVSTACK_VENV/bin:$PATH" diff --git a/lib/infra b/lib/infra index b983f2b739..2aad00354a 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped diff --git a/stack.sh b/stack.sh index 0c36e1034e..6ae324bb0c 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|jammy|noble|rhel8|rhel9|openEuler-22.03" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From db305d2a4bb36c3d3a4ef4a108069cd77bca540e Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 24 Jun 2024 15:27:58 +0100 Subject: [PATCH 1781/1936] enable openstack-cli-server and other performance tunings This commit enables a number of performance optimizations to tune the host VMs' memory and IO by leveraging zswap and other kernel parameters to minimize the effect of IO latency and memory pressure. The openstack-cli-server has been enabled in the nova CI for several months now and has proven to speed up devstack significantly; while this change does not enable it by default in devstack, it does enable it by default in the CI jobs. Similarly, the zswap and other tuning remain disabled by default in devstack but are enabled by default in the devstack job. This change limits the qemu tb_cache_size to 128MB from 1G, which requires libvirt 8.0.0 or newer. As bullseye and openeuler-22.03 do not meet that requirement they have been removed. libvirt 8.0.0 will be the new minimum version supported in nova in the 2025.1 release, so the decision was made to drop support for older releases now instead of doing it at the start of the 2025.1 cycle. Debian coverage is still provided by the newer bookworm release. openeuler-22.03 has been superseded by the openeuler-24.03 LTS release. openeuler-24.03 is not currently available in CI but support could be re-added if desired; however, that is out of scope of this change. Change-Id: Ib45ca08c7e3e833b14f7e6ec496ad2d2f7073f99 --- .zuul.yaml | 148 +++++++++++++++++++++++------------------------ stack.sh | 2 +- 2 files changed, 64 insertions(+), 86 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 50a34ae0b3..06d76d0093 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -68,6 +68,9 @@ nodes: - controller +# Note(sean-k-mooney): this is still used by horizon for +# horizon-integration-tests, horizon-integration-pytest and +# horizon-ui-pytest, remove when horizon is updated. - nodeset: name: devstack-single-node-debian-bullseye nodes: - name: controller @@ -88,16 +91,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-openeuler-22.03 - nodes: - - name: controller - label: openEuler-22-03-LTS - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -463,6 +456,7 @@ file_tracker: true mysql: true rabbit: true + openstack-cli-server: true group-vars: subnode: devstack_services: @@ -470,6 +464,7 @@ dstat: false memory_tracker: true file_tracker: true + openstack-cli-server: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -517,7 +512,14 @@ - opendev.org/openstack/swift timeout: 7200 vars: - configure_swap_size: 4096 + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host turning are enabled that increase + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it.
+ configure_swap_size: 8192 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -526,11 +528,33 @@ DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: global_physnet_mtu: '{{ external_bridge_mtu }}' + $NOVA_CPU_CONF: + libvirt: + # Use lower TB cache than default(1GiB), only applicable with + # libvirt>=8.0.0 + tb_cache_size: 128 devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. @@ -618,6 +642,30 @@ Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true ENABLE_CHASSIS_AS_GW: false + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectivly this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is porportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true + devstack_local_conf: + post-config: + $NOVA_CPU_CONF: + libvirt: + # Use lower TB cache than default(1GiB), only applicable with + # libvirt>=8.0.0 + tb_cache_size: 128 - job: name: devstack-ipv6 @@ -669,15 +717,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-platform-debian-bullseye - parent: tempest-full-py3 - description: Debian Bullseye platform test - nodeset: devstack-single-node-debian-bullseye - timeout: 9000 - vars: - configure_swap_size: 4096 - - job: name: devstack-platform-rocky-blue-onyx parent: tempest-full-py3 @@ -754,62 +793,6 @@ # Enable Neutron ML2/OVS services q-agt: true -- job: - name: devstack-platform-openEuler-22.03-ovn-source - parent: tempest-full-py3 - description: openEuler 22.03 LTS platform test (OVN) - nodeset: devstack-single-node-openeuler-22.03 - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - devstack_localrc: - # NOTE(wxy): OVN package is not supported by openEuler yet. 
Build it - # from source instead. - OVN_BUILD_FROM_SOURCE: True - OVN_BRANCH: "v21.06.0" - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" - OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - -- job: - name: devstack-platform-openEuler-22.03-ovs - parent: tempest-full-py3 - description: openEuler 22.03 LTS platform test (OVS) - nodeset: devstack-single-node-openeuler-22.03 - voting: false - timeout: 9000 - vars: - configure_swap_size: 8192 - devstack_localrc: - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - devstack_services: - # Disable OVN services - ovn-northd: false - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true - group-vars: - subnode: - devstack_services: - # Disable OVN services - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -896,13 +879,10 @@ - devstack-enforce-scope - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-platform-ubuntu-noble - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -943,7 +923,6 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye # NOTE(danms): Disabled due to instability, see comment in the job # definition above. # - devstack-platform-rocky-blue-onyx @@ -978,7 +957,9 @@ # pruned. # # * nova-next: maintained by nova for unreleased/undefaulted - # things + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test # when neutron-api is served by uwsgi, it's in exprimental for testing. # the next cycle we can remove this job if things turn out to be @@ -988,7 +969,7 @@ # stable engouh with uwsgi. # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. # Next cycle we can remove this if everything run out stable enough. 
- # * nova-multi-cell: maintained by nova and currently non-voting in the + # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration experimental: @@ -1026,9 +1007,6 @@ jobs: - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs diff --git a/stack.sh b/stack.sh index 77548105a7..ab3f01cdbd 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|jammy|noble|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From e825ba07a1b88ab0570053f92123aa451e4b2ec8 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 26 Jun 2024 18:10:43 +0200 Subject: [PATCH 1782/1936] Create parent directory in merge_config_file The code accounts for the config file not existing but it does not account for the parent directory missing. This is currently breaking any Ironic jobs that disable Nova. Change-Id: Ia5fcfe6c63f5cc40b11f7e1f3be244d7897f26f6 --- inc/meta-config | 1 + 1 file changed, 1 insertion(+) diff --git a/inc/meta-config b/inc/meta-config index be73b60800..b9d9649e4b 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -90,6 +90,7 @@ function merge_config_file { local real_configfile real_configfile=$(eval echo $configfile) if [ ! -f $real_configfile ]; then + mkdir -p $(dirname $real_configfile) || die $LINENO "could not create the directory of $real_configfile ($configfile)" touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" fi From c707dd3fc2d601db5169508ed39e24dde89e9631 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 26 Apr 2023 14:59:25 +0000 Subject: [PATCH 1783/1936] [nova] Add flag to set libvirt tb_cache_size A config option is being added in nova with [1] in order to allow configuring lower tb-cache size for qemu guest VMs. This patch adds a flag in devstack so jobs can utilize it to set required tb-cache size. [1] https://review.opendev.org/c/openstack/nova/+/868419 Co-Authored-By: Sean Mooney Related: blueprint libvirt-tb-cache-size Change-Id: Ifde737eb5d87dfe860445097d1f2b0ce16b0de05 --- .zuul.yaml | 18 ++++++------------ lib/nova | 6 ++++++ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 06d76d0093..3e6c42e68f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -544,17 +544,15 @@ # increase in swap performance by reducing the amount of data # written to disk. the overall speedup is proportional to the # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: global_physnet_mtu: '{{ external_bridge_mtu }}' - $NOVA_CPU_CONF: - libvirt: - # Use lower TB cache than default(1GiB), only applicable with - # libvirt>=8.0.0 - tb_cache_size: 128 devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. 
@@ -659,13 +657,9 @@ # written to disk. the overall speedup is porportional to the # compression ratio and the speed of the swap device. ENABLE_ZSWAP: true - devstack_local_conf: - post-config: - $NOVA_CPU_CONF: - libvirt: - # Use lower TB cache than default(1GiB), only applicable with - # libvirt>=8.0.0 - tb_cache_size: 128 + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 - job: name: devstack-ipv6 diff --git a/lib/nova b/lib/nova index ee3f29eebf..7c6ffb2239 100644 --- a/lib/nova +++ b/lib/nova @@ -173,6 +173,9 @@ NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} # Whether to use Keystone unified limits instead of legacy quota limits. NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} + # Functions # --------- @@ -1071,6 +1074,9 @@ function start_nova_compute { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. From 3a0c0b9ff4bb3568efc471e1bf98fc273e8bc767 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 24 Jun 2024 11:09:34 +0000 Subject: [PATCH 1784/1936] [Neutron] Add a new Neutron service: neutron-ovn-maintenance-worker This new service is spawned when using Neutron WSGI module. This new service executes the OVN maintenance task that syncs the Neutron database and the OVN database. Depends-On: https://review.opendev.org/c/openstack/neutron/+/922074 Related-Bug: #1912359 Change-Id: I495459cd9e35e2e76ba7fc9611a589e1685814f5 --- lib/neutron | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lib/neutron b/lib/neutron index e0b5d5d68c..a8cc953b0e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -369,6 +369,24 @@ function _determine_config_l3 { echo "$opts" } +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} + +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" + fi +} + +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} + # For services and agents that require it, dynamically construct a list of # --config-file arguments that are passed to the binary. 
function determine_config_files { @@ -635,8 +653,10 @@ function start_neutron_service_and_check { neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server enable_service neutron-periodic-workers + _enable_ovn_maintenance run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" neutron_url=$service_protocol://$Q_HOST:$service_port/ @@ -710,6 +730,7 @@ function stop_other { stop_process neutron-rpc-server stop_process neutron-periodic-workers stop_process neutron-api + _stop_ovn_maintenance else stop_process q-svc fi @@ -827,6 +848,7 @@ function cleanup_neutron { stop_process neutron-api stop_process neutron-rpc-server stop_process neutron-periodic-workers + _stop_ovn_maintenance remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi From eb0ac1d217fe8a545f2e697d09fbb650efecb9ef Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 8 Jul 2024 18:02:25 +0200 Subject: [PATCH 1785/1936] Drop remainders of identity API v2.0 references keystone has dropped the v2.0 API in queens, time to drop all special casing for it. Change-Id: If628c4627f7c8b8c2ee9bca16ea6db693cf8526a --- files/openstack-cli-server/openstack | 1 - lib/tempest | 15 +-------------- openrc | 17 ++--------------- stackrc | 4 ---- 4 files changed, 3 insertions(+), 34 deletions(-) diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack index ef05f1b841..47fbfc5e17 100755 --- a/files/openstack-cli-server/openstack +++ b/files/openstack-cli-server/openstack @@ -68,7 +68,6 @@ try: env = {} passenv = ["CINDER_VERSION", "OS_AUTH_URL", - "OS_IDENTITY_API_VERSION", "OS_NO_CACHE", "OS_PASSWORD", "OS_PROJECT_NAME", diff --git a/lib/tempest b/lib/tempest index 6bd203e6f4..7beaf21292 100644 --- a/lib/tempest +++ b/lib/tempest @@ -18,7 +18,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -381,7 +381,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -392,19 +391,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "True" ]; then - # Run Identity API v2 tests ONLY if needed - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True - else - # Skip Identity API v2 tests by default - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} - if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then - # we're going to disable v2 admin unless we're using v2 by default. 
- iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False - fi - if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi diff --git a/openrc b/openrc index e20a5a56b9..5ec7634638 100644 --- a/openrc +++ b/openrc @@ -55,27 +55,14 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -# Identity API version -export OS_IDENTITY_API_VERSION=3 - -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the version 3 *identity api*. -# - # If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Currently, in order to use openstackclient with Identity API v3, -# we need to set the domain which the user and project belong to. -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} - export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -fi +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} # Set OS_CACERT to a default CA certificate chain if it exists. if [[ ! -v OS_CACERT ]] ; then diff --git a/stackrc b/stackrc index b37959712b..0492c73d1f 100644 --- a/stackrc +++ b/stackrc @@ -162,10 +162,6 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version -# TODO(frickler): Drop this when plugins no longer need it -IDENTITY_API_VERSION=3 - # Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides # each services ${SERVICE}_ENFORCE_SCOPE variables ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) From d714f7deaac8d56abe8b028385f5282d6c02d355 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Jul 2024 17:14:54 +0200 Subject: [PATCH 1786/1936] Add devstack-platform-ubuntu-noble to periodic Seems the platform is stable, let's add it to the periodic-weekly tests that we run. Change-Id: I185443c0fdb9e1248542a16fd877dc6b8ffd7683 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3e6c42e68f..af7e74b57b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -725,7 +725,6 @@ vars: configure_swap_size: 4096 - - job: name: devstack-platform-ubuntu-noble parent: tempest-full-py3 @@ -1004,3 +1003,4 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-ubuntu-noble From 696dbdf045cbf1c1525bb25c005ce767d1c9e9b8 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Jul 2024 16:36:37 +0200 Subject: [PATCH 1787/1936] Make nova only use the nova account Each service should only be using that service's user account within its configuration, in order to reduce the possible impact of credential leaks. Start with nova, other services will follow. 
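As a rough sketch of the effect (not part of this change itself, and assuming the stock DevStack service project/domain settings), the relevant nova.conf sections end up authenticating as the ``nova`` service user on both sides:

    [neutron]
    auth_type = password
    auth_url = $KEYSTONE_SERVICE_URI
    username = nova
    password = $SERVICE_PASSWORD
    user_domain_name = $SERVICE_DOMAIN_NAME
    project_name = $SERVICE_PROJECT_NAME

    [placement]
    auth_type = password
    auth_url = $KEYSTONE_SERVICE_URI
    username = nova
    password = $SERVICE_PASSWORD
    user_domain_name = $SERVICE_DOMAIN_NAME
    project_name = $SERVICE_TENANT_NAME

(The ``$...`` placeholders stand in for the DevStack variables that ``iniset`` expands when writing the file.)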
Change-Id: I6b3fef5de05d5e0cc032b83a2ed834f1c997a048
---
 lib/neutron | 2 +-
 lib/nova    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/neutron b/lib/neutron
index a8cc953b0e..da21d46079 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -485,7 +485,7 @@ function create_nova_conf_neutron {
     local conf=${1:-$NOVA_CONF}
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf neutron username "$Q_ADMIN_USERNAME"
+    iniset $conf neutron username nova
     iniset $conf neutron password "$SERVICE_PASSWORD"
     iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
     iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
diff --git a/lib/nova b/lib/nova
index 7c6ffb2239..35c6893763 100644
--- a/lib/nova
+++ b/lib/nova
@@ -640,7 +640,7 @@ function configure_placement_nova_compute {
     local conf=${1:-$NOVA_CONF}
     iniset $conf placement auth_type "password"
     iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf placement username placement
+    iniset $conf placement username nova
     iniset $conf placement password "$SERVICE_PASSWORD"
     iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
     iniset $conf placement project_name "$SERVICE_TENANT_NAME"

From 6df53719180c8d587e058a26ed3bb19562e55745 Mon Sep 17 00:00:00 2001
From: Sean Mooney
Date: Fri, 12 Jul 2024 20:08:58 +0100
Subject: [PATCH 1788/1936] bump guest ram to prevent kernel panics

One observation we had in downstream CI is that the cirros 0.6.2 image
sometimes appeared to crash when using 128MB of RAM. Upstream we have
been dealing with semi-random kernel panics which are loosely
correlated with cinder volume usage.

Recently we optimized the devstack jobs by using zswap, which has
reduced memory pressure in the jobs. This patch increases the RAM
allocated to a flavor to see if we can afford that at the current
concurrency level, in an attempt to reduce kernel panics.

Two new parameters are added to allow jobs or users to set the desired
RAM size:

TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192}
TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256}

Change-Id: Ib6a2d5ab61a771d4f85bd2c2412052efadc77ac5
---
 lib/tempest | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/lib/tempest b/lib/tempest
index 7beaf21292..a14ed1af72 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -102,6 +102,9 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS)
 # it will run tempest with
 TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}

+TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192}
+TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256}
+
 # Functions
 # ---------

@@ -295,13 +298,15 @@ function configure_tempest {
         if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
             # Determine the flavor disk size based on the image size.
             disk=$(image_size_in_gib $image_uuid)
-            openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
+            ram=${TEMPEST_FLAVOR_RAM}
+            openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
         fi
         flavor_ref=42
         if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
             # Determine the alt flavor disk size based on the alt image size.
disk=$(image_size_in_gib $image_uuid_alt) - openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else From aaaa03718bdc05df197708f9354e985936f96853 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 19 Jul 2024 08:09:25 +0000 Subject: [PATCH 1789/1936] [Neutron] Do not execute RPC workers if "rpc_workers=0" When the Neutron WSGI module is used, an independent service called "neutron-rpc-server" is configured and executed. However it will fail if the number of RPC workers is configured to zero. In that case, the configuration and execution of this service should be skipped. If the service is explicitly disabled in the devstack configuration, it won't be executed neither. Closes-Bug: #2073572 Change-Id: Idd023a2a8f588152221f20a13ae24fbb7d1618a4 --- lib/neutron | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index a8cc953b0e..474613926b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -142,6 +142,7 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +_Q_RUN_RPC_SERVER=True VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} @@ -464,6 +465,15 @@ function configure_neutron { # clouds, therefore running without a dedicated RPC worker # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process should not + # be executed. NOTE: this service is only executed when WSGI is enabled + # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. + local rpc_workers + rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) + if ! 
is_service_enabled neutron-rpc-server || [ "$rpc_workers" -eq "0" ]; then + _Q_RUN_RPC_SERVER=False + fi if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" @@ -651,10 +661,14 @@ function start_neutron_service_and_check { enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ - enable_service neutron-rpc-server + if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + enable_service neutron-rpc-server + fi enable_service neutron-periodic-workers _enable_ovn_maintenance - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" _run_ovn_maintenance else From 13888a31d2bac9aa46adf72a154be4aa4fbcd790 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Sat, 20 Jul 2024 15:50:30 +0000 Subject: [PATCH 1790/1936] [Neutron] neutron-rpc-server is not a configurable service The "neutron-rpc-server" is not a configurable service that can be enabled or disabled. This service is a dependant process of the "neutron-api-server" service that is spawned when the Neutron API uses the WSGI module. The execution of this child service will depend on: * The Neutron API service when running with the WSGI module. If the Neutron API uses the eventlet module, this service won't run (the RPC workers will be spawned by the eventlet server). * The "rpc_workers" configuration variable. If this variable is explicitly set to "0", the server must not run. Closes-Bug: #2073844 Related-Bug: #2073572 Change-Id: Ic019423ca033ded8609d82bb11841b975862ac14 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 474613926b..69bcb86d4d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -471,7 +471,7 @@ function configure_neutron { # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. local rpc_workers rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) - if ! 
is_service_enabled neutron-rpc-server || [ "$rpc_workers" -eq "0" ]; then + if [ "$rpc_workers" == "0" ]; then _Q_RUN_RPC_SERVER=False fi From 0cd876384a77d2144c3ebc51a0228433fdb7facb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 19 Apr 2024 12:12:16 +0100 Subject: [PATCH 1791/1936] lib/neutron: Migrate neutron to WSGI module path Change-Id: Ie99ec3bf4198fa7cd7583d2dca648e1474f94aea Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/neutron/+/916407 --- lib/neutron | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index a8cc953b0e..6336795f2e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -88,6 +88,7 @@ export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/d # enough NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) +NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" @@ -466,7 +467,7 @@ function configure_neutron { iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" fi } From 95697d84cb59dcbc53748ccdb472987cf61df1f4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 23 Jul 2024 11:36:49 +0100 Subject: [PATCH 1792/1936] docs: Add a minimal Tempest guide This can be fleshed out more in the future, including with information about managing plugins, but this is a start. Change-Id: I1094d093b704e37370e3e434ebf3697954e99da3 Signed-off-by: Stephen Finucane --- doc/source/tempest.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 doc/source/tempest.rst diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= + +`Tempest`_ is the OpenStack Integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include DevStack plugin to do things +like pre-create required static resources. + +The `Tempest documentation `_ provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: + +.. code-block:: shell + + cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ... + +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` configuration variable) and have enabled +virtualenv-based installation in the standard location (configured via the +``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively). + +.. 
_Tempest: https://docs.openstack.org/tempest/latest/ From 6990b06cd321930f69907ba42ee744755f8029fe Mon Sep 17 00:00:00 2001 From: Riccardo Pittau Date: Wed, 24 Jul 2024 18:01:51 +0200 Subject: [PATCH 1793/1936] Install simplejson in devstack venv Workaround to avoid failure due to missing osc dependency removed in [1] [1] https://review.opendev.org/c/openstack/python-openstackclient/+/920001 Change-Id: I3f7541e691717186b7c73f10ffabae6fc0c5c9f9 --- inc/python | 3 +++ 1 file changed, 3 insertions(+) diff --git a/inc/python b/inc/python index 2339afdd6d..1fd414773f 100644 --- a/inc/python +++ b/inc/python @@ -42,6 +42,9 @@ function setup_devstack_virtualenv { # available on pypi. python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV pip_install -U pip setuptools + #NOTE(rpittau): workaround for simplejson removal in osc + # https://review.opendev.org/c/openstack/python-openstackclient/+/920001 + pip_install -U simplejson fi if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then export PATH="$DEVSTACK_VENV/bin:$PATH" From 8784a3027fc3154aa2f6482d0127e45070e60b5a Mon Sep 17 00:00:00 2001 From: karolinku Date: Wed, 31 Jul 2024 12:34:00 +0200 Subject: [PATCH 1794/1936] Replacing usage of rdo-release rpm with centos-release-openstack rpms follwing [1]. [1] https://issues.redhat.com/browse/RDO-311 Change-Id: I50951e077e73297d10b075677a440992d1e2fa91 --- stack.sh | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/stack.sh b/stack.sh index ab3f01cdbd..dcfd398c01 100755 --- a/stack.sh +++ b/stack.sh @@ -302,23 +302,14 @@ function _install_epel { } function _install_rdo { - if [[ $DISTRO == "rhel8" ]]; then + if [[ $DISTRO == "rhel9" ]]; then + rdo_release=${TARGET_BRANCH#*/} if [[ "$TARGET_BRANCH" == "master" ]]; then - # rdo-release.el8.rpm points to latest RDO release, use that for master - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + # adding delorean-deps repo to provide current master rpms + sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else # For stable/unmaintained branches use corresponding release rpm - rdo_release=${TARGET_BRANCH#*/} - sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm - fi - elif [[ $DISTRO == "rhel9" ]]; then - if [[ "$TARGET_BRANCH" == "master" ]]; then - # rdo-release.el9.rpm points to latest RDO release, use that for master - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm - else - # For stable/unmaintained branches use corresponding release rpm - rdo_release=${TARGET_BRANCH#*/} - sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm + sudo dnf -y install centos-release-openstack-${rdo_release} fi fi sudo dnf -y update From 92b65a84cc8135316922a0f8b91420ed221f3269 Mon Sep 17 00:00:00 2001 From: elajkat Date: Mon, 5 Aug 2024 11:39:06 +0200 Subject: [PATCH 1795/1936] Handle_tags and branches for unmaintained also Related-Bug: #2056276 Change-Id: Iaa34624d1d85cadf1b45bec780ef8d97dd054041 --- roles/setup-devstack-source-dirs/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index 294c29cd29..cb7c6e3af8 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -43,9 +43,9 @@ base_branch={{ 
devstack_sources_branch }} if git branch -a | grep "$base_branch" > /dev/null ; then git checkout $base_branch - elif [[ "$base_branch" == stable/* ]]; then + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then # Look for an eol tag for the stable branch. - eol_tag=${base_branch#stable/}-eol + eol_tag="${base_branch#*/}-eol" if git tag -l |grep $eol_tag >/dev/null; then git checkout $eol_tag git reset --hard $eol_tag From 38dea33fe9a5e6bef39566295cc8d05fb1d88223 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 1 Aug 2024 23:41:43 +0000 Subject: [PATCH 1796/1936] oslo.log: Configure log color by $LOG_COLOR Relatively recently oslo.log 6.1.0 was released and contains change I7966d4f4977b267f620946de4a5509f53b043652 which added an option to enable color in logs which defaults to False. This caused a change in behavior for DevStack such that viewing logs with journalctl no longer showed different colors for different log levels, which can make debugging more difficult when developing with DevStack. This adds olso.log color configuration based on the existing $LOG_COLOR DevStack variable for log color which defaults to True for interactive invocations. Change-Id: If10aada573eb4360e81585d4fb7e5d97f15bc52b --- functions | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/functions b/functions index f81e8f0a08..42d08d7c4a 100644 --- a/functions +++ b/functions @@ -694,6 +694,8 @@ function setup_colorized_logging { iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_systemd_logging { @@ -715,6 +717,9 @@ function setup_systemd_logging { iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_standard_logging_identity { From 79a812a69e4015f6c911aa54989970e35bfc241f Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 31 Jul 2024 14:41:33 +0000 Subject: [PATCH 1797/1936] Move the check of "rpc_workers" after the post-config phase The configuration variable can be checked in the Neutron configuration during the post-config phase when the configuration files and sections are merged together. 
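For illustration (a sketch, not part of the change): a deployment that wants to drop the RPC workers entirely would typically do so through DevStack's local.conf post-config meta-section, which is only merged into neutron.conf during the post-config phase, hence the need to defer the check:

    [[post-config|$NEUTRON_CONF]]
    [DEFAULT]
    rpc_workers = 0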
Closes-Bug: #2075342 Change-Id: Ic42463e2f72488a1b14ce49e4e435cb4a2c0c855 --- lib/neutron | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/lib/neutron b/lib/neutron index 2325188f94..bcef8a5042 100644 --- a/lib/neutron +++ b/lib/neutron @@ -143,7 +143,6 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -_Q_RUN_RPC_SERVER=True VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} @@ -466,15 +465,6 @@ function configure_neutron { # clouds, therefore running without a dedicated RPC worker # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - # The default value of "rpc_workers" is None (not defined). If - # "rpc_workers" is explicitly set to 0, the RPC workers process should not - # be executed. NOTE: this service is only executed when WSGI is enabled - # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. - local rpc_workers - rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) - if [ "$rpc_workers" == "0" ]; then - _Q_RUN_RPC_SERVER=False - fi if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" @@ -657,17 +647,24 @@ function start_neutron_service_and_check { service_port=$Q_PORT_INT service_protocol="http" fi + # Start the Neutron service if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ - if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + if [ "$rpc_workers" != "0" ]; then enable_service neutron-rpc-server fi enable_service neutron-periodic-workers _enable_ovn_maintenance - if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + if [ "$rpc_workers" != "0" ]; then run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" fi run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" From 84ce1984b1f0639025af599b426019a4b140fcb4 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 31 Jul 2024 19:04:08 +0000 Subject: [PATCH 1798/1936] Add os-test-images support in lib/tempest This generates the test images in os-test-images and also configures tempest to know where it is (and if image conversion is enabled in glance). 
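Roughly, the resulting tempest.conf gains entries along these lines (a sketch assuming the default DEST=/opt/stack; the conversion flag is only set when glance's image import configuration defines an image_conversion output_format):

    [image]
    images_manifest_file = /opt/stack/os-test-images/images/manifest.yaml

    [image-feature-enabled]
    image_conversion = True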
Change-Id: Ib74002828a77838ab95d2322e92bdab68caac37c --- .zuul.yaml | 1 + lib/tempest | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index af7e74b57b..59a577e522 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -510,6 +510,7 @@ - opendev.org/openstack/nova - opendev.org/openstack/placement - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images timeout: 7200 vars: # based on observation of the integrated gate diff --git a/lib/tempest b/lib/tempest index a14ed1af72..24c8271132 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,6 +105,10 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images + # Functions # --------- @@ -357,6 +361,19 @@ function configure_tempest { fi fi + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} From 3b0d76c30bf63332f494e8aae18dc2f1feed28dd Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Mon, 12 Aug 2024 17:01:12 +0000 Subject: [PATCH 1799/1936] Fix get_default_host_ip ipv6 address parsing This is another occurrence of the issue fixed in bug 1786259 with change I30bf655f which occurs when there are multiple IPv6 gateways present. Before this change: $ source openrc +++++functions-common:get_default_host_ip:776 ip -f inet6 addr show 100 Device "100" does not exist. This is because the ip route command returns: default proto ra metric 100 expires 1497sec pref medium nexthop via fe80::4e16:fc01:298c:98ed dev ens3 weight 1 nexthop via fe80::4e16:fc01:2983:88aa dev ens3 weight 1 Related-Bug: #1786259 Change-Id: I7729730df66a4dc7ee11df1d23b19b9c0794b575 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 84d281b21e..e265256ccf 100644 --- a/functions-common +++ b/functions-common @@ -771,7 +771,7 @@ function get_default_host_ip { if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} local host_ips host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip From d6e3d06001e7c4bb092cf9dc77188627bd2b9358 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Thu, 22 Feb 2024 00:11:15 +0530 Subject: [PATCH 1800/1936] Add config options for optimized upload volume When glance is using cinder as a backend, we can use optimized path for upload volume to image operation. 
The config options image_upload_use_cinder_backend and image_upload_use_internal_tenant are used to configure optimization in the upload volume to image workflow where we create a cinder volume in the internal service project and register the location in glance. Recently it was found that the glance location API workflow was broken[1] for the upload volume case and it wasn't detected because we are not testing it in our glance cinder job "cinder-for-glance-optimized". This patch adds the config option to test the optimized path. Note that the optimized upload functionality is only possible when glance uses cinder as it's backend since it uses clone volume functionality to clone the Image-Volume from the source volume. [1] https://bugs.launchpad.net/glance/+bug/2054575 Change-Id: I521ed04696a5a545b2a2923cf8008bd64add7782 --- lib/cinder | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/lib/cinder b/lib/cinder index ae898e9522..f80542a35f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -183,6 +183,12 @@ fi # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} + # For limits, if left unset, it will use cinder defaults of 0 for unlimited CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} @@ -192,6 +198,11 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + # Flag to set the oslo_policy.enforce_scope. This is used to switch # the Volume API policies to start checking the scope of token. by default, # this flag is False. @@ -353,6 +364,14 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi fi if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then @@ -729,6 +748,18 @@ function configure_cinder_image_volume_cache { done } +function configure_cinder_volume_upload { + # Expect UPLOAD_VOLUME_OPTIMIZED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. 
+ local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} # Restore xtrace $_XTRACE_CINDER From 80c1605a1df9687c7d1d842b258a3d99ec2eda35 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 28 Feb 2024 13:08:12 +0530 Subject: [PATCH 1801/1936] Configure cinder service token Glance is implementing new location APIs, for which, cinder needs to pass service token to register a location in glance. This is required in the case when glance is using cinder as a backend and cinder tries to upload a volume in the optimized path. We are adding a new option, ``CINDER_USE_SERVICE_TOKEN`` that will configure the service user section in cinder.conf. By default, it is set to False. Change-Id: I0045539f1e31a6d26c4f31935c5ddfaaa7607a48 --- lib/cinder | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/cinder b/lib/cinder index f80542a35f..0adca4f4ec 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,6 +88,10 @@ CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} # thin provisioning. CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. +CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + # Default backends # The backend format is type:name where type is one of the supported backend # types (lvm, nfs, etc) and name is the identifier used in the Cinder @@ -445,6 +449,10 @@ function configure_cinder { iniset $CINDER_CONF oslo_policy enforce_scope false iniset $CINDER_CONF oslo_policy enforce_new_defaults false fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf + fi } # create_cinder_accounts() - Set up common required cinder accounts @@ -761,6 +769,12 @@ function configure_cinder_volume_upload { done } +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True + iniset $CINDER_CONF service_user auth_strategy keystone +} + # Restore xtrace $_XTRACE_CINDER From 1a336ef4aec1c908b139db3b67e766a437c2cbb9 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 12 Aug 2024 11:34:02 -0700 Subject: [PATCH 1802/1936] Trivial fixes from review of os-test-images This fixes some trivial things from the review where this support was added: https://review.opendev.org/c/openstack/devstack/+/925425 Change-Id: I990a3816f425a1b4c8680ec43d698e32eea2238b --- lib/tempest | 6 +----- stackrc | 3 +++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 24c8271132..310db2daa6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,10 +105,6 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} -OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} -OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} -OSTESTIMAGES_DIR=${DEST}/os-test-images - # Functions # --------- @@ -369,7 +365,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml local image_conversion image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF 
image_conversion output_format) - if [[ "$image_conversion" ]]; then + if [[ -n "$image_conversion" ]]; then iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True fi fi diff --git a/stackrc b/stackrc index 0492c73d1f..0b3e1c61da 100644 --- a/stackrc +++ b/stackrc @@ -304,6 +304,9 @@ TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images ############## # From 9e1348f81b84e3bef93d6998606e09725c585b1a Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 20 Aug 2024 17:23:02 +0900 Subject: [PATCH 1803/1936] etcd: Replace deprecated --debug option ... to resolve the following warning. [WARNING] Deprecated '--debug' flag is set to true (use '--log-level=debug' instead Change-Id: Idb412cea64dfc42e3d1223b77f134804eeb7bd60 --- lib/etcd3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index 4f3a7a4349..0d22de8c73 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -51,7 +51,7 @@ function start_etcd3 { fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then - cmd+=" --debug" + cmd+=" --log-level=debug" fi local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" From 5ed2b7c6b2e2a5da50c3db9cda9e9b8e4ae4402f Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Fri, 30 Aug 2024 14:15:40 +0100 Subject: [PATCH 1804/1936] make devstack-platform-ubuntu-noble voting devstack-platform-ubuntu-noble was added in Ie1f8ebc5db75d6913239c529ee923395a764e19c and has been runnning for a little over 2 months in that time https://zuul.openstack.org/builds?job_name=devstack-platform-ubuntu-noble the job has been pretty stable so its time to make this voting in advance of it becoming required in the 2025.1 release. Change-Id: Iffd6ccf9603117d6720931e260afa2da13c26ec4 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 59a577e522..a1c251a398 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -732,7 +732,6 @@ description: Ubuntu 24.04 LTS (noble) platform test nodeset: openstack-single-node-noble timeout: 9000 - voting: false vars: configure_swap_size: 8192 @@ -917,6 +916,7 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm + - devstack-platform-ubuntu-noble # NOTE(danms): Disabled due to instability, see comment in the job # definition above. # - devstack-platform-rocky-blue-onyx From 0ff627286297a3957143577412884dc50ff8a57a Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 2 Sep 2024 17:29:55 +0530 Subject: [PATCH 1805/1936] Run chown for egg-info only if the directory exists 9-stream jobs failing since [1] merged as these still use GLOBAL_VENV=False. egg-info directory is not created in project source directory when pyproject.toml is used in the project. pyproject.toml being added across projects[2] to support pip 23.1. 
[1] https://review.opendev.org/c/openstack/nova/+/899753 [2] https://review.opendev.org/q/topic:%22pip-23.1-support%22 Change-Id: I53954a37461aee5dd7f487d6bd205caef4408392 --- inc/python | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 1fd414773f..2083b74dc1 100644 --- a/inc/python +++ b/inc/python @@ -474,7 +474,10 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then - safe_chown -R $STACK_USER $1/*.egg-info + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi fi } From d7c3c0accc89e4e99915c24fa7c3bff2e90a715e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 10:56:04 +0100 Subject: [PATCH 1806/1936] lib/cinder: Remove 'volume3' endpoint This was needed when 'block-storage' pointed to the v2 API. This is no longer the case (and hasn't been for some time). This is unnecessary duplication now. Change-Id: I00cfb56d3e54d0162b1609f4bf58814e9000c103 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/tempest/+/930296 --- lib/cinder | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/cinder b/lib/cinder index 0adca4f4ec..2f9955b1d3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -483,23 +483,11 @@ function create_cinder_accounts { "block-storage" \ "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" else get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" fi configure_cinder_internal_tenant From 2d487d8c7b424a76eb484d09f09530e24b7207fb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 11:00:39 +0100 Subject: [PATCH 1807/1936] lib/cinder: Strip project_id from URL This is optional. There's no need to include it. Change-Id: I2e745865696dbb317f819ecb74f5b5df88a9ed76 Signed-off-by: Stephen Finucane --- lib/cinder | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2f9955b1d3..6da5d4579d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -482,12 +482,12 @@ function create_cinder_accounts { get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3" else get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3" fi configure_cinder_internal_tenant From 9b4439038144f380c1d1c8a8e87ad76cd26b72b5 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 11:03:15 +0100 Subject: [PATCH 1808/1936] lib/cinder: Align endpoint creation code Do this the same way we do it for Nova, to make for easier review. 
Change-Id: I31877705894a21570f130723e0a27ff38f945eea Signed-off-by: Stephen Finucane --- lib/cinder | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index 6da5d4579d..dc284920e0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -476,20 +476,19 @@ function create_cinder_accounts { create_service_user "cinder" $extra_role - # block-storage is the official service type - get_or_create_service "cinder" "block-storage" "Cinder Volume Service" - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3" + local cinder_api_url + if [[ "$CINDER_USE_MOD_WSGI" == "False" ]]; then + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT" else - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3" + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" fi + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" configure_cinder_internal_tenant fi } From 03bc214525c7d7f9dfb6cb855025b70053839a72 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 25 Sep 2024 12:03:40 -0700 Subject: [PATCH 1809/1936] Update DEVSTACK_SERIES to 2025.1 stable/2024.2 branch has been created now and current master is for 2025.1. Change-Id: If5c9de9ddfab1bff313c70cf2c40ce7fbe60473f --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0b3e1c61da..ab1f8a6ffd 100644 --- a/stackrc +++ b/stackrc @@ -249,7 +249,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2024.2" +DEVSTACK_SERIES="2025.1" ############## # From fec589a1ce6b1dd29e27ed2d5aa088390a7dfa92 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 26 Sep 2024 21:23:17 +0200 Subject: [PATCH 1810/1936] Bump cirros version to 0.6.3 This is the latest cirros release, featuring an updated kernel and some fixes and added features, let's use it. [0] https://github.com/cirros-dev/cirros/releases/tag/0.6.3 Change-Id: I2506fa713e0426789fa40a5f4f7fd4e963a158f0 --- doc/source/guides/nova.rst | 2 +- stackrc | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 705d427e68..6b8aabf8db 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.2-x86_64-disk --nic none --wait test-server + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. 
diff --git a/stackrc b/stackrc index 0b3e1c61da..b9f86d1ae6 100644 --- a/stackrc +++ b/stackrc @@ -656,7 +656,7 @@ esac #IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -673,11 +673,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) @@ -688,7 +688,7 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi From 6512f0140c9a312f9455bfe420462c64635fd622 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 26 Aug 2024 12:19:06 +0100 Subject: [PATCH 1811/1936] doc: drop sphinxcontrib-nwdiag, sphinxcontrib-blockdiag usage sphinxcontrib-nwdiag does not appear to be maintained anymore [1] and there have been no releases in nearly 5 years. Statically generate the images and include them this way. We can revert this change if the maintainership issue resolves itself. sphinxcontrib-blockdiag has had activity more recently [2], but it's still been nearly 3 years. More importantly, we don't actually use it so there's no reason to keep it around. 
[1] https://pypi.org/project/sphinxcontrib-nwdiag/#history
[2] https://pypi.org/project/sphinxcontrib-blockdiag/#history

Change-Id: Ic5244c792acd01f8aec5ff626e53303c1738aa69
Signed-off-by: Stephen Finucane
---
 doc/requirements.txt                    |  4 --
 .../assets/images/neutron-network-1.png | Bin 0 -> 10251 bytes
 .../assets/images/neutron-network-2.png | Bin 0 -> 11243 bytes
 .../assets/images/neutron-network-3.png | Bin 0 -> 13293 bytes
 doc/source/conf.py                      | 12 ++--
 doc/source/guides/neutron.rst           | 60 ++----------------
 6 files changed, 12 insertions(+), 64 deletions(-)
 create mode 100644 doc/source/assets/images/neutron-network-1.png
 create mode 100644 doc/source/assets/images/neutron-network-2.png
 create mode 100644 doc/source/assets/images/neutron-network-3.png

diff --git a/doc/requirements.txt b/doc/requirements.txt
index ffce3ff74c..7980b93ed7 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -4,8 +4,4 @@ Pygments
 docutils
 sphinx>=2.0.0,!=2.1.0 # BSD
 openstackdocstheme>=2.2.1 # Apache-2.0
-nwdiag
-blockdiag
-sphinxcontrib-blockdiag
-sphinxcontrib-nwdiag
 zuul-sphinx>=0.2.0
diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png
new file mode 100644
index 0000000000000000000000000000000000000000..7730ca93f175651c1ec860f37ea6be33310e73e1
GIT binary patch
literal 10251
[base85-encoded image data elided]

literal 0
HcmV?d00001

diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..919935119de1655ca6c7ffb4e943a5f517b57fb0
GIT binary patch
literal 11243
[base85-encoded image data elided]

literal 0
HcmV?d00001

diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png
new file mode 100644
index 0000000000000000000000000000000000000000..34f03ed5c9aa81863dda17be757d70daaa05735b
GIT binary patch
literal 13293
[base85-encoded image data elided]
ztGGS)y@y8+){u~YgNygOtbs&*KAK*+?*g^@r&JCqkNoyq1=h)<>4_Ann2c@Mw+&%qcq)Xl$G#&xw)PJ zH6+pm^I9`e^Jo+M(F$>9eaHhh6Jx}v`Ya0u6IgX>HS-wdj#Z#{M(^0Tb$H`W^+e5s zd<)I-C%d?vBb1T-oJj^Y+i`9KVq~h8+c==TAYCOuA0$)tdH%-Y0LKN=H5cn59b}6B zq{SfWr;7u|Z%*=BMB=rsPs?wo1_WgEeUf-&D^UfznkE(=<@4^Y{ryD#=QV<`m(sa#>kt~4~|6g zUbo0YeQo?%X2Y%JbrZW9`!y3ICYw`yqK7}tAisJKYrT^u*xGgyyZ!s4ZOanlt&?a+ z4J9*pt+BNgb40=eR!I9mdlQZG3JZxM~w@U!~s5l_{u8$Y^ljY|LwmPb&!G zu&}Eh2+x1FpZSLL{0p!?0WmqmdcAskv}OR5HN$fTLsK>4=EKCyE$nXLcF?3ooN=UB4$ zcFGhzOTPx^?4!n=Y{7VM@UB6pfoiEC4_Aydb!sind{4J|T*_JOK42{G7ZqAJ#0>LC zNeNOC#hK7)q`E~Q5OjpsZZ7dGDlaU2Qp!2+7Kcn)Zg-gZhHk^$)}2gQB2bpx2OlXZ zLC)wa`NTRsPs(B{Ws#BI@oplv(SpIjmiZ9{7Co4CGj_!OPmzVBMB34il;@%^yByk1 zCPR{I*`{L;bfGH7Z6x7<6IP8QmdNz)yT-m&s-Oui^i`j z81-K#E1QjF&AAQHb8kCBY;g@I&40wA*;Z|VJM>?r-MQ2CZC*DuFE3ATd~PwsW|Ny{ zQge)VMv=vgkA^WK*)~}f;5AtEq)ix$w#cM*huAvC%)M<*bHYm&4!B4r^Jw_Rv6S2d z(ab=QySqEr zU2)!Y`~86f2WUA?G4o?SVU5COM}C9AIN{xa_;Z!)JAzo1d8*Z+FjC%_QB- zJ^pfd_`GUQvCXKqlir`NMI?utf`SJ7e)ih#kkowoI78@7w=qe6{Vv9DbxO-pv=hqF z(Gh2rOH0#BZH)$(RlgSHf6Gnl@ks09owPK}ANoa3G=_JEJP$qNLK6r}MJ}6e=jU~w zx81DG9u9>Khhc*wPWv`*;M~r?)elHT%laaAK6r?N74&u#D7)w8<}Mh_>Jhd(Ncdph zm6Oj-498Am(d~tWH^I10T4Wv&EXmH>5}dO-1Jm;9Q+bgcZ5?OJcAqlekXKmv()L|@ z{smVLk5;1WB~d2Azxp9Xw;Us_!bnLmIbQpZn^VhuH|#HywYbPylrpkNSwxW(DZOx? zbHaD%oAYo=ih@H?gu%Itb26fYXh`Yl>FMme?#be>Z(`{-WrcQ=xvH^#0HMA&bP#~Wg^@joY)78Us_t~ zR(*%pI-Rn}Oo9XN>ERJU;$3gHls&hUQR|c6>AH%1C_*1TX)N9f%rB*7EM$lNL$F81 zAcHKzu&_$XoB?Gn;qL*pTdCo2$@`YE%c_MI*C>@0=G6UCUORPsd^{-4*d>0j>kkU} zvw`Y?bKpR^E4nz%r*~WoS_*>Aae#YZw@0cL-^Gx565e(2c9v?nb|#gaw<-A-&Y~GH z9v<+R02f{CS#-u=@c>5{`)ti@{auX8OjYxUL%A#s_Vr^c9ntk2cI3I+%97O$ zsbiOSJ32Txz;~?uX6>K=Zwo^H3e3_=FGX*tSSchX$eG;Wy@KxZHqNR`9vtx-8doUY zaHy7sP*RLyvO6zraHA|pwaV0TCzh{dXIsstRmZerh_8pPHWF{?>WR!jH8$iYtzerkCS_Ted~?!4F7#+=+-_rQvQL(IOb^S@c* zrw9WVe+bvRl^~8v5GUM#sBow;HFDPjHkfPyxP};mTYBKf;OhW=YGE#uBAZYu>ZD2u zgQDQ7oaptIrSmopgd?Uj_oGLTrlqC1^Uo-i`NPU`bLu6fB|MjdikB_s9Y{06SFV}W z8}2BN$zfQvdE6?y6g1_r>AEiFXs(w>FW#KN5su1aaf8)=SDgw8-9=8vWFIX^+oz?RFK5ddS<=RY;hbWh zc70B{c_!L8)=?lYMjOrV_s*c!l?s;PP=C95R%n(mZ>H=NuxT!9T^IQlOl6qjeHfXQ z{nQ6yf)ATQ@J^5|&e3s$2@oV=t%pcb)}<47}V^~FKUc_*T)mWLEik?6Zls^c4n zFIo<1?Ai2cv_j7St7jX#fEgZKdsV>ugjwnqg!-`Pd6||Na_?|gSJz0^(E>B>PQo}? 
z#!Rf(&U3~)%zW+A+jsBY<+Z07CoH_{NY_!VU9~lX|+mI-tB#>F^rQB1iD4rZ+Bp5wGM+nsm7IFe?yS(}#nlQH=mDIcx5KP^1AqpG)B7Qj^KA04BNPQGO+#q?isK~!e$~^ zHmq9m-O${>$A*W8OThAUz;npD&+Uxq>x!fK-}0yrj3cSLy@=OuD1s9OjwH>_QJX_dZq3Net+f2Q1z+n(6{l{DAL*f=**ngG$2u5v@|?d_I7wv0vK45j8&)D<~m zMpJKU*)n?dAPRfsH(gg4&j$aQAEt_`%XMA*x;!bT)(6>a%C52=O{y#PH^}i)MOUBh zKnNF+W{jiYdAaL-^{m?JeC)`LO`d%sN|dE;7uefK3_F`&E!E-PsHcuyj<%HMb_SYp zu}EpX5+d|z@)}+dcf)36^vs%B`v{_I0`9xX;2ruA)=D==M@ooSYfSA)^E+KMT03~T z*NwVg-#lv%DS*nks!kCkF_5mp8}ByMPGNKF<~XA4Yp<8R%fFBnd8l(D+9}7)G@T7= z<08p(kr>y>N!BysY9)6L6QzW(UzdX`u|sB`tyo-eHJIn<`)33U6#6X7|N7>o<#DkQ z5ewn9zIO&i4?X=jU<5|?(P_KT8r0z~~%V8HqKIjNI zzK$BXeNiY{mXno6cxO4B0E1zMQRF)HgGzKeal0n+?1*V&1-`i$!T`ho(pERpPB+r4 zckg3o30|R%GwSYM+yu~H2@2U#P-K--s_*h~Zf}-xCfjXyq?l9R2BM>i_?6adq3F<4 zz3byt5hi0|>1^43$ic+B;*rs@&t|8pN*+B#e@{11TU;Az&REgLl+xgI`w=x^Aow(6 zBszLwlzve!dB}ZxRc#eMG94vMbzNw45JCFWct`d6tI?L#m7Mu@e3}L_y7LF1SpVic z|KC!tyRU8B`o&ugdO>C?)~`85xAB|Q*yDN=uamg2%dLWMa`^_hn8p_%5xg^&0@U`< zRxHjoRaq|*e`o~!AU7Rt(IEK|srvqgu21)Axn%?h+wEPnmOdnP_3vbNj35!sgSZb# zh*jIxw*G9#HLu=j>`{W?yEP(>zYn>oD!*T~ybR<(MnI9u%)N)JISAqmo_JgHs(2Tb zMB>gxJC$%GpoBW>JhNw8BKY?tP`FefM8mRb(UsfTw)Dqsf46ZG?pKtY2{=m!|HAM1!z4^fPN17BD$fL*q6e5r!6>}V_ExTk!YL8MtUVp0-@A8WM zmqy(SUce5=7i%K&FI9_ls$_XlZ}2e&cErYwY=zT77VakM==h=;0R}UI$oGCAB#qY2 zj$JF_IE0vA+*EE(&(T1fO-D7*v_=S^D|F#C=1SqW_|#~M87_RPUaTH3Zj-85X5l1!(~n8HNR+0MAW$orSp7$ zqhMhDb{jomFQzxLB8K&Tt7E3KCDK%SznQ(a<`PDZHo7`2MUcpjRsS~td0Lc5<*Dyc z{|iT-13|ZmpoTbafhE*)?U#K|&_)&>ChGKJxNb?b2E2yn%h~ zNn0;9LZ;Sk(DAu-WxeauL7%m*uk$}FX2pq$ff-c_j&gR!vy*gCi*tNDAE#WbXUTak zOU*j*vuoW!v5RMBW;!5SKiB97*(^NT@#Knyk}y1H7*gQFEEZ!t>ggS;KC3qD@pT+k zMvg+E@KP>`0xlZ@Yrw5Jj~9$oFp#f+u7olV09%4&=4H9B_VOeFoUrU8rj{AhiLa^U zAXUu!IKoGTg$)<7?gx1H+2E*|{G5jm<0{+RS@k@_zw^Y$*0qA@dIiaHt~*5Y@wsRR z&^aK`0KZ(<4VHt3!T^+c{hPov`B=Mt(UY$|Lu*`AIksNvemx%iY<&u7@IqGqGTF@d zT}oQ=k~<4xe?tHGIKMZr-pyc$0|weMHXFC|LBno)`=#>`s#HS!Pn~|NY~+5c9pO(} zgm#It!>RNt26~(AOUlNN*X|E|tQ!(m?FYz)m5FzeuxJiT#hJTx?El1)V$fk^1bV+r-dq zh=E-3#swGU5=|+^qZDMUUfJ~EC08v1_q!m~cse>dDqw-PQ^SA$pmjPS$#z+>IP-@m zOI&D9-M#6yD)WJ?^LG3A1w9_8u8X9hKsAa-aLP-$X{>vliN^kM>*qf*9C!Eya&vy9Ud$IQ zEgDW8&Ei^|_oy-(DjoEQ*kIti*z2i-LSBCXs+Yd}r9&HRdUbG+0HlW=2r%v|JRq6~ zwFH(w3>3#HA@M3-O`we_{+(?e7dh3}gR|K&_t;AS46Dor`T|wrADqwcOS{=A4sHFN z?^OyY^#{38W}7To1(2k1GKxu;-%v>2)-hUX5_AYc6dkxq`X+@IzF$3ZYQ0 zR7VmW*UZMG`{A%X`zED#tVaI!ZbUd2b~3kZCE^Q&4P_Wy)4-|G7E09-qeMJr*E$5L zIq=WxOJ~;`_E+5A)*cA1mpPI^mt)vgOeQ8miRS@ccGUFh&*MH~;^XPkusUT?HRU{^ zh(}421MY5Lg_`Mh3wjpiJdXE1mM|=j8g@Z0CwvmO8?n3KUF&xEm4)G8G&1`5>W&|s z>o2&((rxDM20qsbbwXxeuULF`fH0x7WIF95e?$;BTH8; zEW}0__YGaFt9&YJ#LhWu?zD3*2wJpWZiP9woK>qQGQD))6tKlG$0?F z`a%A_{qBk)5CbZ_-KNigSQ(woEh^lZq=Wc=o%}i^F2t5c<87NXqd%e7J~cG0|8+P} zs^DE@OjS^3h7+k05pfgEgT>&S!e5$iL<^k|q`^M5_3RsLnu-vNH=e6F7dT65neiAC znPTcY<9Ys7*_~WI2gm%(RjXH;AQane>`M~0S~g@Z#i$`s-4i!Jx&S6{KYI%lhM|A) z*;}vyS8$Qa{9m$~`QwFVUXA;naoFoU!;ZetL@r%)JPQnEGl6fauR(>t^1s|^}xsp}L4){fd z=ntl@>w4GoXB*_3pCu}1lT*mDuaKt4kT$hL`2$oEi0z4(98%*V+OK}jJf6t(si}oJ zN=X$@4e3vBJ!|Dx;tAMS1%5(d6@Dq{ke)g|0~*h7&{six9`)5?F2)sS1XSH#SfFd9$R%&7YsFdH@nYdwxv z390vjrMU3c?h4Pa@DWW9^hbL7RgC&<2R`s%Og0X-tI_ zOBl}*&5<_sH!qWQVFM{7QX@}PFEVWt07&y2>z()+0D#YBzzg43>Ul!k#{lgF*32&I zomP>-QU(W-!?Fum4LH5idMt*C6ot}C!R080?O_7my@KN8F!NCj*`azhLn#LgDo{uKRN#gX@J&+*mk!|j6V#S^IZ ztKH)m#>=a{{*G2{puC}Y;i1Hx5V!DMAvI=rH7zi4J?Io}7iKtkDOd6njfQQGD ziayHO1a`=;b)V7P74O^?tYL~$LR3mzhWY2Ce?mzIXo_KS`<}WUJ=i+b?-z+vf#Xae zmjgTqur+Mn;4{h~`Ipd)fD3*dq9-Gw&fYV&xHpz{F7;ndjzmaPJzvxYH;A2cwz*2G zypXzgbKrB_e7@n>jzW574q%*9cLxBbLD1FqbvTaYe6yypY?0jF)FSNj36q~@`ENl_ zr=q{Zcm)qOFd{g*WVj(Z{H?p?M 
zDkOf8ha-;kuEa)%9iq8C9b>RF-{o=+H));@>f>NH`WNrh!fHM4C@)ql?%o)C>GR5fYmIk-k}m!) zlyr&VI$^Id9Q9!71iU9#m@?fM#o^{X{w1!I`&H>V+#ul)*mxv zRT<60!vpdwLZAT6Ysn)^_QH9?mk>d`kN7?QH2T>OMtzF=z~|ZE-c`B(hi-)a9|`;9 z&PLNUHHom&u_rM^t;Ic-Q)jp-z~{YzGCNuc41+39o17;@!p!or&H>xe&O$wRy2-;lZaHFi;Wxo z{t<`Eq~Gb%r(EeHvNp@@Vt^owAgRmt)7~^{QakfTKQblJ{w})mvwYokefl0#|k5i^&qF#LSX7x}d&lW9K z6a~)~ZsPGApN;-TzVyglh}{zFgK0F}-BjuusarRxpenLM4WHTuF)f)qa$sp`Jxjy! zE-ISzdp);3j4|ZF3=7K)?VY;T^=tTHe34~B0q@%;&6?+>%+rzf7qK2&9A6LYJjE!f zx0}PQo1tHw6n-_t%HCj@xNV4i?Y*0E<>t-Oi@9nAmS$mBaVfqSuPWzoeBi8FwXbNy zeCR1(KL!+q{FZnEZhOc`HJ=qfB=VVKD&OxI3K~WSx#)#fEG>CpBaRPW>6d(mVWv$) zz8cl=9W7593Md-z6?43}(z-Ot9qFJ>SYPNguiW+~p|1%`@;V^@jclY1t8`b3|K{1@ z=^_`nZ_nqi;>-X1`~R@jEfHPFOxy4}P)}v|Ovz_|wZu6BP9TZNG!rr=qh5{87a8%Vd zmZ~nF`JsnnqkkOs?#yA!B8h|k^kF>ryI)izZ|ot54kqhP1H^+;1^@x5_kJj!Kq(3$ zrU4Yup{)QcFHF^0kg=hu@t)bs zy+-U3#O;DYl&Z!9Md>uv-E+}#w!lmvU2fIq-1dI9@8Rkb7)@9F??m)}RFeJH%wF@G zmYCEJ5WzzNoS}IY_3i*%DE|Uq2d)E)&cvcqIyK&1lLX~ThuDfpR#MGv+hQB2OG1q> z(sX_ax&%=7;YJ_^mD$UDqAE^HjRYP&j5EhE(-$+pDVHFt`mYdc-J{8*- z4{FXcW8&@N;$j4pGN9`42y(&;SzZ->DRGZvGPaA7FLwTsFlnJqZwxhb?GLfr{b$`Y zM+Gl{Rl=U=`{Q3&W3~&iNKd9@@VDd-E;pzsEF5U+=G)%}L3*?Qcrg%)O2^%O*z7Rl zEjj}@ej~66=O3BDt%h8q-grmtyg zOi6X@Nu8f14(1<@SFrFNIOo@1T;q3tQIMADzbJ^8ih^jqz^5%ajKp$EP!`sVDeb$% zE0VD}Y=BiCfpVeqp%T=>fTa+7w})}<1SVx4jIwYELhH1?zny_6JI2V;VTaM|oABJr zP%(l$wx;BE(nx{!-wmkL-eFs*!Bo{#qDCHBJ6Gbc8OGwn_4&<&TJ8XnG5>e}TW2`i zxqcs@h*1l^KaPsyY@aRG(QH40H?$FA@AojqbZ4)TndHfX;-!L-ecTo`>kpY0=}L!B zALLg$O1;?&(;VdEBKASF`Sop;m)^`i*120{UYb8M4o>5aV>Hcv=kLcGKJ-hlR7c${ zBrC4)I5oyuWJQj|ftQj`q!f_vINEFC%05Am(f5C`(SM{2e{6NQtONREm)lwrpGj6? zrTx94%-iV-&OZ5YzMuUnRb1MKNAK#}hn>|(-q+_xt|Lz>LdMgY_A?u%xERuEIlVwq z-yzz(97KokH*&QxLoaZ4Oj{DZXNtPJ_5oSuC*8qpEl}kvmcn~g{;)x!GsT8##))=M z+#&Q?o{H$dhq^9tQ>;avU48Cc-(%x8L%+2{Ug7P9e`*C@d8Q|@MsJ}uEchn6Zt@)q z)pP34CdsesQ1yh6gl;*rF_y8UQAXtwH?gr4uSmX0Z|)#Y@LdV9sG8k*sFqwR74G{S zU<*NU?FZ?vivF)YD5RFRdeGhcuWnri>T=rnFX}>yzeeZgU1j2j8Sx0R#X2bg2armt zH7>4#HtH7=+a!uTvvK215})<(F)OEpH6$wMS}<86!ARhc-2lyvOiZn&Ozl=fCMh3_ zE~VqxGqO76$OvOUtxR4+6xbVc5cS7qiupJgi!uvhODyjOF5bNJ* z5j9e@QPx!&R8TJuOOe_Aj^hzfO2c-8toLh$qEYpD(Hu>&H=YF>`KP8BKB{F>&u?N; z6BE*`d&G4_X@ocRHD_pEI{5*-IKNgFdw4v!b~@}oKgQW4IdI&+;z~j!W-rBVyTich z*x3q$%0EKFuF~aU_2#EB)k?yTVona`a_`B~O?n{hNhi%s(WGy+9$#=7Ic&@6U8M-P zCVV2bx#{NuHKdBvOKtGlE3Ts^~(a(kmesdZ(r$9Ppp{ z`$8**zCX08RG6nSE2MV4Fk@6rS5fJ{^O`G7dY(HYO$7RNX(tW3+gzak0(!>+z%S^J z7kSH>D0gDutaMcOxs(%qTobvhufhaCal-`OFUmteM)%otmh2-ZLbp2(n%9haq4kS< zgJhDtHTm@)(NcSVHRad}9qiI!BypcLG`Cfl7lDX}*07fXr!d@HeM989w=5~lY%G44 zTY6{-RtIUyG2f6n2ijic452wORIDA<5z?V4OSRZgRrY0fRfdT}GS`kGs^by`L)3}z z1(rPzW7#eggTO- zlqZZ>{OpZAe6HGg(GAyKO}EIh&T75Qxe(c86Zj zfe_Jo=M z1oDB)jeePc8wj;D=2}!T4oPJ6uUh(8#SuO6gchu5n8R66)=?z5pX(9I?@6{C6~Btq zi5K%zC9{^FivPFt!V6XX8SKkMoha*)Eg8)sL)+*_W?#5kTEyKp5a=CQux-GU8GOZ? z`Z6+2g8g9*MWO^VQ;n|n3PPs3-e^O?i8sTA^)D=}OHi?h1C2riA-$JMjk1Y?;1+Wf(j-Ke_XGgo$E`l8 zj`BJV=uR9~V4d&TJ+?KUqlUx=7MNWI&>fbcvZ8;r Date: Thu, 22 Aug 2024 16:24:21 +0100 Subject: [PATCH 1812/1936] docs: Add SSH guide This is really easy win for people using DevStack for the first time. Change-Id: I8de2d4d115d34e9d87dd461016b5b894d3f000e7 Signed-off-by: Stephen Finucane --- doc/source/index.rst | 2 ++ doc/source/networking.rst | 40 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index a5a11e251b..70871ef876 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -133,6 +133,8 @@ there. 
You can ``source openrc`` in your shell, and then use the ``openstack`` command line tool to manage your devstack. +You can :ref:`create a VM and SSH into it `. + You can ``cd /opt/stack/tempest`` and run tempest tests that have been configured to work with your devstack. diff --git a/doc/source/networking.rst b/doc/source/networking.rst index e65c7ef195..f7df4f2e46 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -68,7 +68,7 @@ Shared Guest Interface .. warning:: This is not a recommended configuration. Because of interactions - between ovs and bridging, if you reboot your box with active + between OVS and bridging, if you reboot your box with active networking you may lose network connectivity to your system. If you need your guests accessible on the network, but only have 1 @@ -114,3 +114,41 @@ For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of ``FIXED_RANGE_V6`` will just use the value of that directly. ``SUBNETPOOL_PREFIX_V6`` will just default to the value of ``IPV6_ADDRS_SAFE_TO_USE`` directly. + +.. _ssh: + +SSH access to instances +======================= + +To validate connectivity, you can create an instance using the +``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP +using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach +this floating IP to the instance: + +.. code-block:: shell + + openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair + openstack server create --network private --key-name test-keypair ... test-server + fip_id=$(openstack floating ip create public -f value -c id) + openstack server add floating ip test-server ${fip_id} + +Once done, ensure you have enabled SSH and ICMP (ping) access for the security +group used for the instance. You can either create a custom security group and +specify it when creating the instance or add it after creation, or you can +modify the ``default`` security group created by default for each project. +Let's do the latter: + +.. code-block:: shell + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Finally, SSH into the instance. If you used the Cirros instance uploaded by +default, then you can run the following: + +.. code-block:: shell + + openstack server ssh test-server -- -l cirros + +This will connect using the ``cirros`` user and the keypair you configured when +creating the instance. From 14f60b951aa03ded3779057c7fef28f6c3ee15cc Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 26 Aug 2024 11:56:31 +0100 Subject: [PATCH 1813/1936] docs: Expand SSH guide Detail how one can SSH into guests running on a remote DevStack host. Change-Id: I9f988b1193d67859b129f05d08b32a23e50aee49 Signed-off-by: Stephen Finucane --- doc/source/networking.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index f7df4f2e46..93332f07a5 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -152,3 +152,41 @@ default, then you can run the following: This will connect using the ``cirros`` user and the keypair you configured when creating the instance. + +Remote SSH access to instances +============================== + +You can also SSH to created instances on your DevStack host from other hosts. +This can be helpful if you are e.g. 
deploying DevStack in a VM on an existing +cloud and wish to do development on your local machine. To do this, you will +either need to configure the guest to be `locally accessible `__ or you will need to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). We're going to use +a useful utility called `shuttle`__ here, but there are many other ways to +accomplish this. + +First, ensure you have allowed SSH and HTTP(S) traffic to your DevStack host. +Allowing HTTP(S) traffic is necessary so you can use the OpenStack APIs +remotely. How you do this will depend on where your DevStack host is running. + +Once this is done, install ``sshuttle`` on your localhost: + +.. code-block:: bash + + sudo apt-get install sshuttle || yum install sshuttle + +Finally, start ``sshuttle`` using the floating IP address range. Assuming you +are using the default value for ``$FLOATING_RANGE``, you can do: + +.. code-block:: bash + + sshuttle -r username@devstack-host 172.24.4.0/24 + +(where ``username`` and ``devstack-host`` are the username and hostname of your +DevStack host). + +You should now be able to create an instance and SSH into it, using the +instructions provided :ref:`above `. + +.. __: https://github.com/sshuttle/sshuttle From 49933804c9d151ce4a220c188ba4301afa0bf98c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 27 Aug 2024 12:08:23 +0100 Subject: [PATCH 1814/1936] docs: Expand SSH guide further smooney noted that using your DevStack host as a jump host is yet another reasonable option. Add this option also. Change-Id: I24887c254e131a8979653a7d17e64a708acf294a Signed-off-by: Stephen Finucane --- doc/source/networking.rst | 76 +++++++++++++++++++++++++++++++-------- 1 file changed, 61 insertions(+), 15 deletions(-) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 93332f07a5..05b4f34164 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -158,26 +158,66 @@ Remote SSH access to instances You can also SSH to created instances on your DevStack host from other hosts. This can be helpful if you are e.g. deploying DevStack in a VM on an existing -cloud and wish to do development on your local machine. To do this, you will -either need to configure the guest to be `locally accessible `__ or you will need to enable tunneling for the floating IP -address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) -defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). We're going to use -a useful utility called `shuttle`__ here, but there are many other ways to -accomplish this. +cloud and wish to do development on your local machine. There are a few ways to +do this. + +.. rubric:: Configure instances to be locally accessible + +The most obvious way is to configure guests to be locally accessible, as +described `above `__. This has the advantage of +requiring no further effort on the client. However, it is more involved and +requires either support from your cloud or some inadvisable workarounds. + +.. rubric:: Use your DevStack host as a jump host + +You can choose to use your DevStack host as a jump host. To SSH to a instance +this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh`` +command. For example: + +.. 
code-block:: + + openstack server ssh test-server -- -l cirros -J username@devstack-host + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +This can also be configured via your ``~/.ssh/config`` file, making it rather +effortless. However, it only allows SSH access. If you want to access e.g. a +web application on the instance, you will need to configure an SSH tunnel and +forward select ports using the ``-L`` option. For example, to forward HTTP +traffic: + +.. code-block:: -First, ensure you have allowed SSH and HTTP(S) traffic to your DevStack host. -Allowing HTTP(S) traffic is necessary so you can use the OpenStack APIs -remotely. How you do this will depend on where your DevStack host is running. + openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80 -Once this is done, install ``sshuttle`` on your localhost: +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +As you can imagine, this can quickly get out of hand, particularly for more +complex guest applications with multiple ports. + +.. rubric:: Use a proxy or VPN tool + +You can use a proxy or VPN tool to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many +such tools available to do this. For example, we could use a useful utility +called `shuttle`__. To enable tunneling using ``shuttle``, first ensure you +have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S) +traffic is necessary so you can use the OpenStack APIs remotely. How you do +this will depend on where your DevStack host is running. Once this is done, +install ``sshuttle`` on your localhost: .. code-block:: bash sudo apt-get install sshuttle || yum install sshuttle -Finally, start ``sshuttle`` using the floating IP address range. Assuming you -are using the default value for ``$FLOATING_RANGE``, you can do: +Finally, start ``sshuttle`` on your localhost using the floating IP address +range. For example, assuming you are using the default value for +``$FLOATING_RANGE``, you can do: .. code-block:: bash @@ -186,7 +226,13 @@ are using the default value for ``$FLOATING_RANGE``, you can do: (where ``username`` and ``devstack-host`` are the username and hostname of your DevStack host). -You should now be able to create an instance and SSH into it, using the -instructions provided :ref:`above `. +You should now be able to create an instance and SSH into it: + +.. code-block:: bash + + openstack server ssh test-server -- -l cirros + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `) .. 
__: https://github.com/sshuttle/sshuttle From 6a8f65b476883d5cccffbcad8650894850033231 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 21 Aug 2024 15:57:32 +0100 Subject: [PATCH 1815/1936] lib/swift: Consistently quota variables Change-Id: I6c3245a77cdc2849067568cfda5a838afda687e3 Signed-off-by: Stephen Finucane --- lib/swift | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/swift b/lib/swift index 1ebf073318..3659624d5b 100644 --- a/lib/swift +++ b/lib/swift @@ -844,14 +844,14 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! - openstack --os-cloud "" \ - --os-region-name $REGION_NAME \ - --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username=swift \ - --os-password=$SERVICE_PASSWORD \ - --os-user-domain-name=$SERVICE_DOMAIN_NAME \ - --os-project-name=$SERVICE_PROJECT_NAME \ - --os-project-domain-name=$SERVICE_DOMAIN_NAME \ + openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } From 31980e436b73db18297a295969069cf00bd43754 Mon Sep 17 00:00:00 2001 From: Jan Hartkopf Date: Mon, 10 Oct 2022 10:40:38 +0200 Subject: [PATCH 1816/1936] Configure option backup_ceph_max_snapshots in Cinder backup Sets the config option backup_ceph_max_snapshots for the Cinder Ceph backup driver to the specified value. Depends-On: https://review.opendev.org/c/openstack/cinder/+/810457 Signed-off-by: Jan Hartkopf Change-Id: I9e12e395288db1fe59490b4075bb2d933ccd4d78 --- lib/cinder_backups/ceph | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 4b180490d7..ea9b44fe8c 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -19,6 +19,7 @@ set +o xtrace # Defaults # -------- +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} @@ -38,6 +39,7 @@ function configure_cinder_backup_ceph { iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 From f49d475bf2e186ef3d7800e2bb55c9c360e7ac95 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 2 Oct 2024 09:36:55 +0000 Subject: [PATCH 1817/1936] Catch and print the postgresql initdb error The logs are stored, by default, in /var/lib/pgsql/initdb_postgresql.log. 
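The change below implements this with a bash EXIT trap that is installed just
before ``initdb`` runs and removed once it succeeds. As a standalone sketch of
the same pattern (assuming a Fedora/CentOS-style host where ``postgresql-setup``
exists and a script running with ``set -o errexit``):

    #!/usr/bin/env bash
    set -o errexit

    # Print the initdb log if anything below fails and the script exits early.
    function _exit_pg_init {
        sudo cat /var/lib/pgsql/initdb_postgresql.log
    }

    trap _exit_pg_init EXIT        # install the handler before the risky step
    sudo postgresql-setup initdb   # a failure here ends the script and fires the trap
    trap - EXIT                    # success: remove the handler so nothing is printed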
Related-Bug: #2083482 Change-Id: I2c83e896819b20cd7a1ee8d8ee33354fb047a6d9 --- lib/databases/postgresql | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index b21418b75e..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -46,6 +46,10 @@ function recreate_database_postgresql { createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db } +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + function configure_database_postgresql { local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and starting PostgreSQL" @@ -53,7 +57,9 @@ function configure_database_postgresql { pg_hba=/var/lib/pgsql/data/pg_hba.conf pg_conf=/var/lib/pgsql/data/postgresql.conf if ! sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` From 803a7d44c49f6adbfa878ee520756869df1ffe8a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 1 Oct 2024 07:42:34 -0700 Subject: [PATCH 1818/1936] Add image format enforcement toggle Related to blueprint glance-as-defender Needed-By: https://review.opendev.org/c/openstack/tempest/+/931028 Change-Id: I8b22ed85eefde399f2e472780106dd39e51a5700 --- lib/glance | 4 ++++ lib/tempest | 1 + 2 files changed, 5 insertions(+) diff --git a/lib/glance b/lib/glance index 274687112e..2eb4954f4a 100644 --- a/lib/glance +++ b/lib/glance @@ -104,6 +104,9 @@ GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) + GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf @@ -343,6 +346,7 @@ function configure_glance { # Only use these if you know what you are doing! 
See OSSN-0065 iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES diff --git a/lib/tempest b/lib/tempest index 310db2daa6..eeeef67a8b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -368,6 +368,7 @@ function configure_tempest { if [[ -n "$image_conversion" ]]; then iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT fi iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE From 9f9dccd608d5415e0de988a2aa0e74d049038e2a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 7 Oct 2024 11:57:58 -0700 Subject: [PATCH 1819/1936] Switch devstack nodeset to Ubuntu 24.04 (Noble) Ref: https://governance.openstack.org/tc/goals/selected/migrate-ci-jobs-to-ubuntu-noble.html Change-Id: I7ac8f9443c386e56c4ca45a171e104f0b350bc7f --- .zuul.yaml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a1c251a398..6a6b686ac1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -439,7 +439,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-jammy + nodeset: openstack-single-node-noble required-projects: - opendev.org/openstack/requirements vars: @@ -686,7 +686,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-jammy + nodeset: openstack-two-node-noble description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -727,18 +727,18 @@ configure_swap_size: 4096 - job: - name: devstack-platform-ubuntu-noble + name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 - description: Ubuntu 24.04 LTS (noble) platform test - nodeset: openstack-single-node-noble + description: Ubuntu 22.04 LTS (Jammy) platform test + nodeset: openstack-single-node-jammy timeout: 9000 vars: configure_swap_size: 8192 - job: - name: devstack-platform-ubuntu-jammy-ovn-source - parent: devstack-platform-ubuntu-jammy - description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) voting: false vars: devstack_localrc: @@ -748,10 +748,10 @@ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: - name: devstack-platform-ubuntu-jammy-ovs + name: devstack-platform-ubuntu-noble-ovs parent: tempest-full-py3 - description: Ubuntu 22.04 LTS (jammy) platform test (OVS) - nodeset: openstack-single-node-jammy + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble voting: false timeout: 9000 vars: @@ -853,7 +853,7 @@ - job: name: devstack-unit-tests - nodeset: ubuntu-jammy + nodeset: ubuntu-noble description: | Runs unit tests on devstack project. 
@@ -873,9 +873,9 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy-ovn-source - - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-ubuntu-noble + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1002,6 +1002,6 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy-ovn-source - - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-ubuntu-noble + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy From e8e3eb6dc9353a9df663bfdbb6448bb001ba6ee4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:20:17 +0100 Subject: [PATCH 1820/1936] doc: Update WSGI section Change-Id: Iaefe12e7fdeddb0fb6fe272cd4df3ce46470cc28 Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 28 +++++++++++----------------- stackrc | 2 +- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index a83b2de0df..f5f30c4d09 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -351,26 +351,21 @@ Example disabling RabbitMQ in ``local.conf``:: disable_service rabbit - Apache Frontend --------------- -The Apache web server can be enabled for wsgi services that support -being deployed under HTTPD + mod_wsgi. By default, services that -recommend running under HTTPD + mod_wsgi are deployed under Apache. To -use an alternative deployment strategy (e.g. eventlet) for services -that support an alternative to HTTPD + mod_wsgi set -``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your -``local.conf``. - -Each service that can be run under HTTPD + mod_wsgi also has an -override toggle available that can be set in your ``local.conf``. - -Keystone is run under Apache with ``mod_wsgi`` by default. +The Apache web server is enabled for services that support via WSGI. Today this +means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This +historical legacy is captured by the naming of many variables, which include +``MOD_WSGI`` rather than ``UWSGI``. -Example (Keystone):: - - KEYSTONE_USE_MOD_WSGI="True" +Some services support alternative deployment strategies (e.g. eventlet). You +can enable these ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. In addition, each service that can be run under HTTPD + +mod_wsgi also has an override toggle available that can be set in your +``local.conf``. These are, however, slowly being removed as services have +adopted standardized deployment mechanisms and more generally moved away from +eventlet. Example (Nova):: @@ -388,7 +383,6 @@ Example (Cinder):: CINDER_USE_MOD_WSGI="True" - Libraries from Git ------------------ diff --git a/stackrc b/stackrc index ab1f8a6ffd..33c18ce0a8 100644 --- a/stackrc +++ b/stackrc @@ -85,7 +85,7 @@ fi # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all -# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) +# services will rely on the local toggle variable. 
ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable From 851d82ccca40d61cbfd2319c38e0128bdaea247e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:37:57 +0100 Subject: [PATCH 1821/1936] stackrc: Die if attempting to use mod_wsgi We do not want to support this anymore. Change-Id: I8823e98809ed6b66c27dbcf21a00eea68ef403e8 Signed-off-by: Stephen Finucane --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 33c18ce0a8..c5a0fd4d2f 100644 --- a/stackrc +++ b/stackrc @@ -223,6 +223,9 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # proxy uwsgi in front of it, or "mod_wsgi", which runs in # apache. mod_wsgi is deprecated, don't use it. WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi # Repositories # ------------ From dda40363e6a78b5f3f018a962e5dff14e2c12cd9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:20:01 +0100 Subject: [PATCH 1822/1936] lib/nova: Remove NOVA_USE_MOD_WSGI We are in the process of migrating away from Eventlet. Nothing is setting this to a non-default value, meaning there is no reason to keep it around any more. Drop it. Change-Id: I036851810360539335502481955769b2308e3dcc Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 4 ---- lib/nova | 38 ++++-------------------------------- 2 files changed, 4 insertions(+), 38 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index f5f30c4d09..6b8a80563d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -367,10 +367,6 @@ mod_wsgi also has an override toggle available that can be set in your adopted standardized deployment mechanisms and more generally moved away from eventlet. -Example (Nova):: - - NOVA_USE_MOD_WSGI="True" - Example (Swift):: SWIFT_USE_MOD_WSGI="True" diff --git a/lib/nova b/lib/nova index 35c6893763..20e19da9a4 100644 --- a/lib/nova +++ b/lib/nova @@ -75,14 +75,6 @@ NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -# Toggle for deploying Nova-API under a wsgi server. We default to -# true to use UWSGI, but allow False so that fall back to the -# eventlet server can happen for grenade runs. -# NOTE(cdent): We can adjust to remove the eventlet-base api service -# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to -# mean "use uwsgi" because we'll be always using uwsgi. -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} - # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. 
@@ -393,11 +385,7 @@ function create_nova_accounts { create_service_user "nova" "admin" local nova_api_url - if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" - else - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" - fi + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)" get_or_create_endpoint \ @@ -513,11 +501,6 @@ function create_nova_conf { iniset $NOVA_CONF oslo_policy enforce_new_defaults False iniset $NOVA_CONF oslo_policy enforce_scope False fi - if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" - iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT - fi configure_keystone_authtoken_middleware $NOVA_CONF nova fi @@ -998,17 +981,8 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api "$NOVA_BIN_DIR/nova-api" - nova_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tsl enabled - if is_service_enabled tls-proxy; then - start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT - fi - else - run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" - nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ - fi + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then @@ -1114,11 +1088,7 @@ function start_nova_rest { local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - else - run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" - fi + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" export PATH=$old_path } From 7e8d5efdf2e0d1ff784e8aee2838e4bc7942856e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:34:18 +0100 Subject: [PATCH 1823/1936] lib/cinder: Remove CINDER_USE_MOD_WSGI Like Nova, nothing is setting this to False nowadays so there's no reason to persist with it. 
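One quick way to back up that claim before deleting such a toggle is to search
for remaining references; a rough sketch, where the checkout path is only an
assumption and not part of the change itself:

    # Hedged example: /opt/stack/devstack is a typical, but not guaranteed, location.
    grep -rn "CINDER_USE_MOD_WSGI" /opt/stack/devstack \
        || echo "no remaining references to CINDER_USE_MOD_WSGI"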
Change-Id: I0e1550992dde81c601175ef04da771ce8ca1dd29 Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 4 ---- lib/cinder | 39 ++++++------------------------------ 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 6b8a80563d..9a1fd4e179 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -375,10 +375,6 @@ Example (Heat):: HEAT_USE_MOD_WSGI="True" -Example (Cinder):: - - CINDER_USE_MOD_WSGI="True" - Libraries from Git ------------------ diff --git a/lib/cinder b/lib/cinder index dc284920e0..259018e7ab 100644 --- a/lib/cinder +++ b/lib/cinder @@ -160,10 +160,6 @@ fi # Supported backup drivers are in lib/cinder_backups CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} -# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi -# reference should be cleaned up to more accurately refer to uwsgi. -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} - # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then for be in ${CINDER_ENABLED_BACKENDS//,/ }; do @@ -393,14 +389,8 @@ function configure_cinder { if is_service_enabled tls-proxy; then if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then # Set the service port for a proxy to take the original - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True - else - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - fi + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True fi fi @@ -411,7 +401,7 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF # Format logging - setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI + setup_logging $CINDER_CONF if is_service_enabled c-api; then write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" @@ -477,11 +467,7 @@ function create_cinder_accounts { create_service_user "cinder" $extra_role local cinder_api_url - if [[ "$CINDER_USE_MOD_WSGI" == "False" ]]; then - cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT" - else - cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" - fi + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" # block-storage is the official service type get_or_create_service "cinder" "block-storage" "Cinder Volume Service" @@ -609,10 +595,6 @@ function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL local cinder_url - if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - service_port=$CINDER_SERVICE_PORT_INT - service_protocol="http" - fi if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf @@ -629,17 +611,8 @@ function start_cinder { fi if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - cinder_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tls enabled - if 
is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi - else - run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" - cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 - fi + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi echo "Waiting for Cinder API to start..." From e1465f1d73ad146c820d047932af1410dbc18675 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:38:42 +0100 Subject: [PATCH 1824/1936] lib/placement: Remove support for deploying with mod_wsgi Change-Id: If17deabc35d35dca1d94b0d15d258769f347b130 Signed-off-by: Stephen Finucane --- files/apache-placement-api.template | 27 ----------------- lib/placement | 47 ++--------------------------- 2 files changed, 3 insertions(+), 71 deletions(-) delete mode 100644 files/apache-placement-api.template diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template deleted file mode 100644 index 011abb95fc..0000000000 --- a/files/apache-placement-api.template +++ /dev/null @@ -1,27 +0,0 @@ -# NOTE(sbauza): This virtualhost is only here because some directives can -# only be set by a virtualhost or server context, so that's why the port is not bound. -# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing -# vhost. - - WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup placement-api - WSGIScriptAlias / %PUBLICWSGI% - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/placement-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /placement %PUBLICWSGI% - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/lib/placement b/lib/placement index 63fdfb6c1a..6297ab24fe 100644 --- a/lib/placement +++ b/lib/placement @@ -71,32 +71,6 @@ function cleanup_placement { remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } -# _config_placement_apache_wsgi() - Set WSGI config files -function _config_placement_apache_wsgi { - local placement_api_apache_conf - local venv_path="" - local placement_bin_dir="" - placement_bin_dir=$(get_python_exec_prefix) - placement_api_apache_conf=$(apache_site_config_for placement-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages" - placement_bin_dir=${PROJECT_VENV["placement"]}/bin - fi - - sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g; - s|%SSLENGINE%|$placement_ssl|g; - s|%SSLCERTFILE%|$placement_certfile|g; - s|%SSLKEYFILE%|$placement_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $placement_api_apache_conf -} - # create_placement_conf() - Write config function create_placement_conf { rm -f $PLACEMENT_CONF @@ -112,11 +86,7 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" - else - 
_config_placement_apache_wsgi - fi + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True @@ -147,7 +117,6 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { - install_apache_wsgi # Install the openstackclient placement client plugin for CLI pip_install_gr osc-placement git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH @@ -156,12 +125,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" - else - enable_apache_site placement-api - restart_apache_server - fi + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" echo "Waiting for placement-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then @@ -175,12 +139,7 @@ function start_placement { # stop_placement() - Disable the api service and stop it. function stop_placement { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - stop_process "placement-api" - else - disable_apache_site placement-api - restart_apache_server - fi + stop_process "placement-api" } # Restore xtrace From 169be464c2ac1c5fc16396e1c320465c4ce6a658 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:42:10 +0100 Subject: [PATCH 1825/1936] lib/keystone: Remove support for deploying with mod_wsgi Change-Id: I2409385a87ee7cc7869bba9711bf95ab5fe77dc7 Signed-off-by: Stephen Finucane --- lib/keystone | 46 ++++++---------------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) diff --git a/lib/keystone b/lib/keystone index 7d6b05fd41..8f4f4b1366 100644 --- a/lib/keystone +++ b/lib/keystone @@ -51,15 +51,6 @@ KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public -# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: -# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi -# - uwsgi : Run keystone under uwsgi -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - KEYSTONE_DEPLOY=uwsgi -else - KEYSTONE_DEPLOY=mod_wsgi -fi - # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -144,15 +135,9 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # These files will be created if we are running WSGI_MODE="mod_wsgi" - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) - else - stop_process "keystone" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - fi + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone @@ -241,12 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT 
debug $ENABLE_DEBUG_LOG_LEVEL - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" - _config_keystone_apache_wsgi - else # uwsgi - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - fi + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -543,10 +523,6 @@ function install_keystone { if is_service_enabled ldap; then setup_develop $KEYSTONE_DIR ldap fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - fi } # start_keystone() - Start running processes @@ -559,12 +535,7 @@ function start_keystone { auth_protocol="http" fi - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site keystone - restart_apache_server - else # uwsgi - run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." # Check that the keystone service is running. Even if the tls tunnel @@ -589,12 +560,7 @@ function start_keystone { # stop_keystone() - Stop running processes function stop_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site keystone - restart_apache_server - else - stop_process keystone - fi + stop_process keystone } # bootstrap_keystone() - Initialize user, role and project From 29545a5109df51f9e98b715b16968090a3928ab7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 16 Oct 2024 02:50:33 +0000 Subject: [PATCH 1826/1936] Updated from generate-devstack-plugins-list Change-Id: I374de22c7c916f9497c55bf404141776fd17f6c8 --- doc/source/plugin-registry.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 21cf52c736..8b9d1f2b96 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -48,9 +48,7 @@ openstack/ironic-inspector `https://opendev.org/openstack/ironic-i openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ openstack/keystone `https://opendev.org/openstack/keystone `__ -openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ -openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin `__ openstack/magnum `https://opendev.org/openstack/magnum `__ openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ openstack/manila `https://opendev.org/openstack/manila `__ From 50b0b602279fc0eb8d2bb9cab1d235197df72ec6 Mon Sep 17 00:00:00 2001 From: Takashi Natsume Date: Sun, 29 Sep 2024 21:28:47 +0900 Subject: [PATCH 1827/1936] Replace deprecated datetime.utcnow() The datetime.utcnow() is deprecated in Python 3.12. Replace datetime.utcnow() with datetime.now(datetime.timezone.utc).replace(tzinfo=None). 
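As a quick illustration (assuming a Python 3.12 interpreter is available as
``python3``), the deprecated call can be made to fail loudly while the
replacement runs cleanly:

    # Surfacing DeprecationWarning as an error makes the utcnow() deprecation visible on 3.12+.
    python3 -W error::DeprecationWarning -c 'import datetime; print(datetime.datetime.utcnow())'
    # The replacement produces the same naive (tzinfo=None) UTC timestamp.
    python3 -c 'import datetime; print(datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None))'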
Change-Id: I9bf6f69d9e174d490bb4f3eaef3b364ddf97a954 Signed-off-by: Takashi Natsume --- tools/outfilter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index e910f79ff2..55f9ee1487 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -76,7 +76,8 @@ def main(): # with zuulv3 native jobs and ansible capture it may become # clearer what to do if HAS_DATE.search(line) is None: - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) From 2e04d0fa20d2f6ceaf537423dad6b00d289b531c Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 24 Oct 2024 00:54:28 +0900 Subject: [PATCH 1828/1936] Globally skip devstack job for pre-commit config update pre-commit has been introduced to number of projects like oslo to run lint checks such as hacking. The pre-commit config file does not affect functionality, so devstack job is not needed when only the file is updated. Change-Id: I4294fe0c4df2c36c8575613b05a1f9c2eb745d18 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index a1c251a398..b5477d1a8f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -432,6 +432,8 @@ - ^releasenotes/.*$ # Translations - ^.*/locale/.*po$ + # pre-commit config + - ^.pre-commit-config.yaml$ - job: name: devstack-minimal From 2fdb729e04e8227ac5bfe619df20c1854bf255bc Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 12 Nov 2024 15:02:15 -0800 Subject: [PATCH 1829/1936] Use venv module for PROJECT_VENV building Currently, if USE_VENV=True, PROJECT_VENVs are initialized using the tools/build_venv.sh script; this script depends on the virtualenv module, which is much less commonly available than the built-in venv module which we already use many places. This changes the build_venv.sh script to use `python -m venv` instead. Needed-By: https://review.opendev.org/c/openstack/ironic/+/930776 Change-Id: I89fa2c0c4261e715064e77a766d98a34299532b3 --- tools/build_venv.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_venv.sh b/tools/build_venv.sh index cfa39a82e0..a439163b5d 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -38,7 +38,7 @@ if [[ -z "$TOP_DIR" ]]; then fi # Build new venv -virtualenv $VENV_DEST +python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST # Install modern pip PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip From f41479f370e75c7ea7f17a62135f3af99b91c781 Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Mon, 30 Sep 2024 15:14:20 +0200 Subject: [PATCH 1830/1936] Pull RDO Trunk repos when CentOS Stream official RPM not available The RDO project releases centos-release-openstack-* RPM a couple of weeks after the OpenStack upstream project cut master branch. In order to fill the gap in the meantime, we have to pull the repos from RDO Trunk. 
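The fallback below boils down to: prefer the official release RPM if dnf can
find it, otherwise fetch the matching delorean-deps repo file from RDO Trunk.
A minimal sketch of that logic, where the release name is only an example
value:

    # "caracal" is a placeholder; stack.sh sets the real value based on the branch being deployed.
    rdo_release=caracal
    if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then
        sudo dnf -y install centos-release-openstack-${rdo_release}
    else
        sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo \
            -O /etc/yum.repos.d/delorean-deps.repo
    fi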
Change-Id: If95a687f2d7579779129eeb689cfa46b92450dc5 --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dcfd398c01..bfa0573f21 100755 --- a/stack.sh +++ b/stack.sh @@ -308,8 +308,11 @@ function _install_rdo { # adding delorean-deps repo to provide current master rpms sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else - # For stable/unmaintained branches use corresponding release rpm - sudo dnf -y install centos-release-openstack-${rdo_release} + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi fi fi sudo dnf -y update From c6c5e12f6b1b5f7dac3d9f942c5ab8135618112d Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Wed, 13 Nov 2024 10:33:28 +0100 Subject: [PATCH 1831/1936] Revert "Use SETUPTOOLS_USE_DISTUTILS=stdlib for global pip installs" This partially reverts commit 18b4251bf4f689923a19bf7fbc50d5c2ea422b21. Support for loading distutils from the standard library is now deprecated since v70.3.0 [1]. It was needed initially to ease the transition and can be removed now. [1] https://setuptools.pypa.io/en/latest/history.html#v70-3-0 Change-Id: Ib929219ae81b802a4632963ef71a258edf4deee5 --- inc/python | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/inc/python b/inc/python index 2083b74dc1..c94e5a4952 100644 --- a/inc/python +++ b/inc/python @@ -199,13 +199,7 @@ function pip_install { echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" - # See - # https://github.com/pypa/setuptools/issues/2232 - # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html - # this makes setuptools >=50 use the platform distutils. - # We only want to do this on global pip installs, not if - # installing in a virtualenv - local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib " + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" echo "Using python $PYTHON3_VERSION to install $package_dir" fi From b8cd9d11730206eb81c08b6d181503068b93c1d5 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Thu, 7 Nov 2024 08:27:13 +0000 Subject: [PATCH 1832/1936] Support for s3 backend of glance This commit introduces support for s3 backend for glance. You can enabled it in your deployment by adding below options in your local.conf file. For single store support: enable_service s3api s-proxy s-account s-container disable_service tls-proxy GLANCE_USE_S3 = True For multistore support: enable_service s3api s-proxy s-account s-container disable_service tls-proxy GLANCE_USE_S3 = True GLANCE_ENABLE_MULTIPLE_STORES: True NOTE: At the moment devstack does not support tls with s3, this support will be added soon. 
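Put together as a local.conf fragment (a sketch only; the settings are
the ones listed above, written in the usual localrc assignment form),
the multistore case looks like:

    [[local|localrc]]
    enable_service s3api s-proxy s-account s-container
    disable_service tls-proxy
    GLANCE_USE_S3=True
    GLANCE_ENABLE_MULTIPLE_STORES=True

For single store support, drop the GLANCE_ENABLE_MULTIPLE_STORES line.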
Needed-By: https://review.opendev.org/c/openstack/glance/+/934311 Change-Id: Ic7264dc7faccb5e68c8df3b929eaa6d04149c6a2 --- lib/glance | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index 2eb4954f4a..5c3643d008 100644 --- a/lib/glance +++ b/lib/glance @@ -41,6 +41,12 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + # Cinder for Glance USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # GLANCE_CINDER_DEFAULT_BACKEND should be one of the values @@ -174,6 +180,34 @@ function cleanup_glance { remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" } +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi +} + # Set multiple cinder store related config options for each of the cinder store # function configure_multiple_cinder_stores { @@ -258,7 +292,6 @@ function configure_glance_store { local be if [[ "$glance_enable_multiple_stores" == "False" ]]; then - # Configure traditional glance_store if [[ "$use_cinder_for_glance" == "True" ]]; then # set common glance_store parameters iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" @@ -281,7 +314,7 @@ function configure_glance_store { if [[ "$use_cinder_for_glance" == "True" ]]; then # Configure multiple cinder stores for glance configure_multiple_cinder_stores - else + elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then # Configure multiple file stores for glance configure_multiple_file_stores fi @@ -360,8 +393,15 @@ function configure_glance { # No multiple stores for swift yet if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then + # Return if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. 
iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True @@ -379,6 +419,12 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi fi # We need to tell glance what it's public endpoint is so that the version @@ -484,6 +530,13 @@ function create_glance_accounts { configure_glance_quotas fi + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi fi } From 3f1cd9b076b1338d42031e3801cb6316d169c51e Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Tue, 19 Nov 2024 10:50:05 +0000 Subject: [PATCH 1833/1936] Remove the Neutron WSGI experimental jobs Neutron is moving all the jobs to use the Neutron API WSGI module, thus this module is no longer an experimental configuration. Change-Id: Iaf708cd5e6ab414b262a0daecb3909ace2376f68 --- .zuul.yaml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b5477d1a8f..3632dc03e8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -956,15 +956,6 @@ # things, this job is not experimental but often is used to test # things that are not yet production ready or to test what will be # the new default after a deprecation period has ended. - # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test - # when neutron-api is served by uwsgi, it's in exprimental for testing. - # the next cycle we can remove this job if things turn out to be - # stable enough. - # * neutron-functional-with-uwsgi: maintained by neutron for functional - # test. Next cycle we can remove this one if things turn out to be - # stable engouh with uwsgi. - # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. - # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration @@ -972,9 +963,6 @@ jobs: - nova-multi-cell - nova-next - - neutron-fullstack-with-uwsgi - - neutron-functional-with-uwsgi - - neutron-ovn-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ From df0bfe4d5c3519d5c4deb5033bec2217c90c9425 Mon Sep 17 00:00:00 2001 From: 0weng Date: Tue, 12 Nov 2024 11:44:13 -0800 Subject: [PATCH 1834/1936] Doc: Fix variable name in logging config example $LOGDIR --> $DEST Change-Id: I461422f48525edf4ecb388b2f962edb03795db50 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 9a1fd4e179..3cfba716ca 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -323,7 +323,7 @@ a file, keep service logs and disable color in the stored files. 
[[local|localrc]] DEST=/opt/stack/ - LOGFILE=$LOGDIR/stack.sh.log + LOGFILE=$DEST/stack.sh.log LOG_COLOR=False Database Backend From c1eeb773a85b0485b6329e325ac1685d8e3b2dc4 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Thu, 10 Oct 2024 15:26:52 +0200 Subject: [PATCH 1835/1936] lib/keystone: Migrate Keystone to WSGI module path Depends-on: https://review.opendev.org/c/openstack/keystone/+/932060 Change-Id: I10bea74fb0bce1888d324a61f23c25b8f7082e97 --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 8f4f4b1366..76e2598ba3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -49,7 +49,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -226,7 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 From 6578d6ad27f04bdbfd1c30a13a7fc7ae47c2fb49 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 23 Nov 2024 21:44:17 +0900 Subject: [PATCH 1836/1936] Remove unused python-etcd3 python-etcd3 can't be used since etcd3 driver was removed from tooz in tooz 5.0.0 [1]. [1] 6bc02cda5b452bbf4821621eafc031bd676f8a2f Change-Id: I30b895b4473e2676085e27969a43b91be9b1e413 --- lib/libraries | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..fa418785dd 100755 --- a/lib/libraries +++ b/lib/libraries @@ -131,12 +131,7 @@ function install_libs { # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - # etcd (because tooz does not have a hard dependency on these) - # - # NOTE(sdague): this is currently a work around because tooz - # doesn't pull in etcd3. - pip_install etcd3 pip_install etcd3gw } From ef63c690f119e3d9a7890215ee8832da4f5fb4dc Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 24 Nov 2024 22:48:08 +0900 Subject: [PATCH 1837/1936] Drop redundant lib/oslo It was kept for compatibility in renaming which was merged long ago[1], and is no longer necessary. 
[1] 3ed99c0b27122ff00e2d236086ab16b0cc1887c1 Depends-on: https://review.opendev.org/c/openstack/grenade/+/936095 Change-Id: I6a66359c19d0385beafb4c5e57b6ec3cd6d9cc54 --- clean.sh | 2 +- lib/libraries | 2 +- lib/oslo | 11 ----------- 3 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 lib/oslo diff --git a/clean.sh b/clean.sh index 6a31cc624a..092f557a88 100755 --- a/clean.sh +++ b/clean.sh @@ -40,7 +40,7 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..9d5d65532e 100755 --- a/lib/libraries +++ b/lib/libraries @@ -1,6 +1,6 @@ #!/bin/bash # -# lib/oslo +# lib/libraries # # Functions to install libraries from git # diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 3ae64c8210..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# lib/oslo -# -# Functions to install **Oslo** libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. -# -# Included for compatibility with grenade, remove in Queens -source $TOP_DIR/lib/libraries From ec49b3e1672ef47d59509132e95f94d6be13abfe Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 27 Nov 2024 02:42:01 +0000 Subject: [PATCH 1838/1936] Updated from generate-devstack-plugins-list Change-Id: I344c3492159d53c68002b78ac3c385c1beca0e61 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 8b9d1f2b96..2984a5c15f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -31,6 +31,7 @@ openstack/ceilometer `https://opendev.org/openstack/ceilomet openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ From 451236381d4f6af0072b60fc65743b55ee33ab95 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 29 Nov 2024 07:39:37 +0000 Subject: [PATCH 1839/1936] Add start time (in seconds) to the WSGI configuration file This new variable "start-time" is initialized when the WSGI starts and is the timestamp in seconds. Related-Bug: #2083570 Change-Id: I1b984b93d1352683097c1417b22d64341a68f72a --- lib/apache | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/apache b/lib/apache index 1420f76ff2..1c034d3c7e 100644 --- a/lib/apache +++ b/lib/apache @@ -293,6 +293,8 @@ function write_uwsgi_config { iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then @@ -367,6 +369,8 @@ function write_local_uwsgi_http_config { iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t enable_apache_mod proxy enable_apache_mod proxy_http From 97ea68ec4611391de2e245a1def655cbebc7649d Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 2 Dec 2024 17:46:42 +0530 Subject: [PATCH 1840/1936] Fix the db user for mariadb in ubuntu 24.04 It was fixed in past for ubuntu 22.04 with [1], removing the check for jammy so it applies to all ubuntu versions since jammy. The checks now only refer debian distros so those can be adjusted with new debian releases. [1] https://review.opendev.org/c/openstack/devstack/+/866944 Related-Bug: #1999090 Closes-Bug: #2090835 Change-Id: Iff843c5c3f9c081aa1cec6c399a6ed8c05e06abe --- lib/databases/mysql | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index e069e128e9..629014c1d8 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -104,10 +104,10 @@ function configure_database_mysql { # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, # because the package might have been installed already. We don't do this - # for Ubuntu 22.04 (jammy) because the authorization model change in + # for Ubuntu 22.04+ because the authorization model change in # version 10.4 of mariadb. See # https://mariadb.org/authentication-in-mariadb-10-4/ - if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi @@ -124,16 +124,11 @@ function configure_database_mysql { # we need to change auth plugin for root user # TODO(frickler): simplify this logic if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - if [[ "$DISTRO" == "jammy" ]]; then - # For Ubuntu 22.04 (jammy) we follow the model outlined in - # https://mariadb.org/authentication-in-mariadb-10-4/ - sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" - else - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" - fi + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" fi - if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then # Create DB user if it does not already exist sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: From 320c2bf42ae41d751c72d80a6c85b26f3f6951bd Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 3 Dec 2024 17:04:39 +0000 Subject: [PATCH 1841/1936] Install setuptools 'core' extra Under as-yet-unidentified conditions, we can end up with a version of packaging that is too old for the version of latest version of setuptools. This is a known issue and expected behavior and per [1] $subject is the preferred resolution. [1] https://github.com/pypa/setuptools/issues/4483#issuecomment-2237219597 Change-Id: I9232f3fae1598297e83c4ea37339896f7dcbd44f Signed-off-by: Stephen Finucane --- inc/python | 4 ++-- lib/infra | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index c94e5a4952..857f1f2d06 100644 --- a/inc/python +++ b/inc/python @@ -40,8 +40,8 @@ function setup_devstack_virtualenv { # Using system site packages to enable nova to use libguestfs. # This package is currently installed via the distro and not # available on pypi. - python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV - pip_install -U pip setuptools + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] #NOTE(rpittau): workaround for simplejson removal in osc # https://review.opendev.org/c/openstack/python-openstackclient/+/920001 pip_install -U simplejson diff --git a/lib/infra b/lib/infra index 2aad00354a..f4760c352c 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped From 9486709dc5e6f156dc5beb051f1861ea362ae10c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 3 Dec 2024 17:07:57 +0000 Subject: [PATCH 1842/1936] Revert "Install simplejson in devstack venv" This reverts commit 6990b06cd321930f69907ba42ee744755f8029fe. This should no longer be necessary as packages are requiring simplejson. Change-Id: I74b0f93457f91e7d53d54737d52f67075088faca Signed-off-by: Stephen Finucane --- inc/python | 3 --- 1 file changed, 3 deletions(-) diff --git a/inc/python b/inc/python index 857f1f2d06..bd58905e9e 100644 --- a/inc/python +++ b/inc/python @@ -42,9 +42,6 @@ function setup_devstack_virtualenv { # available on pypi. 
$PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" pip_install -U pip setuptools[core] - #NOTE(rpittau): workaround for simplejson removal in osc - # https://review.opendev.org/c/openstack/python-openstackclient/+/920001 - pip_install -U simplejson fi if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then export PATH="$DEVSTACK_VENV/bin:$PATH" From 5bf9d13f2737ca9c8a15b7d250a48ef8be935a05 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 9 Dec 2024 14:03:44 +0000 Subject: [PATCH 1843/1936] lib/placement: Migrate placement to WSGI module path Change-Id: If9e2cc9247d707a451ef394615e547515115f9e0 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/placement/+/919569 --- lib/placement | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/placement b/lib/placement index 6297ab24fe..03aaa0344b 100644 --- a/lib/placement +++ b/lib/placement @@ -37,7 +37,7 @@ if [[ ${USE_VENV} = True ]]; then else PLACEMENT_BIN_DIR=$(get_python_exec_prefix) fi -PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api +PLACEMENT_UWSGI=placement.wsgi.api:application PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini if is_service_enabled tls-proxy; then @@ -86,7 +86,7 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True From 05f7d302cfa2da73b2887afcde92ef65b1001194 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Dec 2023 10:48:10 +0000 Subject: [PATCH 1844/1936] lib/cinder: Migrate cinder to WSGI module path Change-Id: I494dae51c65318299d4fe2ff5887c97ac2be3224 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/cinder/+/902876 --- lib/cinder | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 259018e7ab..b557d4b10b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -62,7 +62,7 @@ CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi +CINDER_UWSGI=cinder.wsgi.api:application CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini @@ -404,7 +404,7 @@ function configure_cinder { setup_logging $CINDER_CONF if is_service_enabled c-api; then - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then From b8cbcff693f3f1ddfa9c60c7c826629987a2d23e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 25 Mar 2024 20:00:10 +0900 Subject: [PATCH 1845/1936] Drop removed glance-cache.conf options These were removed when glance-registry was removed[1]. 
[1] 30680961994b36ed12713c0f106b661535ce41c6 Change-Id: Iaa4a35fddcd4763e12e5140b71e4022421c476fc --- lib/glance | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/glance b/lib/glance index 5c3643d008..6d6b158e74 100644 --- a/lib/glance +++ b/lib/glance @@ -446,10 +446,6 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ From c9a4454450429491c34184d0ceb85eaba62cc525 Mon Sep 17 00:00:00 2001 From: Fernando Royo Date: Thu, 12 Dec 2024 11:01:36 +0100 Subject: [PATCH 1846/1936] Removing start_ovn_services call The function _start_ovn is responsible for starting the OVS/OVN services. However, its final action is a call to _start_ovn_services, which restarts all OVS/OVN services without any justified reason. This patch removes that call to avoid unnecessarily restarting all OVS/OVN services immediately after they have been started. Closes-Bug: #2091614 Change-Id: Ia791ecb734531fa933c570d687ac9224ed6b27e4 --- lib/neutron_plugins/ovn_agent | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index be3a9e78b2..b7633c8c17 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -704,30 +704,6 @@ function _start_ovs { fi } -function _start_ovn_services { - _start_process "$OVSDB_SERVER_SERVICE" - _start_process "$OVS_VSWITCHD_SERVICE" - - if is_service_enabled ovn-northd ; then - _start_process "$OVN_NORTHD_SERVICE" - fi - if is_service_enabled ovn-controller ; then - _start_process "$OVN_CONTROLLER_SERVICE" - fi - if is_service_enabled ovn-controller-vtep ; then - _start_process "$OVN_CONTROLLER_VTEP_SERVICE" - fi - if is_service_enabled ovs-vtep ; then - _start_process "devstack@ovs-vtep.service" - fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then - _start_process "devstack@q-ovn-metadata-agent.service" - fi - if is_service_enabled q-ovn-agent neutron-ovn-agent ; then - _start_process "devstack@q-ovn-agent.service" - fi -} - # start_ovn() - Start running processes, including screen function start_ovn { echo "Starting OVN" @@ -797,8 +773,6 @@ function start_ovn { # Format logging setup_logging $OVN_AGENT_CONF fi - - _start_ovn_services } function _stop_ovs_dp { From bf04bf517b839fa495384f636b7f8d4f05c6fa0e Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 27 Dec 2024 13:01:16 +0530 Subject: [PATCH 1847/1936] Switch to OVS/OVN LTS branches As discussed in Epoxy PTG[1] switching jobs to run with latest OVS/OVN LTS branches. Ubuntu noble and CentOS 9-stream also including these LTS versions. 
[1] https://etherpad.opendev.org/p/oct2024-ptg-neutron Change-Id: Iecb33628641cd33b6e46d09759e3180cc0bd55e9 --- .zuul.yaml | 4 ++-- lib/neutron_plugins/ovn_agent | 2 +- lib/neutron_plugins/ovs_source | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 2fbfa0417c..74ce39cdfa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -745,8 +745,8 @@ vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True - OVN_BRANCH: "v21.06.0" - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index be3a9e78b2..ad5c1f3003 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -28,7 +28,7 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') OVN_REPO_NAME=${OVN_REPO_NAME:-ovn} -OVN_BRANCH=${OVN_BRANCH:-v20.06.1} +OVN_BRANCH=${OVN_BRANCH:-branch-24.03} # The commit removing OVN bits from the OVS tree, it is the commit that is not # present in OVN tree and is used to distinguish if OVN is part of OVS or not. # https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 75e7d7cb36..6b6f531a01 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -20,7 +20,7 @@ Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} -OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5} +OVS_BRANCH=${OVS_BRANCH:-branch-3.3} # Functions From b609c80a36f7ac77c1eb7ec256e6808ab483440b Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 7 Jan 2025 10:51:03 +0900 Subject: [PATCH 1848/1936] doc: Use dnf instead of yum The yum command has been replaced by the dnf command in recent releases of Fedora-based distributions. Use the native command instead of the alias kept for compatibility. Change-Id: I0a1dfdaca91164eff2c25795f66976ec70356574 --- doc/source/guides/multinode-lab.rst | 2 +- doc/source/guides/single-machine.rst | 4 ++-- doc/source/guides/single-vm.rst | 4 ++-- doc/source/networking.rst | 2 +- doc/source/plugins.rst | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 658422b0af..4b50b2c4ae 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -24,7 +24,7 @@ Install a couple of packages to bootstrap configuration: :: - apt-get install -y git sudo || yum install -y git sudo + apt-get install -y git sudo || dnf install -y git sudo Network Configuration --------------------- diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a4385b5b4b..263fbb9d6f 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -62,7 +62,7 @@ to have sudo privileges: .. code-block:: console - $ apt-get install sudo -y || yum install -y sudo + $ apt-get install sudo -y || dnf install -y sudo $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. 
@@ -81,7 +81,7 @@ We'll grab the latest version of DevStack via https: .. code-block:: console - $ sudo apt-get install git -y || sudo yum install -y git + $ sudo apt-get install git -y || sudo dnf install -y git $ git clone https://opendev.org/openstack/devstack $ cd devstack diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 7dac18b333..4272a4b180 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -56,8 +56,8 @@ passed as the user-data file when booting the VM. write_files: - content: | #!/bin/sh - DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo yum update -qy - DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git sudo chown stack:stack /home/stack cd /home/stack git clone https://opendev.org/openstack/devstack diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 05b4f34164..10e1c3ff2c 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -213,7 +213,7 @@ install ``sshuttle`` on your localhost: .. code-block:: bash - sudo apt-get install sshuttle || yum install sshuttle + sudo apt-get install sshuttle || dnf install sshuttle Finally, start ``sshuttle`` on your localhost using the floating IP address range. For example, assuming you are using the default value for diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index dd75b5a22d..fe567e2277 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -232,7 +232,7 @@ an early phase of its execution. These packages may be defined in a plugin as files that contain new-line separated lists of packages required by the plugin -Supported packaging systems include apt and yum across multiple +Supported packaging systems include apt and dnf across multiple distributions. To enable a plugin to hook into this and install package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: From 9a1cdbc3c809f785ad01a3bbdfef8f552eafce30 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 8 Jan 2025 18:43:40 +0530 Subject: [PATCH 1849/1936] Update glance image size limit The image size limit for glance using the unified limits is set to 1000MB (~1GB). This is pretty low given that a volume's minimum size is 1GB and when uploaded to glance fills out the whole limit. The limit issue can also be seen by a recent tempest change[1] where uploading two volumes failed[2] across various jobs due to this limit. We do have a config option in devstack ``GLANCE_LIMIT_IMAGE_SIZE_TOTAL`` but that will need to be configured for various jobs and a 2GB seems to be a sensible default which this patch configures. [1] https://review.opendev.org/c/openstack/tempest/+/938592 [2] Jan 07 23:05:33 devstack-ceph cinder-volume[909965]: ERROR oslo_messaging.rpc.server cinder.exception.ImageLimitExceeded: HTTP 413 Request Entity Too Large: The request returned a 413 Request Entity Too Large. 
This generally means that rate limiting or a quota threshold was breached.: The response body:: Project dfe8648c188d46409349eac2c449c0b4 is over a limit for [Resource image_size_total is over limit of 1000 due to current usage 1024 and delta 0] Change-Id: I533b7444e5f71275ea3d5c18914e306b1dbbc5cb --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 5c3643d008..9655cc4103 100644 --- a/lib/glance +++ b/lib/glance @@ -137,7 +137,7 @@ GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack -GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000} +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet # TODO(mtreinish): Remove the eventlet path here and in all the similar From ad698f0b8c658fcdddb11d5edae1e77f08b5d0b5 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 8 Jan 2025 15:26:41 -0500 Subject: [PATCH 1850/1936] Introduce SERVICES_FOR_TEMPEST variable for localrc This variable overrides the `ENABLED_SERVICES` global variable only for the `configure_tempest()` function from `lib/tempest`. If the `SERVICES_FOR_TEMPEST` variable is not defined then `ENABLED_SERVICES` is used as it had used it before. This is useful for cases where Tempest is executed from a remote node. Change-Id: Ic62e48f2f1eb861ec64f51e03353868076cbcc04 --- doc/source/guides/multinode-lab.rst | 42 +++++++++++++++++++++++++++++ lib/tempest | 2 ++ 2 files changed, 44 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 658422b0af..e6b0b96eb4 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -210,6 +210,48 @@ only needs to be performed for subnodes. .. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. 
For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + Cleaning Up After DevStack -------------------------- diff --git a/lib/tempest b/lib/tempest index eeeef67a8b..b8f9915a87 100644 --- a/lib/tempest +++ b/lib/tempest @@ -197,6 +197,8 @@ function configure_tempest { pip_install_gr testrepository fi + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images From a976168235bd79c9a8c960aa4889fe9ab03570c0 Mon Sep 17 00:00:00 2001 From: Fernando Royo Date: Wed, 4 Dec 2024 16:44:52 +0100 Subject: [PATCH 1851/1936] Refactor readiness and custom config for ovn-nortd Initially, this patch ensured that the custom configuration and readiness checks were applied after every restart of the OVN North services. However, after removing the call that triggered the restarting of the OVN/OVS services in [1], this patch now serves as a refactor, separating the readiness checks and custom configuration into a dedicated function. [1] https://review.opendev.org/c/openstack/devstack/+/937606 Related-bug: #2091614 Related-bug: #2091019 Change-Id: Icba271292830204da94aa3353e93d52088d82eec --- lib/neutron_plugins/ovn_agent | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b7633c8c17..8c5d82d3f0 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -704,6 +704,25 @@ function _start_ovs { fi } +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL +} + # start_ovn() - Start running processes, including screen function start_ovn { echo "Starting OVN" @@ -725,21 +744,8 @@ function start_ovn { _start_process "$OVN_NORTHD_SERVICE" fi - # Wait for the service to be ready - # Check for socket and db files for both OVN NB and SB - wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock - wait_for_db_file $OVN_DATADIR/ovnnb_db.db - wait_for_db_file $OVN_DATADIR/ovnsb_db.db + _wait_for_ovn_and_set_custom_config - if is_service_enabled tls-proxy; then - sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - fi - sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then From 7129f3a45e66060d19a250e31fd35156e45a8af8 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Fri, 10 Jan 2025 11:02:35 -0500 Subject: [PATCH 1852/1936] Quiet regex SyntaxWarning in mlock_report Use a raw string to avoid SyntaxWarnings being issued by this script. Change-Id: I81557158013aa36fe27235c461486dfbc37c9f27 --- tools/mlock_report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/mlock_report.py b/tools/mlock_report.py index 1b081bbe6f..8cbda15895 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -6,7 +6,7 @@ LCK_SUMMARY_REGEX = re.compile( - "^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) + r"^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) def main(): From 497b4fdf97d8b4e5f1ea1130f4e145014bbb462c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 18 Oct 2024 13:47:55 +0100 Subject: [PATCH 1853/1936] lib/glance: Migrate Glance to WSGI module path We also remove an out-of-date note from the called method. Change-Id: I7cc9fd6a568246342395388c31ae0a0918a2c79a Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/glance/+/932701 --- lib/apache | 4 +--- lib/glance | 7 +++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index 1c034d3c7e..fc174f3901 100644 --- a/lib/apache +++ b/lib/apache @@ -313,9 +313,7 @@ function write_uwsgi_config { # For services using chunked encoding, the only services known to use this # currently are Glance and Swift, we need to use an http proxy instead of # mod_proxy_uwsgi because the chunked encoding gets dropped. See: -# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 -# but that involves having apache buffer the request before sending it to -# uwsgi. +# https://github.com/unbit/uwsgi/issues/1540. 
function write_local_uwsgi_http_config { local conf=$1 local wsgi=$2 diff --git a/lib/glance b/lib/glance index 9655cc4103..4e519102ec 100644 --- a/lib/glance +++ b/lib/glance @@ -133,7 +133,7 @@ GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI=glance.wsgi.api:application GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack @@ -472,12 +472,11 @@ function configure_glance { fi if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" # Grab our uwsgi listen address and use that to fill out our # worker_self_reference_url config iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \ - $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \ - $GLANCE_UWSGI_CONF) + $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) else write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS From d84761e18676a04fc9d1b9e68dff9c573fdd3ba1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 28 Jan 2025 02:20:15 +0000 Subject: [PATCH 1854/1936] Updated from generate-devstack-plugins-list Change-Id: Ic2239e12306226943c645b7c439d5636f8c3df0e --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2984a5c15f..f7873c962d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -38,6 +38,7 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ @@ -169,7 +170,6 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ From a08a53de424e3ed8cad4cbaf566d0b08f8ad5199 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 30 Jan 2025 12:43:30 +0100 Subject: [PATCH 1855/1936] Remove leftover from the usage of the removed lib/neutron module In the patch [1] lib/neutron module was removed completely but it left call to the non existing currently function 'start_neutron_api' when the neutron-api service is enabled. 
Devstack should start neutron in the same way in case when q-svc or neutron-api service is enabled and this patch is removing that leftover call to the 'start_neutron_api' function and make it behave the same way for both service names. Additionally this patch adds service "neutron-api" to be checked when initial networks are going to be created. It is like that as just one of the services "q-svc" or "neutron-api" is enough to create those initial networks. [1] https://review.opendev.org/c/openstack/devstack/+/865014 Related-bug: #2096912 Change-Id: I1287af6a31f60b4e522f0ce3ea525e3336ffd8ba --- stack.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index bfa0573f21..b1c7df5d73 100755 --- a/stack.sh +++ b/stack.sh @@ -1307,10 +1307,7 @@ if is_service_enabled ovn-controller ovn-controller-vtep; then start_ovn_services fi -if is_service_enabled neutron-api; then - echo_summary "Starting Neutron" - start_neutron_api -elif is_service_enabled q-svc; then +if is_service_enabled q-svc neutron-api; then echo_summary "Starting Neutron" configure_neutron_after_post_config start_neutron_service_and_check @@ -1327,7 +1324,7 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" # Here's where plugins can wire up their own networks instead # of the code in lib/neutron_plugins/services/l3 From 12abc726e68b547258978c7fbe3630d133f72943 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 10 Feb 2025 13:48:37 -0500 Subject: [PATCH 1856/1936] Remove Neutron Linux Bridge agent code Linux Bridge agent support was removed in the Neutron master branch in [0], let's remove any code here now as well since it will just fail. [0] https://review.opendev.org/c/openstack/neutron/+/927216 Change-Id: Idffa6579000322acfeb860189fb83a317d56bb4e --- doc/source/guides/neutron.rst | 40 +--------- doc/source/overview.rst | 2 +- lib/neutron | 19 +---- lib/neutron_plugins/linuxbridge_agent | 104 -------------------------- lib/neutron_plugins/ml2 | 3 - lib/neutron_plugins/services/l3 | 10 --- tools/worlddump.py | 1 - 7 files changed, 3 insertions(+), 176 deletions(-) delete mode 100644 lib/neutron_plugins/linuxbridge_agent diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index fb36b3ec5b..a7adeeff73 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -451,44 +451,6 @@ by default. If you want to remove all the extension drivers (even 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. -Using Linux Bridge instead of Open vSwitch ------------------------------------------- - -The configuration for using the Linux Bridge ML2 driver is fairly -straight forward. The Linux Bridge configuration for DevStack is similar -to the :ref:`Open vSwitch based single interface ` -setup, with small modifications for the interface mappings. 
- - -:: - - [[local|localrc]] - HOST_IP=172.18.161.6 - SERVICE_HOST=172.18.161.6 - MYSQL_HOST=172.18.161.6 - RABBIT_HOST=172.18.161.6 - GLANCE_HOSTPORT=172.18.161.6:9292 - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=secret - RABBIT_PASSWORD=secret - SERVICE_PASSWORD=secret - - ## Neutron options - Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.0/24" - IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24" - Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 - PUBLIC_NETWORK_GATEWAY="172.18.161.1" - PUBLIC_INTERFACE=eth0 - - Q_USE_PROVIDERNET_FOR_PUBLIC=True - - # Linuxbridge Settings - Q_AGENT=linuxbridge - LB_PHYSICAL_INTERFACE=eth0 - PUBLIC_PHYSICAL_NETWORK=default - LB_INTERFACE_MAPPINGS=default:eth0 - Using MacVTap instead of Open vSwitch ------------------------------------------ @@ -556,7 +518,7 @@ the MacVTap mechanism driver: [[local|localrc]] ... - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap ... For the MacVTap compute node, use this local.conf: diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 4384081769..81e58a341e 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -56,7 +56,7 @@ OpenStack Network ----------------- - Neutron: A basic configuration approximating the original FlatDHCP - mode using linuxbridge or OpenVSwitch. + mode using OpenVSwitch. Services -------- diff --git a/lib/neutron b/lib/neutron index bcef8a5042..69ff212991 100644 --- a/lib/neutron +++ b/lib/neutron @@ -241,8 +241,7 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # If using VLANs for tenant networks, or if using flat or VLAN # provider networks, set in ``localrc`` to the name of the physical # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. +# openvswitch agent, as described below. # # Example: ``PHYSICAL_NETWORK=default`` PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} @@ -257,18 +256,6 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
# @@ -889,10 +876,6 @@ function cleanup_neutron { neutron_ovs_base_cleanup fi - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - # delete all namespaces created by neutron for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index a392bd0baf..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - -function neutron_lb_cleanup { - sudo ip link delete $PUBLIC_BRIDGE - - bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/` - if [[ -z "$bridge_list" ]]; then - return - fi - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do - sudo ip link delete $port - done - elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do - sudo ip link delete $port - done - fi - for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do - sudo ip link delete $bridge - done -} - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - : -} - -function neutron_plugin_configure_dhcp_agent { - local conf_file=$1 - : -} - -function neutron_plugin_configure_l3_agent { - local conf_file=$1 - sudo ip link add $PUBLIC_BRIDGE type bridge - set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" - fi - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES - - # Configure vxlan tunneling - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" - iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver linuxbridge -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index c2e78c65cc..687167bf79 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -114,9 +114,6 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS - if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then - iniset $NEUTRON_CONF experimental linuxbridge True - fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index c6d4663114..bbedc57a44 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -334,16 +334,6 @@ function _neutron_configure_router_v4 { local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) - elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Get the device the neutron router and network for $FIXED_RANGE - # will be using. - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - # in provider nets a bridge mapping uses the public bridge directly - ext_gw_interface=$PUBLIC_BRIDGE - else - # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} - fi fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} diff --git a/tools/worlddump.py b/tools/worlddump.py index edbfa268db..26ced3f653 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -31,7 +31,6 @@ 'nova-compute', 'neutron-dhcp-agent', 'neutron-l3-agent', - 'neutron-linuxbridge-agent', 'neutron-metadata-agent', 'neutron-openvswitch-agent', 'cinder-volume', From 4a1d242a1a274e6d5d93c3d06055d313f4170a88 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 5 Feb 2025 20:50:05 +0000 Subject: [PATCH 1857/1936] enable multinode supprot for spice and serial proxy This change mirrors change Ie02734bb598d27560cf5d674c9e1d9b8dca3801f which ensure that its posible to enable vnc for vms on compute nodes without deploying the vnc proxy. In this change two new flags are added NOVA_SPICE_ENABLED and NOVA_SERIAL_ENABLED to enable configuration of the relevent console create_nova_conf is also modifed to include the db url if the console proxies are deployed on a host. 
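For example (a sketch of the intended usage, analogous to how
NOVA_VNC_ENABLED is handled), a compute subnode that runs n-cpu but no
console proxy services could add to its local.conf:

    [[local|localrc]]
    NOVA_SPICE_ENABLED=True
    NOVA_SERIAL_ENABLED=True

so that the SPICE and serial consoles are configured on the compute
host while the proxies themselves run on the controller.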
As spice support is no longer available in qemu as packaged by Ubuntu 24.04 and CentOS 9, a devstack-two-node-debian-bookworm nodeset is added to allow testing with spice in a multinode job. Change-Id: Ie944e518122f2b0059f28acbf68fb7ad0a560ca4 --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ lib/nova | 9 ++++++--- lib/tempest | 8 +++++++- 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 74ce39cdfa..6cf79f5f03 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -311,6 +311,36 @@ - compute1 - compute2 +- nodeset: + name: devstack-two-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + - name: compute1 + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base parent: openstack-multinode-fips diff --git a/lib/nova b/lib/nova index 95ed4d035c..810a3d9554 100644 --- a/lib/nova +++ b/lib/nova @@ -127,6 +127,9 @@ QEMU_CONF=/etc/libvirt/qemu.conf # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -464,7 +467,7 @@ function create_nova_conf { # only setup database connections and cache backend if there are services # that require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. - if is_service_enabled n-api n-cond n-sched; then + if is_service_enabled n-api n-cond n-sched n-spice n-novnc n-sproxy; then # If we're in multi-tier cells mode, we want our control services pointing # at cell0 instead of cell1 to ensure isolation. If not, we point everything # at the main database like normal. @@ -716,7 +719,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF vnc enabled false fi - if is_service_enabled n-spice; then + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host.
SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} @@ -726,7 +729,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" fi - if is_service_enabled n-sproxy; then + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then iniset $NOVA_CPU_CONF serial_console enabled True iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi diff --git a/lib/tempest b/lib/tempest index b8f9915a87..29b01f186f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -514,9 +514,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - if is_service_enabled n-novnc; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi # Network iniset $TEMPEST_CONFIG network project_networks_reachable false From 754f1c66f53240e3ebda53fbb95bfdeee05b5796 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 13 Feb 2025 08:39:24 +0000 Subject: [PATCH 1858/1936] [eventlet-removal] Remove "logger" mechanism from ML2/OVN CI jobs The "logger" mechanism is a testing class that is still calling monkey_patch. This mechanism driver is not relevant nor necessary for the ML2/OVN CI jobs. Change-Id: I539b202ca81f62f4ae26b5275fd6b245d2066fe7 --- lib/neutron_plugins/ovn_agent | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 01dc1edfdd..71b5e3350d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -161,8 +161,10 @@ fi # Defaults Overwrite # ------------------ - -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} From 1aac81ee881534276fd7d6540ed356a85d064a13 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Sat, 18 Jan 2025 08:40:51 +0000 Subject: [PATCH 1859/1936] Allow to enable atop It may be required to troubleshoot performance-related bugs during test execution. To be able to inspect environment processes and their status during a test run, this patch installs atop by default if not explicitly disabled.
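A hedged example of driving this from local.conf (the service name and the ATOP_* knobs come from the new lib/atop below; the values shown are only illustrative):

    [[local|localrc]]
    enable_service atop      # or disable_service atop to opt out
    ATOP_LOGINTERVAL=10
    ATOP_LOGGENERATIONS=2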
Related-Bug: #2095224 Change-Id: Iedbd61f3ce3cd2255ea5f2a7a93ba2f39ad28ff2 --- .zuul.yaml | 1 + lib/atop | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 7 +++++++ unstack.sh | 5 +++++ 4 files changed, 61 insertions(+) create mode 100644 lib/atop diff --git a/.zuul.yaml b/.zuul.yaml index 74ce39cdfa..aec7113ab4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -357,6 +357,7 @@ '{{ devstack_conf_dir }}/.localrc.auto': logs '{{ devstack_conf_dir }}/.stackenv': logs '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs '{{ devstack_log_dir }}/devstacklog.txt': logs '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..e0b14cb039 --- /dev/null +++ b/lib/atop @@ -0,0 +1,48 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/stack.sh b/stack.sh index bfa0573f21..c2a4b5dc9a 100755 --- a/stack.sh +++ b/stack.sh @@ -641,6 +641,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/os-vif @@ -1093,6 +1094,12 @@ save_stackenv $LINENO # A better kind of sysstat, with the top process per time slice start_dstat +if is_service_enabled atop; then + configure_atop + install_atop + start_atop +fi + # Run a background tcpdump for debugging # Note: must set TCPDUMP_ARGS with the enabled service if is_service_enabled tcpdump; then diff --git a/unstack.sh b/unstack.sh index 1b2d8dd62a..29c80718f8 100755 --- a/unstack.sh +++ b/unstack.sh @@ -73,6 +73,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/etcd3 # Extras Source @@ -174,6 +175,10 @@ fi stop_dstat +if is_service_enabled atop; then + stop_atop +fi + # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. From 4ed29f85911642fb5d01f919703697746be19d7c Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 26 Feb 2025 11:40:10 +0100 Subject: [PATCH 1860/1936] Fix Swift rsync/replication configuration Swift rsync and replication services are up and running, however they fail to replicate any data if needed and if used with more than one replica. This patch removes a deprecated option setting[1] and replaces it with the required setting to use the correct rsync module suffix. Additionally it removes an outdated subdirectory suffix in the rsyncd configuration that has been removed as well[2]. 
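To make the new wiring concrete, the first object server ends up with roughly the following (the option name and template come from the lib/swift hunk below; the config path and resolved values are assumptions for a default single-host setup):

    # e.g. /etc/swift/object-server/1.conf
    [object-replicator]
    rsync_module = {replication_ip}::object{replication_port}

At replication time this resolves to something like 127.0.0.1::object6613, matching the [object6613] module in rsyncd.conf, which is now rooted at %SWIFT_DATA_DIR%/1/ rather than the removed .../1/node/ subdirectory.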
Closes-Bug: #2100272 [1] https://opendev.org/openstack/swift/commit/675145ef4a131d548cc1122689732b9b65e5def4 [2] https://opendev.org/openstack/devstack/commit/0e58d22897457831b9dbf02d66a2f29d43803597 Change-Id: I5283405d00883a4dd11b7c001b1bba3776e576b8 --- files/swift/rsyncd.conf | 24 ++++++++++++------------ lib/swift | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c49f716fa7..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6612.lock [account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6622.lock [account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6632.lock [account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6642.lock [container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6611.lock [container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6621.lock [container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6631.lock [container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6641.lock [object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6613.lock [object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6623.lock [object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6633.lock [object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/lib/swift b/lib/swift index 3659624d5b..862927437d 100644 --- a/lib/swift +++ b/lib/swift @@ -318,8 +318,8 @@ function generate_swift_config_services { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. 
From e650b827904fe8835800a96332937bb1c8f4516e Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 26 Feb 2025 00:38:26 +0530 Subject: [PATCH 1861/1936] Tempest: Add support for extend attached encrypted volumes tests We've LUKSv1 and LUKSv2 tests[1] in tempest that requires the ``extend_attached_encrypted_volume`` option to be True but currently there is no way to set it in devstack. This patch adds the parameter ``TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME`` to enable the tests. [1] https://github.com/openstack/tempest/blob/cb03598a65f47c51406fc86c9c1503fe42424848/tempest/api/volume/admin/test_encrypted_volumes_extend.py Change-Id: Id3a3483629794ac38cb314812eeff84b677f35cd --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 29b01f186f..c9486f6310 100644 --- a/lib/tempest +++ b/lib/tempest @@ -586,6 +586,7 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then iniset $TEMPEST_CONFIG volume backup_driver swift fi From 85576bbfd430a9f419fbd837dfa20a2ef687da94 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 26 Feb 2025 18:04:09 +0000 Subject: [PATCH 1862/1936] tools: Set user_domain_id in generated clouds.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If you specify a username, you also need to specify a domain that the user exists in. Failure to do so results in the following error: Expecting to find domain in user. The server could not comply with the request since it is either malformed or otherwise incorrect. The client is assumed to be in error. (HTTP 400)␏ This was mostly being masked for us in python-openstackclient by this little helper in osc-lib [1], but we can't rely on that for openstacksdk (and shouldn't really rely on it elsewhere either). We also deprecate the '--os-identity-api-version' and '--os-volume-api-version' options and will remove them shortly: both services only have v3 APIs nowadays. [1] https://github.com/openstack/osc-lib/blob/3.2.0/osc_lib/cli/client_config.py#L136-L147 Change-Id: I5537b0a7d58efb8a325ed61bad358f677f7a3cdf Signed-off-by: Stephen Finucane --- lib/keystone | 2 +- tools/update_clouds_yaml.py | 29 ++++++++++++++++++++--------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/keystone b/lib/keystone index 76e2598ba3..8371045026 100644 --- a/lib/keystone +++ b/lib/keystone @@ -592,7 +592,7 @@ function bootstrap_keystone { # create_ldap_domain() - Create domain file and initialize domain with a user function create_ldap_domain { # Creates domain Users - openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users + openstack domain create --description "LDAP domain" Users # Create domain file inside etc/keystone/domains KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 74dcdb2a07..c0a54838cc 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -14,14 +14,14 @@ # Update the clouds.yaml file. 
- import argparse import os.path +import sys import yaml -class UpdateCloudsYaml(object): +class UpdateCloudsYaml: def __init__(self, args): if args.file: self._clouds_path = args.file @@ -32,6 +32,14 @@ def __init__(self, args): self._create_directory = True self._clouds = {} + if args.os_identity_api_version != '3': + print("ERROR: Only identity API v3 is supported") + sys.exit(1) + + if args.os_volume_api_version != '3': + print("ERROR: Only block storage API v3 is supported") + sys.exit(1) + self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, @@ -40,20 +48,23 @@ def __init__(self, args): 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, + 'user_domain_id': 'default', 'password': args.os_password, }, } + if args.os_project_name and args.os_system_scope: print( - "WARNING: os_project_name and os_system_scope were both" - " given. os_system_scope will take priority.") - if args.os_project_name and not args.os_system_scope: + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." + ) + + if args.os_system_scope: # system-scoped + self._cloud_data['auth']['system_scope'] = args.os_system_scope + elif args.os_project_name: # project-scoped self._cloud_data['auth']['project_name'] = args.os_project_name - if args.os_identity_api_version == '3' and not args.os_system_scope: - self._cloud_data['auth']['user_domain_id'] = 'default' self._cloud_data['auth']['project_domain_id'] = 'default' - if args.os_system_scope: - self._cloud_data['auth']['system_scope'] = args.os_system_scope + if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert From 1c96b4ef5fbe5425d9c6e80b34bbb805a31a0808 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 26 Feb 2025 18:12:23 +0000 Subject: [PATCH 1863/1936] openrc: Stop setting OS_VOLUME_API_VERSION, CINDER_VERSION We have not needed these in years. Change-Id: I4d76a7d3a8513ce5a927d533b34fb609e5dacdfa Signed-off-by: Stephen Finucane --- openrc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/openrc b/openrc index 5ec7634638..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -72,8 +72,3 @@ if [[ ! -v OS_CACERT ]] ; then export OS_CACERT=$DEFAULT_OS_CACERT fi fi - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. -export CINDER_VERSION=${CINDER_VERSION:-3} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} From ca15453625638d2660b7fd2fce261096f9f15dd0 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 27 Feb 2025 13:24:31 +0000 Subject: [PATCH 1864/1936] tools: Remove --os-identity-api-version, --os-volume-api-version opts There is only one volume API and one identity API, and their collective number is 3. 
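As a rough sketch, a generated cloud entry now looks like the following (keys taken from update_clouds_yaml.py as patched here; all values are placeholders):

    clouds:
      devstack:
        region_name: RegionOne
        auth:
          auth_url: https://10.0.0.10/identity
          username: demo
          user_domain_id: default
          password: secret
          project_name: demo
          project_domain_id: default

There is no identity_api_version or volume_api_version key any more, since both services are v3-only.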
Change-Id: Ie269817c5bb0eddd8cfcf279a46cffe4a56377b2 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/openstacksdk/+/942898 --- tools/update_clouds_yaml.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index c0a54838cc..87312d9469 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -32,19 +32,9 @@ def __init__(self, args): self._create_directory = True self._clouds = {} - if args.os_identity_api_version != '3': - print("ERROR: Only identity API v3 is supported") - sys.exit(1) - - if args.os_volume_api_version != '3': - print("ERROR: Only block storage API v3 is supported") - sys.exit(1) - self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, - 'identity_api_version': args.os_identity_api_version, - 'volume_api_version': args.os_volume_api_version, 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, @@ -100,8 +90,6 @@ def main(): parser.add_argument('--file') parser.add_argument('--os-cloud', required=True) parser.add_argument('--os-region-name', default='RegionOne') - parser.add_argument('--os-identity-api-version', default='3') - parser.add_argument('--os-volume-api-version', default='3') parser.add_argument('--os-cacert') parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) From a0938e6dcf93d77530685391b7850d34ffa206fc Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Sat, 1 Mar 2025 20:46:26 +0000 Subject: [PATCH 1865/1936] Fix for CSS problems in Horizon After installation DevStack icons are not displayed. Change-Id: I1bedf97e4d2b7f13b4a0c5b98e29ac53cf502e96 Closes-Bug: #2093844 --- files/apache-horizon.template | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index da7a7d26c3..98d02e168e 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -10,6 +10,7 @@ DocumentRoot %HORIZON_DIR%/.blackhole/ Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static Alias %WEBROOT%/static %HORIZON_DIR%/static + Alias /static %HORIZON_DIR%/static RedirectMatch "^/$" "%WEBROOT%/" From 9f2f499ded039dc2545c4e8860204f013f460350 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Fri, 28 Feb 2025 07:47:11 +0000 Subject: [PATCH 1866/1936] Pre create logs directory for atop The race may happen and atop can't start due to missing logs directory. This patch pre-creates directory before starting atop process. Closes-Bug: #2100871 Change-Id: I89e3100dc86d60266913b5c5776db65e8882847c --- lib/atop | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/atop b/lib/atop index e0b14cb039..25c8e9a83f 100644 --- a/lib/atop +++ b/lib/atop @@ -19,7 +19,8 @@ _XTRACE_ATOP=$(set +o | grep xtrace) set +o xtrace function configure_atop { - cat </dev/null + mkdir -p $LOGDIR/atop + cat </dev/null # /etc/default/atop # see man atoprc for more possibilities to configure atop execution From ab9fb4eb8732b1cc5505b8c9e722a7310038efdf Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 5 Mar 2025 09:34:25 -0500 Subject: [PATCH 1867/1936] Remove openstack network section from overview This section is old and doesn't make much sense anymore, let's remove it. Neutron is already mentioned as a default service and has its own config guide. 
TrivialFix Change-Id: I2a2ed574f9eca7b87fb9bb6422568ed4fc55f057 --- doc/source/overview.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 81e58a341e..c978e8d2cf 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -52,12 +52,6 @@ Web Server - Apache -OpenStack Network ------------------ - -- Neutron: A basic configuration approximating the original FlatDHCP - mode using OpenVSwitch. - Services -------- From 0572e59775c91494fb6009ac4be539fb892226c7 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 15 Mar 2025 10:44:05 +0900 Subject: [PATCH 1868/1936] Skip functional tests for .gitreview update ... because the file does not affect any functionality. Also apply the consistent irrelevant files to skip functional tests to avoid unnecessary jobs. Change-Id: Ibce79d6b7627c26aa69989ed17ae32d7c3b63d19 --- .zuul.yaml | 72 +++++++++++++++--------------------------------------- 1 file changed, 20 insertions(+), 52 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 7d72ab101a..48dd55e2d2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -455,7 +455,7 @@ pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml - irrelevant-files: + irrelevant-files: &common-irrelevant-files # Documentation related - ^.*\.rst$ - ^api-ref/.*$ @@ -465,6 +465,8 @@ - ^.*/locale/.*po$ # pre-commit config - ^.pre-commit-config.yaml$ + # gitreview config + - ^.gitreview$ - job: name: devstack-minimal @@ -915,35 +917,21 @@ - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovn-tempest-ovs-release: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-multinode-full-py3: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files gate: jobs: - devstack @@ -958,27 +946,17 @@ - devstack-unit-tests - openstack-tox-bashate - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. 
@@ -995,25 +973,15 @@ - nova-multi-cell - nova-next - devstack-plugin-ceph-tempest-py3: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr-ha-multinode-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - cinder-tempest-lvm-multibackend: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-pg-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - devstack-no-tls-proxy periodic: jobs: From da40accd158ed55200de93a4191dbe334c82db22 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 21 Mar 2025 10:33:51 -0700 Subject: [PATCH 1869/1936] Update DEVSTACK_SERIES to 2025.2 stable/2025.1 branch has been created now and current master is for 2025.2. Change-Id: Iba81d280ebf1bd488bd590bdc4e31c49782c7099 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index c05d4e2d98..0319fc8a50 100644 --- a/stackrc +++ b/stackrc @@ -252,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2025.1" +DEVSTACK_SERIES="2025.2" ############## # From b9be941b9b96478e6bfcceb1e75ae5c66d467f07 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 26 Mar 2025 10:09:38 -0700 Subject: [PATCH 1870/1936] Reduce unnecessary apache restarts Systemd limits the total number of restarts that a service can undergo in a short period of time. On faster nodes all of our apache restarts hit that limit and we eventually fail. Mitigate this by removing unnecessary restarts. Change-Id: I425bb9eec525d82372f05edc63e4fb931e5a4887 --- lib/apache | 16 +++++++++++----- lib/tls | 15 ++++++++++++--- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/lib/apache b/lib/apache index fc174f3901..15b4297809 100644 --- a/lib/apache +++ b/lib/apache @@ -53,13 +53,16 @@ APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Enable apache mod and restart apache if it isn't already enabled. function enable_apache_mod { local mod=$1 + local should_restart=$2 # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Skip mod_version as it is not a valid mod to enable # on debuntu, instead it is built in. if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then sudo a2enmod $mod - restart_apache_server + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi fi elif is_fedora; then # pass @@ -113,15 +116,18 @@ function install_apache_uwsgi { fi if is_ubuntu; then - # we've got to enable proxy and proxy_uwsgi for this to work - sudo a2enmod proxy - sudo a2enmod proxy_uwsgi + if ! a2query -m proxy || ! 
a2query -m proxy_uwsgi ; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + restart_apache_server + fi elif is_fedora; then # redhat is missing a nice way to turn on/off modules echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf + restart_apache_server fi - restart_apache_server } # install_apache_wsgi() - Install Apache server and wsgi module diff --git a/lib/tls b/lib/tls index 0a598e14f7..cff5c630a5 100644 --- a/lib/tls +++ b/lib/tls @@ -452,6 +452,7 @@ function enable_mod_ssl { # =============== function tune_apache_connections { + local should_restart=$1 local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf if ! [ -f $tuning_file ] ; then sudo bash -c "cat > $tuning_file" << EOF @@ -494,7 +495,12 @@ ThreadLimit 64 MaxRequestsPerChild 0 EOF - restart_apache_server + if [ "$should_restart" != "norestart" ] ; then + # Only restart the apache server if we know we really want to + # do so. Too many restarts in a short period of time is treated + # as an error by systemd. + restart_apache_server + fi fi } @@ -509,7 +515,8 @@ function start_tls_proxy { # 8190 is the default apache size. local f_header_size=${6:-8190} - tune_apache_connections + # We don't restart apache here as we'll do it at the end of the function. + tune_apache_connections norestart local config_file config_file=$(apache_site_config_for $b_service) @@ -558,7 +565,9 @@ $listen_string EOF for mod in headers ssl proxy proxy_http; do - enable_apache_mod $mod + # We don't need to restart here as we will restart once at the end + # of the function. + enable_apache_mod $mod norestart done enable_apache_site $b_service restart_apache_server From cb177ba84bd90f70a02fbac8b60549e7323ec7d2 Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Sat, 1 Mar 2025 22:15:04 +0000 Subject: [PATCH 1871/1936] Fix for module proxy_uwsgi_module is already loaded Rocky Linux 9.5 64 bit httpd -t [Sun Mar 02 01:10:49.272260 2025] [so:warn] [pid 201497:tid 201497] AH01574: module proxy_uwsgi_module is already loaded, skipping Change-Id: Id6a88c2b7958789f7d4947d3259276f120f5f44e --- lib/apache | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/apache b/lib/apache index 15b4297809..744c0f10b6 100644 --- a/lib/apache +++ b/lib/apache @@ -122,11 +122,6 @@ function install_apache_uwsgi { sudo a2enmod proxy_uwsgi restart_apache_server fi - elif is_fedora; then - # redhat is missing a nice way to turn on/off modules - echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ - | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf - restart_apache_server fi } From f41a16c11801f986a6e799e02b5340adf6b04fbb Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 9 Apr 2025 18:14:00 +0530 Subject: [PATCH 1872/1936] Fix python3 version for rpm distros pythonX.Y version is virtually provided since long[1], and pythonXY-devel no longer provided in latest CentOS and Fedora releases. So switching to use pythonX.Y-devel as that will also pull pythonX.Y as a dependency. Additionally install pythonX.Y-pip as for rpm distros it don't install pip via source. 
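Concretely, with PYTHON3_VERSION=3.11 for example, the install_python3 path now amounts to roughly the following (package names follow the diff below; the exact dnf invocation is illustrative):

    sudo dnf install -y python3.11-devel python3.11-pip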
[1] https://src.fedoraproject.org/rpms/python3/c/75005c20f68f3b4ceb734e876b37009c8c3b99f3 Change-Id: I990586cce876533c67e3da4c97d9e5995c762340 --- inc/python | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/inc/python b/inc/python index bd58905e9e..cd90ac82c6 100644 --- a/inc/python +++ b/inc/python @@ -489,11 +489,7 @@ function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev elif is_fedora; then - if [ "$os_VENDOR" = "Fedora" ]; then - install_package python${PYTHON3_VERSION//.} - else - install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel - fi + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip fi } From c4340a64ee35b2b8b5395461b6702ef765786465 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Fri, 8 Jul 2022 14:08:03 +0200 Subject: [PATCH 1873/1936] Add support for shared os_brick file lock path There can be problems with some os-brick connectors if nova and cinder run on the same host with different lock path locations, which we currently do, or if Cinder and Glance with cinder store run on the same host, and a recent os-brick change (Ic52338278eb5bb3d90ce582fe6b23f37eb5568c4) allows for an os-brick specific lock_path to facilitate these kind of deployment. This patch adds the ``lock_path`` configuration option in the ``[os_brick]`` section of the nova, cinder, and glance config files. If the os-brick, cinder, nova, or glance-store changes are not present then the new config option is be ignored in the respective service, and it will be used otherwise, so there's no need to make this patch dependent on any other since we won't be worse off than we are now. Change-Id: Ibe7da160460151734224863cddec5e0d549b4938 --- lib/cinder | 16 ++++++++++++++++ stack.sh | 5 +++++ 2 files changed, 21 insertions(+) diff --git a/lib/cinder b/lib/cinder index b557d4b10b..2b565c9535 100644 --- a/lib/cinder +++ b/lib/cinder @@ -59,6 +59,7 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -511,6 +512,21 @@ function init_cinder { mkdir -p $CINDER_STATE_PATH/volumes } + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi +} + # install_cinder() - Collect source and prepare function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..afca5250d5 100755 --- a/stack.sh +++ b/stack.sh @@ -1005,6 +1005,11 @@ if is_service_enabled tls-proxy; then fix_system_ca_bundle_path fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick +fi + # Extras Install # -------------- From 3fe8873a15db27d2d8b7df4e708210ee3ca1465d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 26 Apr 2025 02:19:56 +0000 Subject: [PATCH 1874/1936] Updated from generate-devstack-plugins-list Change-Id: I7932dc96301cb2a52607007aa6935bbf6aa66397 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff 
--git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f7873c962d..e84c946287 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,6 +24,7 @@ official OpenStack projects. ======================================== === Plugin Name URL ======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ From ea23079321231c63e78e84cd58088663e92134a8 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 30 Apr 2025 21:41:37 +0530 Subject: [PATCH 1875/1936] Fix cert detection with custom PYTHON3_VERSION PYTHON3_VERSION was not considered for detecting ca path with GLOBAL_VENV=False, this patch fixes it. Related-Bug: #2109591 Change-Id: Ie597494a2a11293cbd20e6d0b23dcb31bf343957 --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index cff5c630a5..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -367,7 +367,7 @@ function fix_system_ca_bundle_path { if [[ "$GLOBAL_VENV" == "True" ]] ; then capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') else - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then From 24870ec45a9486a31239218d74d0fd3fa3e5f118 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 5 May 2025 14:13:06 +0530 Subject: [PATCH 1876/1936] Pass PYTHON env var to memory tracker It was not honoring PYTHON3_VERSION when running with GLOBAL_VENV=false, this patch fixes it. Related-Bug: #2109591 Change-Id: Ib34c099b897e59fce24cab6e98dc31a505e4922e --- lib/dstat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dstat b/lib/dstat index 870c901d2a..9bd0370847 100644 --- a/lib/dstat +++ b/lib/dstat @@ -33,7 +33,7 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" # TODO(jh): Fail when using the old service name otherwise consumers might # never notice that is has been removed. From bf1ef3278c30572f5b958c5f1d2f59839e3d2b87 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 5 May 2025 18:08:12 +0530 Subject: [PATCH 1877/1936] Honor PYTHON3_VERSION for mod_wsgi rpm installation Different variants of mod_wsgi are provided like:- python3-mod_wsgi.x86_64 python3.11-mod_wsgi.x86_64 python3.12-mod_wsgi.x86_64 Adjust script to also consider PYTHON3_VERSION var to correctly install the package. 
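With PYTHON3_VERSION=3.11, for instance, the patched line below resolves to an install of (illustrative expansion of the variable):

    install_package httpd python3.11-mod_wsgi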
Related-Bug: #2109591 Change-Id: I6bbfd92ef727ef9b343cd5778bb78f43c13165ad --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 744c0f10b6..5ab74b7087 100644 --- a/lib/apache +++ b/lib/apache @@ -137,7 +137,7 @@ function install_apache_wsgi { install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd python3-mod_wsgi + install_package httpd python${PYTHON3_VERSION}-mod_wsgi # rpm distros dont enable httpd by default so enable it to support reboots. sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as From 74837e0b30ea782b073eb6d23b0aa3060068a3a1 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 16:15:47 +0100 Subject: [PATCH 1878/1936] lib/neutron: Deploy under uWSGI by default Change-Id: I6256ca1725c56859947d957156d865949879b130 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/grenade/+/949166 --- lib/neutron | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index 69ff212991..78ff6cfb24 100644 --- a/lib/neutron +++ b/lib/neutron @@ -82,11 +82,9 @@ NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) +# - False : Run neutron under Eventlet +# - True (default) : Run neutron under uwsgi +NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse True NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini From d040e15961050f6e3fe538d8d65f13df554870f7 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 16:18:19 +0100 Subject: [PATCH 1879/1936] lib/neutron: Remove NEUTRON_DEPLOY_MOD_WSGI neutron no longer supports running under eventlet, so this option is a no-op. 
Change-Id: Ib2767c0e2bb5aad5d8173dc5653e44a42c2bd499 Signed-off-by: Stephen Finucane --- lib/neutron | 111 +++++++++++++++------------------------------------- 1 file changed, 32 insertions(+), 79 deletions(-) diff --git a/lib/neutron b/lib/neutron index 78ff6cfb24..ea2d8e728a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -62,11 +62,9 @@ if is_service_enabled tls-proxy; then Q_PROTOCOL="https" fi - # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient - NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas @@ -81,11 +79,6 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False : Run neutron under Eventlet -# - True (default) : Run neutron under uwsgi -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse True NEUTRON_DEPLOY_MOD_WSGI) - NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini @@ -153,7 +146,7 @@ _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron # The name of the service in the endpoint URL NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then NEUTRON_ENDPOINT_SERVICE_NAME="networking" fi @@ -451,9 +444,7 @@ function configure_neutron { # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" - fi + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } function configure_neutron_nova { @@ -499,11 +490,7 @@ function create_nova_conf_neutron { # Migrated from keystone_data.sh function create_neutron_accounts { local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi @@ -634,34 +621,25 @@ function start_neutron_service_and_check { fi # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - # The default value of "rpc_workers" is None (not defined). If - # "rpc_workers" is explicitly set to 0, the RPC workers process - # should not be executed. 
- local rpc_workers - rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) - - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/ - if [ "$rpc_workers" != "0" ]; then - enable_service neutron-rpc-server - fi - enable_service neutron-periodic-workers - _enable_ovn_maintenance - if [ "$rpc_workers" != "0" ]; then - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - fi - run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" - _run_ovn_maintenance - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port/ - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server + fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi @@ -723,14 +701,10 @@ function stop_other { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-periodic-workers - stop_process neutron-api - _stop_ovn_maintenance - else - stop_process q-svc - fi + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance if is_service_enabled q-l3 neutron-l3; then sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" @@ -841,14 +815,12 @@ function _configure_public_network_connectivity { # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-api - stop_process neutron-rpc-server - stop_process neutron-periodic-workers - _stop_ovn_maintenance - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" - sudo rm -f $(apache_site_config_for neutron-api) - fi + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" @@ -955,12 +927,6 @@ function _configure_neutron_common { # Format logging setup_logging $NEUTRON_CONF - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - _neutron_setup_rootwrap } @@ -1026,25 +992,12 @@ function _configure_neutron_plugin_agent { neutron_plugin_configure_plugin_agent } -function _replace_api_paste_composite { - local sep - sep=$(echo -ne "\x01") - # Replace it - $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" -} - # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - _replace_api_paste_composite - fi - # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS From 5cb2abf79ef103838ab0f922643f4a62ddf16cfb Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Tue, 25 Feb 2025 22:54:05 -0500 Subject: [PATCH 1880/1936] Switch ZSWAP_ZPOOL to zsmalloc The z3fold compressed pages allocator is on it's way out of the Linux kernel and running this on newer systems will give you an error: + lib/host:configure_zswap:45 : echo z3fold z3fold tee: /sys/module/zswap/parameters/zpool: No such file or directory So, get ahead of things and move to the much faster recommended by the kernel developers alternative. 
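For anyone exercising this path, the relevant local.conf knobs (all defined in lib/host; zswap stays off unless explicitly enabled) are roughly:

    [[local|localrc]]
    ENABLE_ZSWAP=True
    ZSWAP_COMPRESSOR=lz4
    ZSWAP_ZPOOL=zsmalloc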
[1]: https://lore.kernel.org/all/20240904233343.933462-1-yosryahmed@google.com/T/#u Change-Id: I7c137114dd7585d3179a8d5dee818bb379bbcb1f --- lib/host | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/host b/lib/host index a812c39612..58062eff6b 100644 --- a/lib/host +++ b/lib/host @@ -35,7 +35,7 @@ ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) # lz4 is very fast although it does not have the best compression # zstd has much better compression but more latency ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} -ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} function configure_zswap { if [[ $ENABLE_ZSWAP == "True" ]] ; then # Centos 9 stream seems to only support enabling but not run time From cbae98949193085f0ef5cfc250d5e8cc7530de5f Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 12 May 2025 15:17:42 +0100 Subject: [PATCH 1881/1936] Remove libvirt apparmor workaround This is triggering bug #2065685 [1] on Ubuntu 24.04 (Noble) if podman is installed (since that brings in the broken paast and pasta packages). Given the workaround is nearly 10 years old [2], it should not be necessary anymore. [1] https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/2065685 [2] https://bugs.launchpad.net/networking-ovn/+bug/1466631 Change-Id: I525b1f30bca7093791f927ff647db7745d25df22 Signed-off-by: Stephen Finucane Related-bug: #2065685 Related-bug: #1466631 --- lib/neutron_plugins/ovn_agent | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 71b5e3350d..e58cd4fb38 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -300,18 +300,6 @@ function create_public_bridge { _configure_public_network_connectivity } -function _disable_libvirt_apparmor { - if ! sudo aa-status --enabled ; then - return 0 - fi - # NOTE(arosen): This is used as a work around to allow newer versions - # of libvirt to work with ovs configured ports. See LP#1466631. - # requires the apparmor-utils - install_package apparmor-utils - # disables apparmor for libvirtd - sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd -} - # OVN compilation functions # ------------------------- @@ -614,7 +602,6 @@ function init_ovn { # in the ovn, ovn-nb, or ovs databases. We're going to trash them and # create new ones on each devstack run. - _disable_libvirt_apparmor local mkdir_cmd="mkdir -p ${OVN_DATADIR}" if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then From f5b9596f24c03079c6ea2bcac29b1973bf5fe589 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 2 May 2025 15:51:01 +0530 Subject: [PATCH 1882/1936] Temporary fixes rhel 9 distros With [1] py39 constraints drop can't run fips jobs on centos 9-stream or rocky9. As a workaround can run with PYTHON3_VERSION: 3.11 in affected jobs. Until centos 10-stream support is ready[1] we also need to install libvirt-python and uwsgi from source as uwsgi and libvirt rpms are bundled with python3. 
[1] https://review.opendev.org/c/openstack/requirements/+/948285 [2] https://review.opendev.org/c/openstack/devstack/+/937251 Depends-On: https://review.opendev.org/c/openstack/devstack/+/946763 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948558 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948786 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948797 Related-Bug: #2109591 Change-Id: I80d4c65ba0728c3e4b18738c6b0d539409f19976 --- .zuul.yaml | 7 +++++++ lib/apache | 2 +- lib/nova_plugins/functions-libvirt | 8 +++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 48dd55e2d2..9552fa3b47 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -735,6 +735,10 @@ parent: tempest-full-py3 description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 timeout: 9000 voting: false @@ -760,6 +764,9 @@ voting: false vars: configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 - job: name: devstack-platform-ubuntu-jammy diff --git a/lib/apache b/lib/apache index 744c0f10b6..b971ecf8cd 100644 --- a/lib/apache +++ b/lib/apache @@ -89,7 +89,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36|rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index ba2e98e304..35840539da 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -93,7 +93,13 @@ function install_libvirt { # as the base system version is too old. We should have # pre-installed these install_package $qemu_package - install_package libvirt libvirt-devel python3-libvirt + install_package libvirt libvirt-devel + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi if is_arch "aarch64"; then install_package edk2-aarch64 From 7fedf7f78764542a05429a22e980de4a1486faa1 Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Wed, 7 May 2025 18:17:40 +0000 Subject: [PATCH 1883/1936] Remove temporary fix for problems with CSS styles Fixed by Horizon side: https://review.opendev.org/c/openstack/horizon/+/949036 Change-Id: I8acb029b0562381cdbe28f0ee32f3aed07de5784 --- files/apache-horizon.template | 1 - 1 file changed, 1 deletion(-) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index 98d02e168e..da7a7d26c3 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -10,7 +10,6 @@ DocumentRoot %HORIZON_DIR%/.blackhole/ Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static Alias %WEBROOT%/static %HORIZON_DIR%/static - Alias /static %HORIZON_DIR%/static RedirectMatch "^/$" "%WEBROOT%/" From 46e14fb1f70e25aada290f6f5648800ec7a147b3 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Sun, 18 May 2025 16:59:13 -0700 Subject: [PATCH 1884/1936] Bubble up image download failures Currently, we're still returning 0 out of the upload_image method despite the download failing. 
This changes behavior such that if the image download fails, it returns an exit code of 1 to the caller to be handled (or fail early) accordingly. Change-Id: I901dc065b51946f363145ae888cca602946ceeea --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 42d08d7c4a..829fc86c55 100644 --- a/functions +++ b/functions @@ -147,7 +147,8 @@ function upload_image { if [[ $rc -ne 0 ]]; then if [[ "$attempt" -eq "$max_attempts" ]]; then echo "Not found: $image_url" - return + # Signal failure to download to the caller, so they can fail early + return 1 fi echo "Download failed, retrying in $attempt second, attempt: $attempt" sleep $attempt From 5d41cb1f51cccdbecf375cf84f9893b29f8c3ffc Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Tue, 20 May 2025 13:17:49 -0400 Subject: [PATCH 1885/1936] Silence SyntaxWarnings in outfilter.py Use raw strings for these regexes. Change-Id: If5d35fa527b464f34a0d2335e5c6b388be726a54 --- tools/outfilter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index 55f9ee1487..c9907b072a 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -26,8 +26,8 @@ import re import sys -IGNORE_LINES = re.compile('(set \+o|xtrace)') -HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') +IGNORE_LINES = re.compile(r'(set \+o|xtrace)') +HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') def get_options(): From 6ebe6f1b26b371b11ecd9cb6d0d154839ba6941e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 22 May 2025 02:13:55 +0000 Subject: [PATCH 1886/1936] Updated from generate-devstack-plugins-list Change-Id: I443a7715aa85e4ffe65994dc77f7091c7f441876 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index e84c946287..f3f11cfe16 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -44,6 +44,7 @@ openstack/freezer `https://opendev.org/openstack/freezer openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ From 0e8042deff33bffbab732f70a66ece162aa470f7 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Tue, 29 Apr 2025 15:36:28 +0200 Subject: [PATCH 1887/1936] Add SYSTEMD_ENV_VARS dictionary This will allow to pass env variables from zuul job definitions to to systemd service files via the local conf. The first use case of this is to pass OS_NOVA_DISABLE_EVENTLET_PATCHING=true to nova services that already supports running in native threading mode instead of with Eventlet. During the Eventlet removal effort this will allow us to have separate jobs testing the same service in different concurrency mode. 
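A minimal sketch of how a job or plugin might populate the dictionary before run_process is called (the OS_NOVA_DISABLE_EVENTLET_PATCHING variable is the example cited above; where exactly the assignment lives, e.g. a plugin or a localrc fragment sourced after functions-common, is an assumption):

    # key is the service name as passed to run_process
    SYSTEMD_ENV_VARS["n-cpu"]="OS_NOVA_DISABLE_EVENTLET_PATCHING=true"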
Change-Id: I675043e42006286bb7e1190ea9462fb8d8daa38c
---
 functions-common | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/functions-common b/functions-common
index e265256ccf..db2367cef6 100644
--- a/functions-common
+++ b/functions-common
@@ -43,6 +43,9 @@ declare -A -g GITREPO
 declare -A -g GITBRANCH
 declare -A -g GITDIR

+# Systemd service file environment variables per service
+declare -A -g SYSTEMD_ENV_VARS
+
 KILL_PATH="$(which kill)"

 # Save these variables to .stackenv
@@ -1642,6 +1645,9 @@ function _run_under_systemd {
         user=$STACK_USER
     fi
     local env_vars="$5"
+    if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then
+        env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars"
+    fi
     if [[ "$command" =~ "uwsgi" ]] ; then
         if [[ "$GLOBAL_VENV" == "True" ]] ; then
             cmd="$cmd --venv $DEVSTACK_VENV"

From df3fa124689402831543a24fc036f55e9e0bab33 Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Thu, 29 May 2025 02:49:43 +0000
Subject: [PATCH 1888/1936] Updated from generate-devstack-plugins-list

Change-Id: Ibebfa75ae6b233d6a913ffb3f8dced4290a8ab1b
---
 doc/source/plugin-registry.rst | 1 -
 1 file changed, 1 deletion(-)

diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index f3f11cfe16..9185263443 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -114,7 +114,6 @@
 starlingx/nfv `https://opendev.org/starlingx/nfv `__
 vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__
 x/almanach `https://opendev.org/x/almanach `__
-x/apmec `https://opendev.org/x/apmec `__
 x/bilean `https://opendev.org/x/bilean `__
 x/broadview-collector `https://opendev.org/x/broadview-collector `__
 x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__

From d2e309f0481f2e8577737bc2e5d6761a70b93dca Mon Sep 17 00:00:00 2001
From: Takashi Kajinami
Date: Tue, 17 Jun 2025 21:15:16 +0900
Subject: [PATCH 1889/1936] Bump etcd to 3.5.x

The etcd project maintains release branches for the current version and
previous release[1]. Because 3.6.0 was already released, 3.4.x is no
longer supported. Bump it to the latest bug fix release of 3.5.x.

Also, the binary for s390x has been restored upstream so add it back.
[1] https://etcd.io/docs/v3.6/op-guide/versioning/ Change-Id: I108466c65bd1ebd1e42c75dfbe9b2173d04ba122 --- stackrc | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/stackrc b/stackrc index 0319fc8a50..817b445c7f 100644 --- a/stackrc +++ b/stackrc @@ -705,12 +705,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.4.27} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"} -# etcd v3.2.x and later doesn't have anything for s390x -ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" @@ -722,15 +721,8 @@ elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} elif is_arch "s390x"; then - # An etcd3 binary for s390x is not available on github like it is - # for other arches. Only continue if a custom download URL was - # provided. - if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then - ETCD_ARCH="s390x" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} - else - exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided." - fi + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi From 5822439d95b02a7033f6333cda1dfafdc342b852 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Wed, 18 Jun 2025 08:20:50 -0700 Subject: [PATCH 1890/1936] Update base OS recommendation to 24.04 This is what all of OpenStack tests on now, it's likely a better choice for a default. Worth noting 22.04 doesn't work for latest-ironic (at least). Change-Id: Ibe4c1d4416dded4ac3280cb6ef423b0792b584ab --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 70871ef876..a07bb84922 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, Rocky Linux 9 and openEuler. -If you do not have a preference, Ubuntu 22.04 (Jammy) is the +If you do not have a preference, Ubuntu 24.04 (Noble) is the most tested, and will probably go the smoothest. Add Stack User (optional) From 37c755e70721fa49d9c344158592761a75b124d3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 23 Jun 2025 22:32:56 +0900 Subject: [PATCH 1891/1936] Drop logic for Python < 3 Python 2 support was removed globally multiple cycles ago. 
Change-Id: I503ef9be68e59c8983d245f1fbb689651eb564ff --- tools/outfilter.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index 55f9ee1487..df03a779b5 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -90,13 +90,10 @@ def main(): if outfile: # We've opened outfile as a binary file to get the - # non-buffered behaviour. on python3, sys.stdin was + # non-buffered behaviour. on python3, sys.stdin was # opened with the system encoding and made the line into # utf-8, so write the logfile out in utf-8 bytes. - if sys.version_info < (3,): - outfile.write(ts_line) - else: - outfile.write(ts_line.encode('utf-8')) + outfile.write(ts_line.encode('utf-8')) outfile.flush() From a8aecbad4f7f4cd4b6ddc57b78844cede5325d4d Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 23 Nov 2024 21:50:13 +0900 Subject: [PATCH 1892/1936] Allow installing etcd3gw from source ... to enable forward testing in etcd3gw. Change-Id: I249243fc913a82c28d096ef48aacecd07f2c2694 --- lib/libraries | 9 +++++++-- stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/libraries b/lib/libraries index fa418785dd..c3248f11b2 100755 --- a/lib/libraries +++ b/lib/libraries @@ -27,6 +27,7 @@ GITDIR["castellan"]=$DEST/castellan GITDIR["cliff"]=$DEST/cliff GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["etcd3gw"]=$DEST/etcd3gw GITDIR["futurist"]=$DEST/futurist GITDIR["openstacksdk"]=$DEST/openstacksdk GITDIR["os-client-config"]=$DEST/os-client-config @@ -131,8 +132,12 @@ function install_libs { # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - # etcd (because tooz does not have a hard dependency on these) - pip_install etcd3gw + if use_library_from_git etcd3gw ; then + _install_lib_from_source "etcd3gw" + else + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3gw + fi } # Restore xtrace diff --git a/stackrc b/stackrc index c05d4e2d98..ddd623fc06 100644 --- a/stackrc +++ b/stackrc @@ -395,6 +395,10 @@ GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 839e3a1328..9552c93c4f 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -45,7 +45,7 @@ ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" -ALL_LIBS+=" oslo.limit" +ALL_LIBS+=" oslo.limit etcd3gw" # Generate the above list with # echo ${!GITREPO[@]} From 06633c6c3033cc92329e1849266f4f2ed33d2124 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 27 Jun 2025 15:05:51 +0200 Subject: [PATCH 1893/1936] Restart slapd after cleanup A bug in openldap mdb (memory database) causes it to crash in an attempt to delete nonexisting tree, 
which is exactly what we do in the cleanup. After the coredump it does not start automatically (what maybe make sense to change). The fix is merged in https://bugs.openldap.org/show_bug.cgi?id=10336 but we do not have this fix in Noble. For now try simply to restart the process. Change-Id: Iae597aae345d12a2c82f66342ff40ac0a387eddf Signed-off-by: Artem Goncharov --- lib/ldap | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ldap b/lib/ldap index b0195db258..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -82,6 +82,14 @@ function init_ldap { # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. + sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then From 47aa8d1744dad23c4ace34a7edbff78360fb0079 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 12 Jul 2025 00:25:42 +0900 Subject: [PATCH 1894/1936] Drop unused [service_user] auth_strategy The option does not actually exist. Change-Id: I659bba38ca038fa370a411ae43ca942b6390c779 Signed-off-by: Takashi Kajinami --- lib/cinder | 1 - lib/nova | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index b557d4b10b..eb8a63dbfc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -732,7 +732,6 @@ function configure_cinder_volume_upload { function init_cinder_service_user_conf { configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user iniset $CINDER_CONF service_user send_service_user_token True - iniset $CINDER_CONF service_user auth_strategy keystone } # Restore xtrace diff --git a/lib/nova b/lib/nova index 810a3d9554..2357d87ee3 100644 --- a/lib/nova +++ b/lib/nova @@ -843,7 +843,6 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF service_user auth_strategy keystone } function conductor_conf { From e221349e56414e1cb89c75311bf659bd869e16a7 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 14 Jul 2025 12:37:23 +0200 Subject: [PATCH 1895/1936] keystone: Set user_enabled_default for LDAP domain When using the LDAP identity backend, stack.sh fails during the create_keystone_accounts phase when trying to verify the newly created demo user. This is caused by a BadRequestException from the Keystone API with the error, 'enabled' is a required property. The error occurs because the default LDAP user object created by the DevStack scripts does not contain an attribute that Keystone can map to its mandatory enabled property. This change fixes the issue by adding user_enabled_emulation = True to the domain-specific LDAP configuration in the create_ldap_domain function. This tells Keystone to assume a user is enabled if the attribute is not explicitly defined in their LDAP entry, which resolves the schema incompatibility and allows the script to complete successfully. 
Signed-off-by: Grzegorz Grasza Change-Id: I15ddf0b88ee93615c318d4845a026ca1e25c3e69 --- lib/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/keystone b/lib/keystone index 8371045026..0311e24c67 100644 --- a/lib/keystone +++ b/lib/keystone @@ -608,6 +608,7 @@ function create_ldap_domain { iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN From 9c180f2f060bfed65bc1b24c16010466b48dc0da Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 4 Jul 2025 11:40:38 -0700 Subject: [PATCH 1896/1936] Configure 'manager' role in tempest In this release, nova is implementing the manager role in policy[depends-on], and Tempest added (depends-on) a new config option to decide if new defaults are present in testing env. Setting the manager role availability in Tempest so that test can use manager role user to perform the required operation in nova. Depends-On: https://review.opendev.org/c/openstack/nova/+/953063 Depends-On: https://review.opendev.org/c/openstack/tempest/+/953265 Change-Id: I69e32c7de5a63df1c21979f748b77e512068eeec Signed-off-by: Ghanshyam Mann --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index c9486f6310..286cb56d41 100644 --- a/lib/tempest +++ b/lib/tempest @@ -524,6 +524,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True fi + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. + local nova_policy_roles="admin,manager,member,reader" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles + # Network iniset $TEMPEST_CONFIG network project_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" From 6180e73702cfef2011c32f315cde97128a4b7eec Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 26 Jun 2025 07:56:44 +0000 Subject: [PATCH 1897/1936] Replace the OVN Metadata agent with the OVN agent The OVN Metadata agent is replaced in any CI job with the OVN agent. This is an incremental step on the deprecation of the OVN Metadata agent. 
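For illustration, a minimal local.conf sketch mirroring this switch on an existing OVN-based setup would be:

# Sketch only: drop the deprecated metadata agent, enable the OVN agent.
disable_service q-ovn-metadata-agent
enable_service q-ovn-agent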
Related-Bug: #2112313 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I4e8d12762099c91d773c4f5e5699bc9fed43a9c9 --- .zuul.yaml | 4 ++-- lib/neutron_plugins/ovn_agent | 3 ++- stackrc | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9552fa3b47..9f9c69c925 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -620,7 +620,7 @@ ovsdb-server: true # Neutron services q-svc: true - q-ovn-metadata-agent: true + q-ovn-agent: true # Swift services s-account: true s-container: true @@ -657,7 +657,7 @@ ovs-vswitchd: true ovsdb-server: true # Neutron services - q-ovn-metadata-agent: true + q-ovn-agent: true # Cinder services c-bak: true c-vol: true diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e58cd4fb38..b128fde2b6 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -96,8 +96,9 @@ OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # OVN agent configuration +# The OVN agent is configured, by default, with the "metadata" extension. OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini -OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-} +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) diff --git a/stackrc b/stackrc index 0319fc8a50..325af580ad 100644 --- a/stackrc +++ b/stackrc @@ -75,7 +75,7 @@ if ! isset ENABLED_SERVICES ; then # OVN ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent + ENABLED_SERVICES+=,q-svc,q-ovn-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services From a8f98073b97a2485a7505ebe36dba9cad1e0a7e1 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 17 Jul 2025 10:20:27 +0000 Subject: [PATCH 1898/1936] Fix the nodeset "devstack-single-node-opensuse-15" The label "opensuse-15" is no longer available since [1]. Since there are pending references to this nodeset from older branches, use an empty node list for it to fix the zuul config until all references can be dropped. [1]https://review.opendev.org/c/openstack/project-config/+/955214 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I2f5105178482402aa108910d1bd1ec2f2c7c8933 --- .zuul.yaml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9552fa3b47..693edffe6e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,15 +48,10 @@ nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: name: devstack-single-node-opensuse-15 - nodes: - - name: controller - label: opensuse-15 - groups: - - name: tempest - nodes: - - controller + nodes: [] - nodeset: name: devstack-single-node-debian-bookworm From bfa9e547a901df5dd74926385010421157b6fca7 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Sat, 26 Jul 2025 00:58:51 +0000 Subject: [PATCH 1899/1936] Avoid setting iso image in tempest config Tempest use image_ref and image_ref_alt as their base image to run test against and perform ssh etc. Most of the iso image require ssh to be enabled explicitly so avoid setting them as image_ref and image_ref_alt unless it is explicitly requested. 
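For illustration, a deployment that still wants ISO images registered in Tempest can opt back in through the toggle introduced below in lib/tempest (sketch only):

# Sketch only: keep ISO images eligible for image_ref/image_ref_alt.
TEMPEST_USE_ISO_IMAGE=True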
One example that how setting iso image in tempest can fail tests- https://review.opendev.org/c/openstack/tempest/+/954404 Needed-By: https://review.opendev.org/c/openstack/whitebox-tempest-plugin/+/955950 Change-Id: Ic385a702758d9d38880ec92cfdce2528766fc95d Signed-off-by: Ghanshyam Maan --- lib/tempest | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index c9486f6310..cac2633324 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,6 +105,8 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) + # Functions # --------- @@ -161,12 +163,20 @@ function get_active_images { # start with a fresh array in case we are called multiple times img_array=() - while read -r IMAGE_NAME IMAGE_UUID; do + # NOTE(gmaan): Most of the iso image require ssh to be enabled explicitly + # and if we set those iso images in image_ref and image_ref_alt that can + # cause test to fail because many tests using image_ref and image_ref_alt + # to boot server also perform ssh. We skip to set iso image in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE. + while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then img_id="$IMAGE_UUID" fi img_array+=($IMAGE_UUID) - done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }') } function poll_glance_images { From 5c338f47d57fe849215d6b9f5c1f4eb53c193ab1 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 4 Aug 2025 23:31:17 +0900 Subject: [PATCH 1900/1936] Fix uninialized os_VENDOR ... to fix the wrong libvirt group name detected. Closes-Bug: #2119496 Change-Id: I2988fcb5010f333eab5a88b83ff14aab1cb15ebd Signed-off-by: Takashi Kajinami --- stackrc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackrc b/stackrc index 0319fc8a50..44bc6b321d 100644 --- a/stackrc +++ b/stackrc @@ -615,6 +615,11 @@ case "$VIRT_DRIVER" in LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then # The groups change with newer libvirt. Older Ubuntu used # 'libvirtd', but now uses libvirt like Debian. Do a quick check From 90b0a6760ba95e8b2fa4a43dda1d0eeb56c00f2d Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 5 Aug 2025 12:55:21 +0900 Subject: [PATCH 1901/1936] Drop old libvirt group detection for Ubuntu/Debian The "libvirtd" group was used in quite old Ubuntu such as Xenial, and the "libvirt" group is used instead in recent versions. Change-Id: I2df747d54d3cb395c245ecc2aa24dcbf395e7a46 Signed-off-by: Takashi Kajinami --- stackrc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/stackrc b/stackrc index 44bc6b321d..811a285d19 100644 --- a/stackrc +++ b/stackrc @@ -621,11 +621,7 @@ case "$VIRT_DRIVER" in fi if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then - # The groups change with newer libvirt. Older Ubuntu used - # 'libvirtd', but now uses libvirt like Debian. 
Do a quick check - # to see if libvirtd group already exists to handle grenade's case. - LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true) - LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt} + LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi From ec96b1a067684bf729f4dbd84dce9db02171b234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Din=C3=A7er=20=C3=87elik?= Date: Wed, 6 Aug 2025 12:27:15 +0300 Subject: [PATCH 1902/1936] Fix default settings for Ubuntu aarch64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes-Bug: #2080957 Change-Id: I441897937253f8d44144fa7f5f4622f42bf74a5f Signed-off-by: Dinçer Çelik --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 35840539da..c0713f9953 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -71,7 +71,7 @@ function install_libvirt { if is_ubuntu; then install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump if is_arch "aarch64"; then - install_package qemu-efi + install_package qemu-efi-aarch64 fi #pip_install_gr elif is_fedora; then From 4f065ca80e4589513ca639cb39d3899943698b41 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 13 May 2025 14:56:44 +0100 Subject: [PATCH 1903/1936] Replace use of fgrep It is deprecated. Change-Id: Iad071865361d51c148fc157d715bdf517ec5b94b Signed-off-by: Stephen Finucane --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..a06c7be9a7 100755 --- a/stack.sh +++ b/stack.sh @@ -356,7 +356,7 @@ async_init # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi From 67fa02fc5fd6a8786baff61695a578338462b3d3 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Aug 2025 10:53:32 +0100 Subject: [PATCH 1904/1936] Remove requirement on system oslo.utils This was only working because the noVNC package on Ubuntu pulls in oslo.utils. 
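For context, a rough stdlib-only equivalent of the removed oslo.utils check, sketched under the assumption that the address may be bracketed, is:

# Sketch only: exits 0 for a valid IPv6 address, non-zero otherwise.
python3 -c 'import ipaddress, sys; ipaddress.IPv6Address(sys.argv[1].strip("[]"))' "$SERVICE_HOST"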
Change-Id: I3733df3e2667f16082b3ff57d39cf086d81fbe02 Signed-off-by: Stephen Finucane --- tools/verify-ipv6-address.py | 41 +++++++++++++++++++++++++++ tools/verify-ipv6-only-deployments.sh | 24 ++++++---------- 2 files changed, 49 insertions(+), 16 deletions(-) create mode 100644 tools/verify-ipv6-address.py diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py new file mode 100644 index 0000000000..dc18fa6d8a --- /dev/null +++ b/tools/verify-ipv6-address.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import sys + +def main(): + parser = argparse.ArgumentParser( + description="Check if a given string is a valid IPv6 address.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "address", + help=( + "The IPv6 address string to validate.\n" + "Examples:\n" + " 2001:0db8:85a3:0000:0000:8a2e:0370:7334\n" + " 2001:db8::1\n" + " ::1\n" + " fe80::1%eth0 (scope IDs are handled)" + ), + ) + args = parser.parse_args() + + try: + # try to create a IPv6Address: if we fail to parse or get an + # IPv4Address then die + ip_obj = ipaddress.ip_address(args.address.strip('[]')) + if isinstance(ip_obj, ipaddress.IPv6Address): + sys.exit(0) + else: + sys.exit(1) + except ValueError: + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred during validation: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh index 0f0cba8afe..a1acecbb3f 100755 --- a/tools/verify-ipv6-only-deployments.sh +++ b/tools/verify-ipv6-only-deployments.sh @@ -33,28 +33,23 @@ function verify_devstack_ipv6_setting { echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." exit 1 fi - is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') - if [[ "$is_service_host_ipv6" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') - if [[ "$is_host_ipv6" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') - if [[ "$is_service_listen_address" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') - if [[ "$is_service_local_host" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." 
exit 1 fi - is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))') - if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." exit 1 fi @@ -63,8 +58,7 @@ function verify_devstack_ipv6_setting { } function sanity_check_system_ipv6_enabled { - system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())') - if [[ $system_ipv6_enabled != "True" ]]; then + if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then echo "IPv6 is disabled in system" exit 1 fi @@ -78,10 +72,8 @@ function verify_service_listen_address_is_ipv6 { for endpoint in ${endpoints}; do local endpoint_address='' endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') - endpoint_address=$(echo $endpoint_address | tr -d []) - local is_endpoint_ipv6='' - is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') - if [[ "$is_endpoint_ipv6" != "True" ]]; then + endpoint_address=$(echo $endpoint_address | tr -d '[]') + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then all_ipv6=False echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." continue From b6bf0b126b46042341cc3d47a92ad9b0d9b9a523 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 5 Aug 2025 16:44:26 +0000 Subject: [PATCH 1905/1936] Use novnc from source by default This change restores the default devstack behavior in the zuul jobs by removing the override of NOVNC_FROM_PACKAGE from devstack-base. When installed locally, devstack defaults to installing novnc from git. As reported in bug #2109592, Ubuntu and possibly other distros have a packaging bug where the python3-novnc package 1) exists and 2) depends on `oslo.config` and, as a result, `oslo.utils`. The reason python3-novnc existing is a bug is that novnc has not had any Python deliverable since the 0.6.0 release around 2016. So this package is no longer used and is effectively empty since novnc fully moved to using JavaScript. For unrelated reasons, devstack creates the global venv with --site-packages to install `libvirt-python`, which also means that any other Python dependencies installed at the system level also infect the devstack venv. In the past, this was not a problem, but as of epoxy, Nova requires a newer version of oslo than Ubuntu provides in the distro package. This is where the python3-novnc package and its incorrect dependency on oslo breaks CI. This is not seen locally, as devstack uses novnc from git. This change makes CI do that also. Closes-Bug: #2109592 Change-Id: I8f018e1e57e3f54997d2cf55b1b3aa728e82899b Signed-off-by: Sean Mooney --- .zuul.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b42c800068..eee450a6ad 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -352,6 +352,12 @@ nodes (everything but the controller). 
required-projects: - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. + - novnc/novnc + roles: - zuul: opendev.org/openstack/openstack-zuul-jobs vars: @@ -369,7 +375,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it by default. LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' @@ -442,7 +447,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true LIBVIRT_TYPE: qemu devstack_services: From 3b3aab52646e7a7ed737716efbfbe7fbef170911 Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Fri, 8 Nov 2024 16:11:06 +0000 Subject: [PATCH 1906/1936] Support CentOS Stream 10 This patch includes changes required to run devstack on CentOS Stream 10 which has been already published in official repos by CentOS team [1]: - Add RDO deps repository for CS10. - remove xinetd package from installation for swift. Note that rsync-daemon is installed which should work fine. - Use python3-distro to identify the distro - Add devstack-single-node-centos-10-stream nodeset - Add devstack-platform-centos-10-stream job to the check pipeline. Closes https://issues.redhat.com/browse/RDO-379 [1] https://mirror.stream.centos.org/10-stream/ Change-Id: I33a6c5530482c28a24f2043cd4195e7bcd46427d Signed-off-by: Cyril Roelandt Signed-off-by: Sean Mooney --- .zuul.yaml | 50 ++++++++++++++++++++++++++++++++++++++++++++ files/rpms/general | 8 ++++--- files/rpms/n-cpu | 2 +- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 11 +++++----- stack.sh | 14 +++++++++---- tools/install_pip.sh | 2 +- 8 files changed, 74 insertions(+), 17 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index eee450a6ad..3deab35e87 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -38,6 +38,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-centos-9-stream nodes: @@ -86,6 +96,36 @@ nodes: - controller +- nodeset: + name: openstack-two-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + - name: compute1 + label: centos-10-stream-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -729,6 +769,14 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. 
+- job: + name: devstack-platform-centos-10-stream + parent: tempest-full-py3 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 + voting: false + - job: name: devstack-platform-centos-9-stream parent: tempest-full-py3 @@ -911,6 +959,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope + - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx @@ -994,6 +1043,7 @@ - devstack-no-tls-proxy periodic-weekly: jobs: + - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx diff --git a/files/rpms/general b/files/rpms/general index 8a5755cc37..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -10,9 +10,10 @@ glibc-langpack-en # dist:rhel9 graphviz # needed only for docs httpd httpd-devel -iptables-nft # dist:rhel9 +iptables-nft # dist:rhel9,rhel10 iptables-services -java-1.8.0-openjdk-headless +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml @@ -23,7 +24,8 @@ net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed -pcre-devel # for python-pcre +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 7ce5a72d6b..5683862ee0 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,6 +1,6 @@ cryptsetup dosfstools -genisoimage # not:rhel9 +genisoimage # not:rhel9,rhel10 iscsi-initiator-utils libosinfo lvm2 diff --git a/files/rpms/nova b/files/rpms/nova index e0f13b854a..3ed2943c1d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,7 @@ conntrack-tools curl ebtables -genisoimage # not:rhel9 required for config_drive +genisoimage # not:rhel9,rhel10 required for config_drive iptables iputils kernel-modules # not:openEuler-22.03 diff --git a/files/rpms/swift b/files/rpms/swift index 49a1833dc4..cf614335c1 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f36,rhel9 +xinetd # not:f36,rhel9,rhel10 diff --git a/functions-common b/functions-common index db2367cef6..85ee294afa 100644 --- a/functions-common +++ b/functions-common @@ -423,7 +423,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -436,9 +436,9 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # CentOS Stream 9 and RHEL 9 do not provide lsb_release + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then + if [[ "${ID}${VERSION}" =~ "centos" ]] || [[ "${ID}${VERSION}" =~ "rhel" ]]; then os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') @@ -485,9 +485,8 @@ function GetDistro { "$os_VENDOR" =~ (OracleServer) || \ "$os_VENDOR" =~ (Rocky) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then - # Drop the . 
release as we assume it's compatible - # XXX re-evaluate when we get RHEL10 - DISTRO="rhel${os_RELEASE::1}" + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. -f1) + DISTRO="rhel${MAJOR_VERSION}" elif [[ "$os_VENDOR" =~ (openEuler) ]]; then DISTRO="openEuler-$os_RELEASE" else diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..2e130aec58 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9" +SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -302,16 +302,17 @@ function _install_epel { } function _install_rdo { - if [[ $DISTRO == "rhel9" ]]; then + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} rdo_release=${TARGET_BRANCH#*/} if [[ "$TARGET_BRANCH" == "master" ]]; then # adding delorean-deps repo to provide current master rpms - sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then sudo dnf -y install centos-release-openstack-${rdo_release} else - sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo fi fi fi @@ -408,6 +409,11 @@ elif [[ $DISTRO == "rhel9" ]]; then if is_package_installed curl-minimal; then sudo dnf swap -y curl-minimal curl fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo elif [[ $DISTRO == "openEuler-22.03" ]]; then # There are some problem in openEuler. We should fix it first. Some required # package/action runs before fixup script. So we can't fix there. diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 91b180c06f..79f97c5f7a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -127,7 +127,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then # get-pip.py will not install over the python3-pip package in # Fedora 34 any more. 
# https://bugzilla.redhat.com/show_bug.cgi?id=1988935 From 9c295d0da30b8e4d0809623e886dc9aaf6f52c25 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Aug 2025 10:53:59 +0100 Subject: [PATCH 1907/1936] Remove dead checks for Python 3.6 Change-Id: I9fab7209955ebdfda0f309aa0160749bd0f962e6 Signed-off-by: Stephen Finucane --- tools/install_pip.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 79f97c5f7a..027693fc0a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,6 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} -PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -57,14 +56,8 @@ function get_versions { function install_get_pip { - if [[ "$PYTHON3_VERSION" = "3.6" ]]; then - _pip_url=$PIP_GET_PIP36_URL - _local_pip="$FILES/$(basename $_pip_url)-py36" - else - _pip_url=$PIP_GET_PIP_URL - _local_pip="$FILES/$(basename $_pip_url)" - fi - + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" # If get-pip.py isn't python, delete it. This was probably an # outage on the server. From fdc41d76abf353b6a9b206bf92c6b9adb90b253a Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Aug 2025 10:54:45 +0100 Subject: [PATCH 1908/1936] Remove dead checks for Fedora 36 It is EOL. Change-Id: I609cfce8a98f9933380ddbc719ed22e6fcda4785 Signed-off-by: Stephen Finucane --- files/rpms/swift | 1 - lib/apache | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/files/rpms/swift b/files/rpms/swift index cf614335c1..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,3 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f36,rhel9,rhel10 diff --git a/lib/apache b/lib/apache index 449d2e70d4..c49da711e1 100644 --- a/lib/apache +++ b/lib/apache @@ -89,7 +89,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36|rhel9 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: From 1a74605eb4c30e28a99edd2e824c2ce38d6315ad Mon Sep 17 00:00:00 2001 From: Douglas Mendizabal Date: Fri, 8 Aug 2025 10:44:41 -0400 Subject: [PATCH 1909/1936] Fix iniset to escape backslash characters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes an issue in iniset where backslash (\) characters are removed from the config value. This patch ensures that backslash characters (\) are escaped in addition to the ampersand (&) character that was already being escaped. 
Closes-Bug: #2120180 Signed-off-by: Douglas Mendizábal Change-Id: Ica53ed42269931d151daf815d2e2c10c1f9e29a8 --- inc/ini-config | 6 ++++-- tests/test_ini_config.sh | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index f65e42d3a5..920d4775fa 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,8 +189,10 @@ function iniset { local option=$3 local value=$4 - # Escape the ampersand character (&) - value=$(echo $value | sed -e 's/&/\\&/g') + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') if [[ -z $section || -z $option ]]; then $xtrace diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6367cde441..fd3896d6ba 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -47,6 +47,9 @@ multi = foo2 [fff] ampersand = +[ggg] +backslash = + [key_with_spaces] rgw special key = something @@ -88,7 +91,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff ggg key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -134,6 +137,16 @@ done VAL=$(iniget ${TEST_INI} fff ampersand) assert_equal "$VAL" "&y" "iniset ampersands in option" +# Test with backslash in value +iniset ${TEST_INI} ggg backslash 'foo\bar' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar' "iniset backslash in value" + +# Test with both ampersand and backslash +iniset ${TEST_INI} ggg backslash 'foo\bar&baz' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar&baz' "iniset ampersand and backslash in value" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" From 34689f587966f9fd512a03c85762bd79dd4a4e9d Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Wed, 27 Aug 2025 02:40:21 +0000 Subject: [PATCH 1910/1936] Configure nova 'service' role in tempest In this release, nova is implementing the service role in policy[depends-on], and Tempest being branchless needs to decide if service defaults are present in testing release/ env (Needed-By). Setting the service role availability in Tempest so that from this release onward, tests can use service role user to perform the required operation in nova. Depends-On: https://review.opendev.org/c/openstack/nova/+/957578 Needed-By: https://review.opendev.org/c/openstack/tempest/+/892639 Change-Id: I463cb85f3fcb9f2fdd7aa4a0a5f2ae49782e3fc1 Signed-off-by: Ghanshyam Maan --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 53851209d2..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -535,7 +535,7 @@ function configure_tempest { fi # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
- local nova_policy_roles="admin,manager,member,reader" + local nova_policy_roles="admin,manager,member,reader,service" iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles # Network From 2aae15c93f4383c0a38dff276700c762e56a7134 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Tue, 6 May 2025 08:54:39 +0200 Subject: [PATCH 1911/1936] Use profile rbd for Ceph authx for cinder-backup Use the RBD profile instead of setting explicit permissions. Change-Id: Idc2258e3b69df3df57894c17018a2a35043c8fa9 Signed-off-by: Tobias Urdin --- lib/cinder_backups/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 4b180490d7..c46b90c5b9 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -32,7 +32,7 @@ function configure_cinder_backup_ceph { if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi From 2d7ff93390ca3c0f47a8b37c5f4912de69ab5b2f Mon Sep 17 00:00:00 2001 From: Jan Jasek Date: Mon, 1 Sep 2025 12:08:48 +0200 Subject: [PATCH 1912/1936] Remove debian-bullseye nodeset Horizon no longer use debian-bullseye nodeset Change-Id: I78094a9dd7e51641dfb9b1a851b46744184df702 Signed-off-by: Jan Jasek --- .zuul.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3deab35e87..7bfd3e33b6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -73,19 +73,6 @@ nodes: - controller -# Note(sean-k-mooney): this is still used by horizon for -# horizon-integration-tests, horizon-integration-pytest and -# horizon-ui-pytest, remove when horizon is updated. -- nodeset: - name: devstack-single-node-debian-bullseye - nodes: - - name: controller - label: debian-bullseye - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-rockylinux-9 nodes: From f6d8dab0e885b8de8c0f44388d538da7d4f9b7ec Mon Sep 17 00:00:00 2001 From: Saikumar Pulluri Date: Thu, 4 Sep 2025 08:48:42 -0400 Subject: [PATCH 1913/1936] Add service type to keystone authtoken middleware Configuring devstack's configure_keystone_authtoken_middleware to set service_type as an additional option. 
Needed-By: https://review.opendev.org/c/openstack/barbican/+/958845 Needed-By: https://review.opendev.org/c/openstack/manila/+/955393 Change-Id: I140c8392465965d68f52489b5e5bf3e47ae979be Signed-off-by: Saikumar Pulluri --- lib/keystone | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/keystone b/lib/keystone index 0311e24c67..241909cb9d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -432,6 +432,7 @@ function configure_keystone_authtoken_middleware { local conf_file=$1 local admin_user=$2 local section=${3:-keystone_authtoken} + local service_type=$4 iniset $conf_file $section auth_type password iniset $conf_file $section interface public @@ -444,6 +445,9 @@ function configure_keystone_authtoken_middleware { iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi } # configure_auth_token_middleware conf_file admin_user IGNORED [section] From 2145b0a0031977ef2809a3eaa9abe6937e4777e6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 10 Sep 2025 10:24:17 +0200 Subject: [PATCH 1914/1936] Add Rocky Linux Red Quartz singlenode job (10) Change-Id: Iaad9eb034348d559809108d254601d51719ff3e0 Signed-off-by: Michal Nasiadka --- .zuul.yaml | 21 +++++++++++++++++++++ functions-common | 11 ++++------- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3deab35e87..0a81dd82c2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -96,6 +96,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-rockylinux-10 + nodes: + - name: controller + label: rockylinux-10-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node-centos-10-stream nodes: @@ -815,6 +825,16 @@ # TODO(ykarel) Remove this when moving to rocky10 PYTHON3_VERSION: 3.11 +- job: + name: devstack-platform-rocky-red-quartz + parent: tempest-full-py3 + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -963,6 +983,7 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source - devstack-platform-ubuntu-noble-ovs - devstack-platform-ubuntu-jammy diff --git a/functions-common b/functions-common index 85ee294afa..37c1862c28 100644 --- a/functions-common +++ b/functions-common @@ -438,13 +438,10 @@ function _ensure_lsb_release { function GetOSVersion { # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" =~ "centos" ]] || [[ "${ID}${VERSION}" =~ "rhel" ]]; then + if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} - os_CODENAME="n/a" + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*') os_VENDOR=$(echo $NAME | tr -d '[:space:]') - elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then - os_VENDOR="Rocky" - os_RELEASE=${VERSION_ID} else _ensure_lsb_release @@ -483,7 +480,7 @@ function GetDistro { "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ - "$os_VENDOR" =~ (Rocky) || \ + "$os_VENDOR" =~ (RockyLinux) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then MAJOR_VERSION=$(echo $os_RELEASE | cut -d. 
-f1) DISTRO="rhel${MAJOR_VERSION}" @@ -544,7 +541,7 @@ function is_fedora { [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ - [ "$os_VENDOR" = "Rocky" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] From 1aa22aa6d4b8dac710b50b6aabd3ce9ce8280a98 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 6 May 2025 17:28:37 +0100 Subject: [PATCH 1915/1936] lib/glance: Remove support for deploying in standalone mode Change-Id: Ia70accd3e04bf9bea7fa50c18541fc71cff75f5f Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/glance/+/933614 --- lib/glance | 52 ++++++++++++---------------------------------------- 1 file changed, 12 insertions(+), 40 deletions(-) diff --git a/lib/glance b/lib/glance index b596b53271..4dade5142d 100644 --- a/lib/glance +++ b/lib/glance @@ -83,13 +83,6 @@ GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} -# Full Glance functionality requires running in standalone mode. If we are -# not in uwsgi mode, then we are standalone, otherwise allow separate control. -if [[ "$WSGI_MODE" != "uwsgi" ]]; then - GLANCE_STANDALONE=True -fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} - # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES # has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast @@ -139,14 +132,7 @@ GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} -# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet -# TODO(mtreinish): Remove the eventlet path here and in all the similar -# conditionals below after the Pike release -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" -else - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" -fi +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -451,12 +437,11 @@ function configure_glance { iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ # Set default configuration options for the glance-image-import - iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins [] + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON - cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR if is_service_enabled tls-proxy; then @@ -467,19 +452,15 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" - # Grab our uwsgi listen address and use that to fill out our - # worker_self_reference_url config - iniset $GLANCE_API_CONF DEFAULT 
worker_self_reference_url \ - $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) - else - write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL - fi + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $GLANCE_API_CONF oslo_policy enforce_scope true @@ -664,17 +645,8 @@ function start_glance_remote_clone { # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT - fi - fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" - else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" - fi + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" if is_service_enabled g-api-r; then echo "Starting the g-api-r clone service..." From 2df0d7ab8230a0cc7ca1c5a90c254717c9ff2dc6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Thu, 28 Aug 2025 03:39:25 +0000 Subject: [PATCH 1916/1936] Configure glance user in cinder conf Cinder talk to glance for new image location APIs which are default to 'service' role[1]. That needs cinder to have the glance service user configured. We need to assign admin role also to service user so that it can access images from glance. Needed-By: https://review.opendev.org/c/openstack/glance/+/958715 [1] https://review.opendev.org/c/openstack/glance/+/958715 Change-Id: I52d118672c053b9d6890bc6289bf12dcf5d7dce3 Signed-off-by: Ghanshyam Maan --- lib/cinder | 3 +++ lib/glance | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index eb8a63dbfc..aef6854062 100644 --- a/lib/cinder +++ b/lib/cinder @@ -419,6 +419,9 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + # Set nova credentials (used for os-assisted-snapshots) configure_keystone_authtoken_middleware $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" diff --git a/lib/glance b/lib/glance index b596b53271..31a9ae9745 100644 --- a/lib/glance +++ b/lib/glance @@ -503,7 +503,9 @@ function configure_glance { function create_glance_accounts { if is_service_enabled g-api; then - create_service_user "glance" + # When cinder talk to glance service APIs user needs service + # role for RBAC checks and admin role for cinder to access images. 
+ create_service_user "glance" "admin" # required for swift access if is_service_enabled s-proxy; then From f09da620cb5973f9a77233a700b06612462678e5 Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Mon, 15 Sep 2025 17:16:50 +0530 Subject: [PATCH 1917/1936] Restore os_CODENAME for old rhel distros With [1], FIPS-based jobs which run on 9-stream started to fail, as os_CODENAME is not applicable on those. This patch adds back the fallback used before. Moving the FIPS jobs to 10-stream/rocky requires some more work due to [2] [1] https://review.opendev.org/c/openstack/devstack/+/960342 [2] https://fedoraproject.org/wiki/Changes/RemoveFipsModeSetup Change-Id: I6d7ba4f5698e9b4837b29662b0b7f883b3c5de35 Signed-off-by: Yatin Karel --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 37c1862c28..0ae51e3df1 100644 --- a/functions-common +++ b/functions-common @@ -440,7 +440,7 @@ function GetOSVersion { source /etc/os-release if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} - os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*') + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') os_VENDOR=$(echo $NAME | tr -d '[:space:]') else _ensure_lsb_release From f72801c1081e9f63bb1a98a66950d65ee8cf6ecb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 15 Sep 2025 12:54:09 +0100 Subject: [PATCH 1918/1936] lib/neutron: Prepare for move of api-paste, rootwrap conf Change-Id: I70ba357f9af668fb7a7cb737d13fe24e572eb0ff Signed-off-by: Stephen Finucane --- lib/neutron | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index ea2d8e728a..dec15fb782 100644 --- a/lib/neutron +++ b/lib/neutron @@ -996,7 +996,13 @@ function _configure_neutron_plugin_agent { # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE + else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + fi # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS @@ -1076,6 +1082,8 @@ function _neutron_setup_rootwrap { if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE From e4cb49c690677b437d817ad8c736edcb96b674e3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 21 Sep 2025 03:12:55 +0900 Subject: [PATCH 1919/1936] Remove option for apache < 2.4 Apache 2.4 was released a long time ago and is now available in recent operating systems.
Change-Id: If367869e8490159f31c7d6c0207e182dd7ecb164 Signed-off-by: Takashi Kajinami --- files/apache-cinder-api.template | 12 ++---------- files/apache-horizon.template | 14 ++------------ files/apache-nova-api.template | 4 +--- files/apache-nova-metadata.template | 4 +--- 4 files changed, 6 insertions(+), 28 deletions(-) diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template index e1246f11b6..e401803abc 100644 --- a/files/apache-cinder-api.template +++ b/files/apache-cinder-api.template @@ -6,21 +6,13 @@ Listen %PUBLICPORT% WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/c-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - + Require all granted diff --git a/files/apache-horizon.template b/files/apache-horizon.template index da7a7d26c3..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -21,19 +21,9 @@ Options Indexes FollowSymLinks MultiViews AllowOverride None - # Apache 2.4 uses mod_authz_host for access control now (instead of - # "Allow") - - Order allow,deny - Allow from all - - = 2.4> - Require all granted - + Require all granted - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index bcf406edf3..66fcf73cf2 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template index 6231c1ced8..64be03166e 100644 --- a/files/apache-nova-metadata.template +++ b/files/apache-nova-metadata.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log %SSLENGINE% %SSLCERTFILE% From a3e37c86cab4ec43a4a6d1c1386abf12bc034db7 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Mon, 22 Sep 2025 06:04:42 +0000 Subject: [PATCH 1920/1936] Update DEVSTACK_SERIES to 2026.1 stable/2025.2 branch has been created now and current master is for 2026.1 Change-Id: Ibec78664417207ca7784c548ab15c1c6ef0e0663 Signed-off-by: Ghanshyam Maan --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 1e306cc685..93f8b1cd6d 100644 --- a/stackrc +++ b/stackrc @@ -252,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2025.2" +DEVSTACK_SERIES="2026.1" ############## # From 093bc57518a72b63a59389df63491d476069fc5c Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Mon, 22 Sep 2025 06:56:25 +0000 Subject: [PATCH 1921/1936] Cap stable/2025.2 network, swift, volume api_extensions for tempest This commit cap the network, volume and swift extensions on Tempest's config option api_extensions. In 2025.2, no new extension in neutron. 
and no new extensions in swift and cinder. Change-Id: I1f9a2f53fa1e513f58d7dd8706d57f2481ab3d47 Signed-off-by: Ghanshyam Maan --- lib/tempest | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 1ebe9c5f1f..892e7fd72a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -805,7 +805,48 @@ function configure_tempest { DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi - local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} + DEFAULT_NET_EXT="address-scope,agent,allowed-address-pairs,auto-allocated-topology" + DEFAULT_NET_EXT+=",availability_zone,binding,default-subnetpools,dhcp_agent_scheduler" + DEFAULT_NET_EXT+=",dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors" + DEFAULT_NET_EXT+=",l3-flavors,l3-ha,l3_agent_scheduler,multi-provider,net-mtu" + DEFAULT_NET_EXT+=",network-ip-availability,network_availability_zone,pagination" + DEFAULT_NET_EXT+=",port-security,project-id,provider,quotas,quota_details,rbac-policies" + DEFAULT_NET_EXT+=",revision-if-match,router,router_availability_zone,security-group,service-type,sorting" + DEFAULT_NET_EXT+=",standard-attr-description,standard-attr-revisions,standard-attr-tag,standard-attr-timestamp" + DEFAULT_NET_EXT+=",subnet-service-types,subnet_allocation,net-mtu-writable,ip-substring-filtering" + DEFAULT_NET_EXT+=",availability_zone_filter,filter-validation,empty-string-filtering,port-mac-address-regenerate" + DEFAULT_NET_EXT+=",port-security-groups-filtering,fip-port-details,binding-extended" + DEFAULT_NET_EXT+=",subnet_onboard,l3-port-ip-change-not-allowed,agent-resources-synced" + DEFAULT_NET_EXT+=",floatingip-pools,rbac-security-groups,subnetpool-prefix-ops,router-admin-state-down-before-update" + DEFAULT_NET_EXT+=",rbac-subnetpool,tag-ports-during-bulk-creation,stateful-security-group,address-group,extraroute-atomic" + DEFAULT_NET_EXT+=",port-numa-affinity-policy,rbac-address-scope,security-groups-remote-address-group,trunk,trunk-details" + DEFAULT_NET_EXT+=",rbac-address-group,port-device-profile" + DEFAULT_NET_EXT+=",multiple-external-gateways,qos-pps-minimum,l3-ext-ndp-proxy,rbac-bgpvpn" + DEFAULT_NET_EXT+=",qos-pps,ecmp_routes,bgp,floating-ip-port-forwarding-port-ranges" + # New in Yoga + DEFAULT_NET_EXT+=",security-groups-shared-filtering,security-groups-normalized-cidr,quota-check-limit" + DEFAULT_NET_EXT+=",port-resource-request-groups" + # New in Zed + DEFAULT_NET_EXT+=",port-mac-override,floating-ip-port-forwarding-detail,network-cascade-delete" + # New in 2023.1 + DEFAULT_NET_EXT+=",port-hints,floating-ip-distributed" + # New in 2023.2 + DEFAULT_NET_EXT+=",port-hint-ovs-tx-steering,enable-default-route-bfd" + DEFAULT_NET_EXT+=",enable-default-route-ecmp,standard-attr-fwaas-v2" + DEFAULT_NET_EXT+=",allowed-address-pairs-atomic,network_ha" + DEFAULT_NET_EXT+=",security-groups-rules-belongs-to-default-sg" + DEFAULT_NET_EXT+=",port-hardware-offload-type" + # New in 2024.1 + DEFAULT_NET_EXT+=",vpn-aes-ccm-gcm,tap-mirror,subnet-external-network" + DEFAULT_NET_EXT+=",port-numa-affinity-policy-socket" + # New in 2024.2 + DEFAULT_NET_EXT+=",tag-creation,quota-check-limit-default,port-trusted-vif" + DEFAULT_NET_EXT+=",uplink-status-propagation-updatable" + # New in 2025.1 + DEFAULT_NET_EXT+=",qing,vpn-aes-ctr" + # New in 2025.2: None + local network_api_extensions=${NETWORK_API_EXTENSIONS:-$DEFAULT_NET_EXT} + if [[ ! 
-z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")} @@ -817,7 +858,10 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # Swift API Extensions - local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} + DEFAULT_SWIFT_OPT="account_quotas,bulk_delete,bulk_upload,container_quotas" + DEFAULT_SWIFT_OPT+=",container_sync,crossdomain,formpost,ratelimit,slo" + DEFAULT_SWIFT_OPT+=",staticweb,tempauth,tempurl,versioned_writes" + local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$DEFAULT_SWIFT_OPT} if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")} @@ -826,7 +870,18 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions # Cinder API Extensions - local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} + DEFAULT_VOL_EXT="OS-SCH-HNT,backups,capabilities,cgsnapshots,consistencygroups" + DEFAULT_VOL_EXT+=",encryption,os-admin-actions,os-availability-zone" + DEFAULT_VOL_EXT+=",os-extended-services,os-extended-snapshot-attributes" + DEFAULT_VOL_EXT+=",os-hosts,os-quota-class-sets,os-quota-sets" + DEFAULT_VOL_EXT+=",os-services,os-snapshot-actions,os-snapshot-manage" + DEFAULT_VOL_EXT+=",os-snapshot-unmanage,os-types-extra-specs,os-types-manage" + DEFAULT_VOL_EXT+=",os-used-limits,os-vol-host-attr,os-vol-image-meta" + DEFAULT_VOL_EXT+=",os-vol-mig-status-attr,os-vol-tenant-attr,os-volume-actions" + DEFAULT_VOL_EXT+=",os-volume-encryption-metadata,os-volume-manage" + DEFAULT_VOL_EXT+=",os-volume-transfer,os-volume-type-access" + DEFAULT_VOL_EXT+=",os-volume-unmanage,qos-specs,scheduler-stats" + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-$DEFAULT_VOL_EXT} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} From a8fc640b674a744e887e641ca9addc85ac46e480 Mon Sep 17 00:00:00 2001 From: Fernando Ferraz Date: Fri, 19 Sep 2025 12:10:50 -0300 Subject: [PATCH 1922/1936] Avoid timeout in capture-system-logs due to df command The `df` command can stall indefinitely on stale NFS mounts, causing the playbook to time out. This leads to the entire job failing with POST_FAILURE status, discarding controller logs and impacting troubleshooting. This patch changes `capture-system-logs` to run `df` with a 60s timeout to prevent hangs from stale NFS mounts. If 'df' times out, the mount output may help debug which NFS share is unresponsive. 
Change-Id: Ife3945802c93bd77d60b60e433ea09aade38a522 Signed-off-by: Fernando Ferraz --- .zuul.yaml | 1 + roles/capture-system-logs/tasks/main.yaml | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3b0e5dbde0..d81da3d8f5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -446,6 +446,7 @@ /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs '{{ stage_dir }}/pip2-freeze.txt': logs '{{ stage_dir }}/pip3-freeze.txt': logs '{{ stage_dir }}/dpkg-l.txt': logs diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 77b5ec5098..4b5ec4836b 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -4,7 +4,13 @@ executable: /bin/bash cmd: | sudo iptables-save > {{ stage_dir }}/iptables.txt - df -h > {{ stage_dir }}/df.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt for py_ver in 2 3; do if [[ `which python${py_ver}` ]]; then From f8b3c902bef911938e03d4fc4f13fc9851a843f5 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Thu, 25 Sep 2025 19:39:26 +0200 Subject: [PATCH 1923/1936] [nova-fake-virt]Restore compute restartability Since the stable-compute-uuid nova feature the compute nodes created via VIRT_DRIVER=fake cannot be restarted as these computes are not writing the compute_id file to disk at first startup. Therefore any subsequent restart will fail as nova-compute will refuse to start due to the missing compute_id file but having a service already in the DB. After this patch fake-virt uses a variant of the fake virt driver that actually writes compute_id file to disk. To allow multiple fake computes running on the same machine each compute now has a separate state_path created so each can store a separate compute_id file. Signed-off-by: Balazs Gibizer Change-Id: I813cab3c89554d1e319b2b3e5c3affd8ec5d887e --- lib/nova | 9 +++++++++ lib/nova_plugins/hypervisor-fake | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 2357d87ee3..b289ec6d6d 100644 --- a/lib/nova +++ b/lib/nova @@ -1085,6 +1085,15 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 87ee49fa4b..39cb45ca67 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -36,7 +36,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver" + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" # Disable arbitrary limits iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } From 581d0a1d607538ffea0f41548fae25b4c6529cff Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Mon, 29 Sep 2025 17:03:48 +0530 Subject: [PATCH 1924/1936] [subnode setup] Fail the playbook when any node setup fails Currently, in multinode jobs, the job continues to run even if the devstack setup fails on any of the subnodes, and then fails later when required conditions are not met. This patch changes it to fail as soon as setup fails on any node, using any_errors_fatal: true. Change-Id: I2acd8a1fe0802ee1880df2ef794f8e7d7478b67b Signed-off-by: Yatin Karel --- roles/orchestrate-devstack/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index 2b8ae01a62..b8ee7e35a7 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -4,6 +4,7 @@ when: inventory_hostname == 'controller' - name: Setup devstack on sub-nodes + any_errors_fatal: true block: - name: Distribute the build sshkey for the user "stack" From b20eaa6e142f2716eb1b85ed8eabd5bd71515a02 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Oct 2025 09:40:46 +0200 Subject: [PATCH 1925/1936] Add AlmaLinux 10 platform job AlmaLinux 10 has been introduced in OpenDev to increase hardware coverage - it supports x86-64-v2 (compared to v3, which is required by CentOS Stream 10 and Rocky Linux 10). Change-Id: I5c91f2166bfce51cadef9c22a22a6031223604c7 Signed-off-by: Michal Nasiadka --- .zuul.yaml | 33 ++++++++++++++++++++++++++++----- functions-common | 2 +- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3b0e5dbde0..927945d8f0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -39,10 +39,10 @@ - controller - nodeset: - name: devstack-single-node-centos-10-stream + name: devstack-single-node-almalinux-10 nodes: - name: controller - label: centos-10-stream-8GB + label: almalinux-10-8GB groups: - name: tempest nodes: - controller @@ -58,10 +58,15 @@ nodes: - controller -# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: - name: devstack-single-node-opensuse-15 - nodes: [] + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller - nodeset: name: devstack-single-node-debian-bookworm @@ -73,6 
+78,11 @@ nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: devstack-single-node-opensuse-15 + nodes: [] + - nodeset: name: devstack-single-node-rockylinux-9 nodes: @@ -766,6 +776,16 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. +- job: + name: devstack-platform-almalinux-purple-lion + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-centos-10-stream parent: tempest-full-py3 @@ -966,6 +986,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope + - devstack-platform-almalinux-purple-lion - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm @@ -1051,10 +1072,12 @@ - devstack-no-tls-proxy periodic-weekly: jobs: + - devstack-platform-almalinux-purple-lion - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source - devstack-platform-ubuntu-noble-ovs - devstack-platform-ubuntu-jammy diff --git a/functions-common b/functions-common index 0ae51e3df1..c2042c4fef 100644 --- a/functions-common +++ b/functions-common @@ -438,7 +438,7 @@ function _ensure_lsb_release { function GetOSVersion { # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') os_VENDOR=$(echo $NAME | tr -d '[:space:]') From 8060e12a7bddf16179098e611f7052291eafa1e3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 12 Oct 2025 13:41:57 +0900 Subject: [PATCH 1926/1936] Drop logic for CentOS (Stream) 8 Change-Id: I528652ced464fadd565e16e89a7e0ef826d42611 Signed-off-by: Takashi Kajinami --- files/rpms/n-cpu | 3 +-- files/rpms/nova | 3 +-- lib/neutron_plugins/ovn_agent | 3 --- stack.sh | 31 +------------------------------ tools/fixup_stuff.sh | 13 ------------- 5 files changed, 3 insertions(+), 50 deletions(-) diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 5683862ee0..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,10 +1,9 @@ cryptsetup dosfstools -genisoimage # not:rhel9,rhel10 iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils -xorriso # not:rhel8 +xorriso diff --git a/files/rpms/nova b/files/rpms/nova index 3ed2943c1d..c323224279 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,7 @@ conntrack-tools curl ebtables -genisoimage # not:rhel9,rhel10 required for config_drive +genisoimage iptables iputils kernel-modules # not:openEuler-22.03 @@ -11,4 +11,3 @@ polkit rabbitmq-server # NOPRIME sqlite sudo -xorriso # not:rhel8 diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b128fde2b6..e346f4d1cd 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -407,9 +407,6 @@ function install_ovn { sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn else - # Load fixup_ovn_centos - source ${TOP_DIR}/tools/fixup_stuff.sh - fixup_ovn_centos install_package 
$(get_packages openvswitch) install_package $(get_packages ovn) fi diff --git a/stack.sh b/stack.sh index c8be7fa216..67c7a74de9 100755 --- a/stack.sh +++ b/stack.sh @@ -366,36 +366,7 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -if [[ $DISTRO == "rhel8" ]]; then - # If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI - # node, where EPEL is installed (but disabled) and already - # pointing at our internal mirror - if [[ -f /etc/ci/mirror_info.sh ]]; then - SKIP_EPEL_INSTALL=True - sudo dnf config-manager --set-enabled epel - fi - - # PowerTools repo provides libyaml-devel required by devstack itself and - # EPEL packages assume that the PowerTools repository is enable. - sudo dnf config-manager --set-enabled PowerTools - - # CentOS 8.3 changed the repository name to lower case. - sudo dnf config-manager --set-enabled powertools - - if [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel - fi - # Along with EPEL, CentOS (and a-likes) require some packages only - # available in RDO repositories (e.g. OVS, or later versions of - # kvm) to run. - _install_rdo - - # NOTE(cgoncalves): workaround RHBZ#1154272 - # dnf fails for non-privileged users when expired_repos.json doesn't exist. - # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 - # Patch: https://github.com/rpm-software-management/dnf/pull/1448 - echo "[]" | sudo tee /var/cache/dnf/expired_repos.json -elif [[ $DISTRO == "rhel9" ]]; then +if [[ $DISTRO == "rhel9" ]]; then # for CentOS Stream 9 repository sudo dnf config-manager --set-enabled crb # for RHEL 9 repository diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..fbac5ad2f1 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -89,19 +89,6 @@ EOF if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools fi - # Workaround CentOS 8-stream iputils and systemd Bug - # https://bugzilla.redhat.com/show_bug.cgi?id=2037807 - if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then - sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' - fi -} - -function fixup_ovn_centos { - if [[ $os_VENDOR != "CentOS" ]]; then - return - fi - # OVN packages are part of this release for CentOS - yum_install centos-release-openstack-victoria } function fixup_ubuntu { From 84652d3cb8022f20cbcfaef747306c95763b8039 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Wed, 22 Jan 2025 17:00:59 +0100 Subject: [PATCH 1927/1936] Follow up for creating parent dir for config files The commit Ia5fcfe6c63f5cc40b11f7e1f3be244d7897f26f6 wanted to enable config file creation even if its parent dir does not exist. But it missed that the caller of merge_config_file, merge_config_group, already checks for the missing directory. So creating the missing dir in merge_config_file is too late. This patch moves the dir creation to the caller. Change-Id: Ied0f321f31bf8888dce71cc18749f35dde303390 Signed-off-by: Balazs Gibizer --- inc/meta-config | 13 ++++++++----- tests/test_meta_config.sh | 24 +++++++++++++++++++----- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/inc/meta-config b/inc/meta-config index b9d9649e4b..1215bb8307 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -90,7 +90,6 @@ function merge_config_file { local real_configfile real_configfile=$(eval echo $configfile) if [ ! 
-f $real_configfile ]; then - mkdir -p $(dirname $real_configfile) || die $LINENO "could not create the directory of $real_configfile ($configfile)" touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" fi @@ -186,11 +185,15 @@ function merge_config_group { break fi dir=$(dirname $realconfigfile) - if [[ -d $dir ]]; then - merge_config_file $localfile $group $configfile - else - die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir fi + + merge_config_file $localfile $group $configfile done done } diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 087aaf468b..30479f245a 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -137,6 +137,9 @@ foo=bar [some] random=config +[[test12|run_tests.sh/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -389,13 +392,12 @@ EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e -echo -n "merge_config_group test10 not directory: " +echo -n "merge_config_group test10 create directory: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed -(merge_config_group test.conf test10) +STACK_USER=$(id -u -n) +merge_config_group test.conf test10 VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e @@ -414,9 +416,21 @@ random = config non = sense' check_result "$VAL" "$EXPECT_VAL" +echo -n "merge_config_group test12 directory as file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test12) +VAL=$? +EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ test-multi-sections.conf test-same.conf rm -rf test-etc +rm -rf does-not-exist-dir + From eee708742af669833d6a85d4c7289accb49c18a2 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 31 Oct 2025 17:09:11 +0000 Subject: [PATCH 1928/1936] Remove use of pkg_resources Change-Id: I5d0697f39bab0a5ff956c3cc41c26ffe601ef6b9 Signed-off-by: Stephen Finucane --- inc/python | 3 +-- tools/fixup_stuff.sh | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index cd90ac82c6..3969c1fa82 100644 --- a/inc/python +++ b/inc/python @@ -273,8 +273,7 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name - safe_name=$(python -c "from pkg_resources import safe_name; \ - print(safe_name('${name}'))") + safe_name=$(python -c "from packaging import canonicalize_name; print(canonicalize_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from. 
This doesn't work with zuul which clones diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..d8283b2591 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -84,7 +84,7 @@ EOF # python3-setuptools RPM are deleted, it breaks some tools such as semanage # (used in diskimage-builder) that use the -s flag of the python # interpreter, enforcing the use of the packages from /usr/lib. - # Importing setuptools/pkg_resources in a such environment fails. + # Importing setuptools in a such environment fails. # Enforce the package re-installation to fix those applications. if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools From 1c86258e6b0ccf95e6119d3a6271afa38e05d0a3 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 2 Oct 2025 17:48:03 +0100 Subject: [PATCH 1929/1936] Enable response validation in Keystone Unlike other projects, Keystone was previously enabling validation for all responses. This is a bad idea (TM). Quoting from the Keystone docs for the new '[api] response_validation' option added in [1]: 'warn' is the current recommendation for production environments. If you find it necessary to enable the 'ignore' option, please report the issues you are seeing to the Keystone team so we can improve our schemas. 'error' should not be used in a production environment. This is because schema validation happens *after* the response body has been generated, meaning any side effects will still happen and the call may be non-idempotent despite the user receiving a HTTP 500 error. DevStack is not used for production environments and is instead the test harness used for the bulk of our integration testing. We *do* want failed response validation to result in an error, since it will highlight bugs in our schemas. Thus, we should override the default value for this option. [1] https://review.opendev.org/c/openstack/keystone/+/962851 Change-Id: I9fc2c5dce9511165ad2c1ab18db5eb439d357d9b Signed-off-by: Stephen Finucane Related-bug: #2126676 --- lib/keystone | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/keystone b/lib/keystone index 241909cb9d..840103b9f4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -195,6 +195,10 @@ function configure_keystone { iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. + iniset $KEYSTONE_CONF api response_validation error + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT From 3566a15b8eeebb0dfc36e47f87129b108d2980e1 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 2 Oct 2025 17:46:42 +0100 Subject: [PATCH 1930/1936] Enable response validation in Nova Per inline description Change-Id: I85c959461e4e96b69d252e06fc697a1c61488411 Signed-off-by: Stephen Finucane --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index b289ec6d6d..460b4adc85 100644 --- a/lib/nova +++ b/lib/nova @@ -453,6 +453,10 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. 
+ iniset $NOVA_CONF api response_validation error + iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL From a041a7fc66453958ce1d34421330fd27393bbd65 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Thu, 6 Nov 2025 18:39:36 +0000 Subject: [PATCH 1931/1936] Re-add the ironic job in gate Ironic job ironic-tempest-bios-ipmi-direct-tinyipa has been renamed to ironic-tempest-bios-ipmi-direct - https://review.opendev.org/c/openstack/ironic/+/950192 Zuul did not give any error or warning when this job was ignored and not run, and it went unnoticed until I manually checked the gate jobs. Change-Id: I9b6bb94456ccfd17c2e38cdaa772aef372d169d0 Signed-off-by: Ghanshyam Maan --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 927945d8f0..a751c70a6a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -998,7 +998,7 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-bios-ipmi-direct-tinyipa + - ironic-tempest-bios-ipmi-direct - swift-dsvm-functional - grenade: irrelevant-files: *common-irrelevant-files @@ -1031,7 +1031,7 @@ - openstack-tox-bashate - neutron-ovs-grenade-multinode: irrelevant-files: *common-irrelevant-files - - ironic-tempest-bios-ipmi-direct-tinyipa + - ironic-tempest-bios-ipmi-direct - swift-dsvm-functional - grenade: irrelevant-files: *common-irrelevant-files From f8b3c902bef911938e03d4fc4f13fc9851a843f5 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 10 Jul 2025 21:15:21 +0200 Subject: [PATCH 1932/1936] Add support for trixie and platform job Dropping the libapache2-mod-proxy-uwsgi package for debuntu, which is no longer needed for Jammy, Bookworm and beyond. libpcre3-dev is removed from the set of packages pre-installed for Debian systems. This change adds both single and two node nodesets for trixie. Signed-off-by: Dr. 
Jens Harbott Signed-off-by: Sean Mooney Change-Id: Ib056d2ad64b31657ea8ef9359fed78e589b01b88 --- .zuul.yaml | 51 +++++++++++++++++++++++++++++++++++++++++++++ files/debs/general | 2 -- lib/apache | 2 +- lib/databases/mysql | 14 +++++++++++-- stack.sh | 3 +-- 5 files changed, 65 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 927945d8f0..357c085fe9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -68,6 +68,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-debian-bookworm nodes: @@ -383,6 +393,36 @@ nodes: - compute1 +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base parent: openstack-multinode-fips @@ -806,6 +846,15 @@ timeout: 9000 voting: false +- job: + name: devstack-platform-debian-trixie + parent: tempest-full-py3 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-debian-bookworm parent: tempest-full-py3 @@ -990,6 +1039,7 @@ - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie - devstack-platform-rocky-blue-onyx - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source @@ -1021,6 +1071,7 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie - devstack-platform-ubuntu-noble # NOTE(danms): Disabled due to instability, see comment in the job # definition above. diff --git a/files/debs/general b/files/debs/general index 364f3cc6e2..1e63e4f582 100644 --- a/files/debs/general +++ b/files/debs/general @@ -11,10 +11,8 @@ gettext # used for compiling message catalogs git graphviz # needed for docs iputils-ping -libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libsystemd-dev # for systemd-python diff --git a/lib/apache b/lib/apache index c49da711e1..b3379a7cde 100644 --- a/lib/apache +++ b/lib/apache @@ -82,7 +82,7 @@ function install_apache_uwsgi { fi if is_ubuntu; then - local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" + local pkg_list="uwsgi uwsgi-plugin-python3" install_package ${pkg_list} # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall # into the install-from-source because the upstream packages diff --git a/lib/databases/mysql b/lib/databases/mysql index 629014c1d8..a47580ca3d 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! 
is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -107,7 +107,7 @@ function configure_database_mysql { # for Ubuntu 22.04+ because the authorization model change in # version 10.4 of mariadb. See # https://mariadb.org/authentication-in-mariadb-10-4/ - if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! "$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi @@ -119,6 +119,16 @@ function configure_database_mysql { local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " fi + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user diff --git a/stack.sh b/stack.sh index a7f6a5e903..5ef98eb7b7 100755 --- a/stack.sh +++ b/stack.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash - # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Cinder**, **Glance**, # **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** @@ -230,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9|rhel10" +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 9a0db4f4999e2aa5923ed32452f1d2e41ae4ea55 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 16 Jul 2025 11:38:12 +0000 Subject: [PATCH 1933/1936] [Neutron] Add TARGET_ENABLE_OVN_AGENT variable to enable OVN agent The new flag ``TARGET_ENABLE_OVN_AGENT`` will be disabled by default. If enabled: * The OVN agent will be enabled, regardless of not being configured. * The OVN Metadata agent will be disabled, regardless of being configured. This variable will be used, initially, in the grenade jobs. It will be used to test the migration from the OVN Metadata agent to the OVN agent. This variable will be removed in 2026.2, along with the OVN Metadata agent, that is set as deprecated and marked for removal. Related-Bug: #2112313 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I8f91e1cb8543da489f495b8cf5196e606a0f5eea --- lib/neutron_plugins/ovn_agent | 44 ++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b128fde2b6..0e87b473e9 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -99,6 +99,13 @@ OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # The OVN agent is configured, by default, with the "metadata" extension. 
OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} +# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata +# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN +# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling +# the first one (OVN Metadata agent) and enabling the second (OVN agent). +# This variable will be removed in 2026.2, along with the OVN Metadata agent +# removal. +TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) @@ -301,6 +308,21 @@ function create_public_bridge { _configure_public_network_connectivity } +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then + return 0 + fi + return 1 +} + +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} # OVN compilation functions # ------------------------- @@ -498,9 +520,9 @@ function configure_ovn_plugin { inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True - elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -521,9 +543,9 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then iniset $NOVA_CONF neutron service_metadata_proxy True - elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -558,10 +580,10 @@ function configure_ovn { # Metadata local sample_file="" local config_file="" - if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample config_file=$OVN_AGENT_CONF - elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample config_file=$OVN_META_CONF fi @@ -758,13 +780,13 @@ function start_ovn { fi fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF fi - if 
is_service_enabled q-ovn-agent neutron-ovn-agent; then + if is_ovn_agent_enabled; then run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" # Format logging setup_logging $OVN_AGENT_CONF @@ -786,13 +808,17 @@ function _stop_process { } function stop_ovn { + # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled", + # instead it relies only in the configured services, disregarding the + # flag "TARGET_ENABLE_OVN_AGENT". It is needed to force the OVN Metadata + # agent stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set. if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then # pkill takes care not to kill itself, but it may kill its parent # sudo unless we use the "ps | grep [f]oo" trick sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi - if is_service_enabled q-ovn-agent neutron-ovn-agent; then + if is_ovn_agent_enabled; then # pkill takes care not to kill itself, but it may kill its parent # sudo unless we use the "ps | grep [f]oo" trick sudo pkill -9 -f "[h]aproxy" || : From 5d602fef2a7f3ac153c907304a8f43b38c0c1a50 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 21 Nov 2025 20:40:07 +0000 Subject: [PATCH 1934/1936] Revert "Cap stable/2025.2 network, swift, volume api_extensions for tempest" This reverts commit 093bc57518a72b63a59389df63491d476069fc5c. Reason for revert: This is done by mistake on master. It needs to be done on stable/2025.2 https://review.opendev.org/c/openstack/devstack/+/968073 Change-Id: I8bd7b732c181f3abb015a57b4445332614f8c6c9 Signed-off-by: Ghanshyam --- lib/tempest | 61 +++-------------------------------------------------- 1 file changed, 3 insertions(+), 58 deletions(-) diff --git a/lib/tempest b/lib/tempest index 892e7fd72a..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -805,48 +805,7 @@ function configure_tempest { DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi - DEFAULT_NET_EXT="address-scope,agent,allowed-address-pairs,auto-allocated-topology" - DEFAULT_NET_EXT+=",availability_zone,binding,default-subnetpools,dhcp_agent_scheduler" - DEFAULT_NET_EXT+=",dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors" - DEFAULT_NET_EXT+=",l3-flavors,l3-ha,l3_agent_scheduler,multi-provider,net-mtu" - DEFAULT_NET_EXT+=",network-ip-availability,network_availability_zone,pagination" - DEFAULT_NET_EXT+=",port-security,project-id,provider,quotas,quota_details,rbac-policies" - DEFAULT_NET_EXT+=",revision-if-match,router,router_availability_zone,security-group,service-type,sorting" - DEFAULT_NET_EXT+=",standard-attr-description,standard-attr-revisions,standard-attr-tag,standard-attr-timestamp" - DEFAULT_NET_EXT+=",subnet-service-types,subnet_allocation,net-mtu-writable,ip-substring-filtering" - DEFAULT_NET_EXT+=",availability_zone_filter,filter-validation,empty-string-filtering,port-mac-address-regenerate" - DEFAULT_NET_EXT+=",port-security-groups-filtering,fip-port-details,binding-extended" - DEFAULT_NET_EXT+=",subnet_onboard,l3-port-ip-change-not-allowed,agent-resources-synced" - DEFAULT_NET_EXT+=",floatingip-pools,rbac-security-groups,subnetpool-prefix-ops,router-admin-state-down-before-update" - DEFAULT_NET_EXT+=",rbac-subnetpool,tag-ports-during-bulk-creation,stateful-security-group,address-group,extraroute-atomic" - DEFAULT_NET_EXT+=",port-numa-affinity-policy,rbac-address-scope,security-groups-remote-address-group,trunk,trunk-details" - DEFAULT_NET_EXT+=",rbac-address-group,port-device-profile" - 
DEFAULT_NET_EXT+=",multiple-external-gateways,qos-pps-minimum,l3-ext-ndp-proxy,rbac-bgpvpn" - DEFAULT_NET_EXT+=",qos-pps,ecmp_routes,bgp,floating-ip-port-forwarding-port-ranges" - # New in Yoga - DEFAULT_NET_EXT+=",security-groups-shared-filtering,security-groups-normalized-cidr,quota-check-limit" - DEFAULT_NET_EXT+=",port-resource-request-groups" - # New in Zed - DEFAULT_NET_EXT+=",port-mac-override,floating-ip-port-forwarding-detail,network-cascade-delete" - # New in 2023.1 - DEFAULT_NET_EXT+=",port-hints,floating-ip-distributed" - # New in 2023.2 - DEFAULT_NET_EXT+=",port-hint-ovs-tx-steering,enable-default-route-bfd" - DEFAULT_NET_EXT+=",enable-default-route-ecmp,standard-attr-fwaas-v2" - DEFAULT_NET_EXT+=",allowed-address-pairs-atomic,network_ha" - DEFAULT_NET_EXT+=",security-groups-rules-belongs-to-default-sg" - DEFAULT_NET_EXT+=",port-hardware-offload-type" - # New in 2024.1 - DEFAULT_NET_EXT+=",vpn-aes-ccm-gcm,tap-mirror,subnet-external-network" - DEFAULT_NET_EXT+=",port-numa-affinity-policy-socket" - # New in 2024.2 - DEFAULT_NET_EXT+=",tag-creation,quota-check-limit-default,port-trusted-vif" - DEFAULT_NET_EXT+=",uplink-status-propagation-updatable" - # New in 2025.1 - DEFAULT_NET_EXT+=",qing,vpn-aes-ctr" - # New in 2025.2: None - local network_api_extensions=${NETWORK_API_EXTENSIONS:-$DEFAULT_NET_EXT} - + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")} @@ -858,10 +817,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # Swift API Extensions - DEFAULT_SWIFT_OPT="account_quotas,bulk_delete,bulk_upload,container_quotas" - DEFAULT_SWIFT_OPT+=",container_sync,crossdomain,formpost,ratelimit,slo" - DEFAULT_SWIFT_OPT+=",staticweb,tempauth,tempurl,versioned_writes" - local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$DEFAULT_SWIFT_OPT} + local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")} @@ -870,18 +826,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions # Cinder API Extensions - DEFAULT_VOL_EXT="OS-SCH-HNT,backups,capabilities,cgsnapshots,consistencygroups" - DEFAULT_VOL_EXT+=",encryption,os-admin-actions,os-availability-zone" - DEFAULT_VOL_EXT+=",os-extended-services,os-extended-snapshot-attributes" - DEFAULT_VOL_EXT+=",os-hosts,os-quota-class-sets,os-quota-sets" - DEFAULT_VOL_EXT+=",os-services,os-snapshot-actions,os-snapshot-manage" - DEFAULT_VOL_EXT+=",os-snapshot-unmanage,os-types-extra-specs,os-types-manage" - DEFAULT_VOL_EXT+=",os-used-limits,os-vol-host-attr,os-vol-image-meta" - DEFAULT_VOL_EXT+=",os-vol-mig-status-attr,os-vol-tenant-attr,os-volume-actions" - DEFAULT_VOL_EXT+=",os-volume-encryption-metadata,os-volume-manage" - DEFAULT_VOL_EXT+=",os-volume-transfer,os-volume-type-access" - DEFAULT_VOL_EXT+=",os-volume-unmanage,qos-specs,scheduler-stats" - local volume_api_extensions=${VOLUME_API_EXTENSIONS:-$DEFAULT_VOL_EXT} + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} From fc31d8a37e59d6811b208b5dba6c312365abd2d8 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 9 Dec 2025 17:49:41 +0100 Subject: [PATCH 1935/1936] almalinux: Switch to build OVS from source Since almalinux-10 label in OpenDev runs on both x86-64-v2 and v3, and CentOS NFV SIG OVS packages are only build for v3, these jobs have been only successful when spawned on v3 hardware. In order to make the job passable on v2 hardware - we should be building OVS from source. Rename the jobs to reflect the change Change-Id: I67b19c18b45af23cda7899f7c2edab21b8ed1ede Signed-off-by: Michal Nasiadka --- .zuul.yaml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0aa2530d77..2227f185dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -818,7 +818,7 @@ # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. 
- job: - name: devstack-platform-almalinux-purple-lion + name: devstack-platform-almalinux-purple-lion-ovn-source parent: tempest-full-py3 description: AlmaLinux 10 platform test nodeset: devstack-single-node-almalinux-10 @@ -826,6 +826,11 @@ voting: false vars: configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: name: devstack-platform-centos-10-stream @@ -1036,7 +1041,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope - - devstack-platform-almalinux-purple-lion + - devstack-platform-almalinux-purple-lion-ovn-source - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm @@ -1124,7 +1129,7 @@ - devstack-no-tls-proxy periodic-weekly: jobs: - - devstack-platform-almalinux-purple-lion + - devstack-platform-almalinux-purple-lion-ovn-source - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm From c11b16b44de613a15833e610ebf77d539e488856 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Aug 2025 01:02:34 +0900 Subject: [PATCH 1936/1936] Fix ownership of keyring file The file should be owned by the user(STACK_USER) to run the process. Note that STACK_USER may not match the current user in case stack.sh is run by root. Also we should not assume that the group name always matches the user name. Change-Id: Iec300311cab1b1a2fa124879aa3dc6a57a6a706b Signed-off-by: Takashi Kajinami --- lib/cinder_backups/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index e60efbb632..e4d6b96407 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -34,7 +34,7 @@ function configure_cinder_backup_ceph { sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
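For illustration only (not part of the patch above), if the group also needed to be set, the primary group of the stack user could be resolved explicitly rather than assumed to match the user name:

    # Resolve the primary group of $STACK_USER rather than assuming it equals
    # the user name, then set owner and group explicitly on the keyring file.
    stack_group=$(id -gn "$STACK_USER")
    sudo chown "${STACK_USER}:${stack_group}" ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring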

zwi;us_spGU=5%E*!u48HL2tR%G_or3cS{Wasyi4PH?lVa>5Sc~5BQXRzDx4$>3(jX z&65CZb%?hFjlGNPPH?+D-5m7lZJre*j7i#p+-Ys}*c|Z|r@tXSB)mxjq#;K<#8!GX+i_^z*8}k%%ddajh3D*pUNi$KQU_{RKmEU~wuiweNhBYJX-q zlJz!`rMCNY&rnvh1>_mo5)5^^{HuCz^1 zgYpGPk1;Msb;kumgeSd9CePO*h=14Q41uKQq{DIGOW`h~p*-7D8nyR-{DTSfYo_Kf zt9bbz!^Z5P)wGlUT-?7_-Y7&=c+>mknXbUjI}UCaYu~zxq=DPI6Z1Q^w0S*E{Mmj? z*&N3)3pst*3V&SJh6P;^gprHTwh2>b!vxg(OsBuw>epif9+VV|4OB9!=0{Nrec@3hO+DAVDmWx00^h&11}+E2o6KbN~| zba!!^2JGkZ1Xtsg>Z{g#`sQbsv!_1q3ys2Wu=sp|18Y{4)@y~4l^Wxij?6!6x(~0j z?B9yE&k(-7d}}lllibN{>gb>3-57sE0N39l*sK{i$bPLY6~;WI`%!(4*V(c+FzY2G ze#Tlqkuf-D)V6zv+VGKAee~*PnS9t=r%L!B`C#KZP$I;(Fxxy#b_O$ zYuoDOn!c}c*fiSLT<|R@3o?GM)-KX8TeT6|-=Eb<#$Uu3>mVe_XAda|M?#)O6D&m~ zcdP1O+W4oQ`0R;NJUSJGcVUfQrmAlh-`hreeAxyiw6M7=_}x4}=)({iX0UHgrF0#g zT+vQ`Y*hHg=*&ZCY-{X~SF6+-Drt-bi7D=ka81DCkSw7I{vH=0q4NdS2-Qv3W4oR) zdD)H_%k|j1KhM(!j;*XZyebPpxzIZcUw#b%cRNM-=xS59(#VEai<*3=u{!1qKN6Mn z%W2el4UWK9Id0DC0mpt5yuilR36Iqo_G+soep@3fTc&V(t25X26CcmJ2BJ@sqZI`l z9%yy?bI+lEl&5ao-K%hf8>HIc6y~r?SXk$6!~@P_A|H^i&aC`${yiD5X_V8-A+e%) z3m+HttYZGv8OLo{ENpf@rP{|MGH3lxajq+qO-U^}gG!V1LPP7h!6Au^?F(Z!wdGw# z|Cpz*mt9c=%Vjm8qg*j_!8l?_zjJ#04cZ~8yQAnQ+W_K`wxV0-*Zf`@5%UofMN82POSITK^u(iePPi5cM_vFn;9D_hXyEJ=#=MhGob} zN=jO1T_Ssd@jejp`Vjjj*WQ>}p#cFBtM+Hdzr{;{j&*h7+9#YFiq!TlB1$9r9bKQ&4EiBhlnU1G< z+4!jLI?3Gsa4lcZnRNxO_k{t1$0g6eaMW>rPdcloi$*sal{e{VjavozeO8r$kGYU) zsdiKEtFwN#uPg#W9n+bz6PpF&Ex+2&mxRUBhmdq<<3i$jQm5EE6uLv9Nk2oOc|1wp zg2>Cxq679IT!-A_9gZIl$@<)~QPI4-*B(pLPgYBJN~4coG|lV0(=hdW!d_SZa*x0E z?b1s2>gU|~wlQz!*tLlqWYuZPQhijvysvwL1fKak8~WJ-Lc@9PU0L=nWG&MTX$OHM;p9eZm&-$ zyYg_kL+pWrSvlf=F|as_~+f%+Rj z1A78~{SA4{HNw!yvsN{?$q&@9kN)L8zpkVLgSdwMnv><&&x2~r1%5xPb;}HyN zueIOi=A6D;JoN9wLiKsByLkBD5?oj;N}@d}tHs~Fe{K4hc5>ye7XYnj)(-Pcd>$|H zhn~gYvY%Zb`ai@d7&h9jdTTJNE}1;MxDtvyzjpYi=?f-Aq&*I+Px_}B`N-Ve-Td6K zxqNYWJ9!@tlW`tFL`tQ%pKiTwKYV{*>$*JWy5VxPv|kaIc#)e^X+*>BgpGn3K;X#D zuhbXj8&1Lzj1|T2OM}}gjqyxHWluli2>tz1_IbqNvb4&*D#RV_yMX)33(V&aMZL+M zDND5_rpO1QBaezt_&+LQJh*7XzP}tTSMz4WT%ILdRNM3vK8zTue?g6d@a8?+2Lf(< z_4BRw28!qIiGPX2S8*|*ub91v8>|fD&9ZSr73oT1pfd-Z=+E>jjDVX)wTv#d!bbgX zSu&?aO1%ZXglHdFa(!|mF(WOA(qG>1k~?)Dcqj6np+|JGa~UnFE7N96 z?7KgE9hUESGb(*)Yipj@_U9aX{Z$9|?XF-E?)2^;cGYsdHC=V8tj7_Xs}3*JMz{2# z5_r_C5zB##`Fvc^i4gMX6sD3-P#sOLxj9%@CWSxdi;wed{RQsWt23fw*KrL4fNIa) zCqh>S17H%|JiYe@*CdxjYr^&v^CNVq&#{L1*waFi) z@sK@|e-E`b1sLlam?wzP4--zeA3aCL<2CZ;@-!G|IeJ50yKjXXJiA=)9RG78@8le-1 z=N5A#CZ`m3l#F!HUc(-AtbPjz3jIUcsy>6m2vpLNZE=Kw5Ab`=CSib`+0%Liz2PV0 zZ}btp_ke=sFU&_xLk_6TksSkq=6|^6I^*edzOQ08kXT$JBGE>hER_s|L-_EQ;=ve*WX%|D_1V8bIlFRgrx?oT}p7_L;iJd@FKbLpjB2~v<fAfEHRF|0PI=TD zqnAJkeq3z*K2OhN9G8ri0MNkx@{PG6r!tF1J?+t?yhf}#H&QO&wBuhv3p5-BeLMAs z6J*CLdSZ$7C6XkT1oYg+@BsKXhST^~F53~HQq3dTKrn*7R1P8lgs3|m-85^;^ijop z@nTNFMF1Ty_ZuytC@4VVCJ|Wb<&8PCuWC+@)FXS&KfN*;86kzi_G_i>wgF2sw#{_1 zQF{>&ncJm+iGQTd-UUZjy6ZkeCBve;HsAUb&-Ods@$s=4YCQ|FOtt^T);kAB8g*;m z9h(!|>e#lOOl(^d+t$Q0vF(X%+qTV#@nxR#o_fz$=dJ#yyQ=H1uKQkV_g>ff?Q1#Q z#v|E)0vuk_S0^s>q;dZ4qC0&bZ}DH+{;i3xc<&+=xtg>ktl83nD$dJyZ53=^Qvr)&F4FJxjWzp_yTxX*bL~P`~?+qKF-K8Ib;Q08gA7w z4w(Tqb3?niMTtJDT$o_$69b<#YK}Unvjt$8e?@j%hC75^HFXcjVEwZkj%-IZEnU^z z3qvDHUik~rne){~>U*R^YZG*%H)KUX`~L_fRVO*hx2ZMpR6Z}P)(uCxNiHVR&rhi+ za574PgQqk}RoJFW`6J>%aGFZ7!)ekDzX}7F-YO4ns~i`WMK%Z;*k;?=JpQ(lz_**b zHnnLF4FM4@rZjZIF%t?Vu7omt3^d7O%bjiV(0kw=`AN^r9Vy3W!_s+r@QfSd#Zp9 z%KGu4bG|=M7#DYB8mZ@bvqxtR!o!Ix-BSNO+GZ8`l9m6HM^=VJ_|=7;ZO*#}GW>)K zO~sr3Y?!Fs6%`eDp?dRmAA`z19H(Hv=ZHa&H7CUf+by$Ug57TJU`s`5Xu^(=PqqDk zxzs6;H&km=lWA26G%F`tA-eiZPcz}yMIbFW049%yxIds_NA*B-nGn3re@8<3MOPefG^gAP z6c%SXd8zr@*4<@)cWPAG?_n%RbT?60xZE)JEt{jk>UR97q6sv7aYp)S*4BUP_Urx4 
z$?{^V2<@hQA1%mmnTUYA(FP8%%mRE=&0Av4-foo~{)=xIY#=lKE2GnEg^1wbVNMPG zeM31Fsn3POUdZU&E^rP0f}As+)2sT#24lJ0qY`#|e< zF09_6@HMl{SySD$FM_teOhSQUoJ`oFU%#l-=NL6ZE!fKFymYLzDba?orWp>8Z~G4A zlp~WvA!`D|hbC1~{gLXuY^K?S;Wdd6$Pw@_=hP%D8!@n{=PV)AY7XBbS$3_$#n@PJiye45|3wkzmCCUjmXo%pNu^|iwwlG7X_1ynxjix z=OIL`);yUM6mxt+tSZgDWG$cl#Ohx%Ivm%;$ccK}m3_T=%z%?UY_CZcp>% zEH+T&8;I@Qtm?Loyty}8v zC1=b=2{)V-s_U;AOmSKqT%#eg;UKc}J!m3pKMP-IO6_nKdgN_Q@Qa*8E(apOWN!;E zD3q$CrFpbCI7~LE$p%h56&7QxD9tFC!vbDBN{pNSpU|_4fX~_ghLO`Z?DL3OZ1SrdM0Dtp)H*#`E2{v5`iH?>JsHjgd~wMy68N| z90UgX6VV%0IzJR9Q*dwl@>8aAwM)H@QL9rFo|n&croY=ZO5Acg{x4oVgI26wRIQBD z?0<9KvIZVdn!kRm=V-^p596Um^Mz}TS!Of?3-aw@*S~o`dbA&q6BnZ$p)?k}nfpR( zd~{{uGEje5rZ;p$P9cjp$nTVIEn5N3cHWv?!8={unyy6PeB}&xYCo19Apd9;Tk_9) zy%lof7oY7xg%j-Uf=S!N>iVgn!}`zM%_xxM|5T?mg-iHlxSiai?ra97KOvypmYCOS zb>fxsM7ipBm+~=Zi$IsQ#f(8ih+S)zni48v8^B`$Qs7(y4KJQ2DO7R=c zTi0KkX+QEqcydr%+UkQ-x2;Q+8@yra#)cSfEN9h=yNGXR9X~9zSCi95e!ehoRcMOR zEeh2=_F||p>Zs5wozqIPrdFK3Gsl`{)m`w>~`3SCuVGhK2+|;@xsR` z4IbxGa_Vp)Vfg1=)j~bRJY+aqsgs`PE%wtzuL8K7L?KPFVS0gUFB4ZNYcbB)Ww;UD z5&9&Dd$nU*>3dXkf915HU#p(*r9`9K^cg=OP$VVc-`#iKj)y1)IylQ5NY(MYBp3gu zNOkEGL2n;N-?vNtGw8%*PD}ayg+HLpC*u-@@P|GI=<9U+MqHa3XU=XBi9}<)6=nyI z-lNV}8OaQ+4w%8@C!bBTm2IYKPT;swK3P^{_lC#33ro_22B`9o-(gj~2DRIAtE!hSnowu12sF(U?keZB=NnsHuy6}) zZQbygxQhMy1UVA`YBa}3CJXMa2udZTtjFW{Rh|l4FW0Sy=a?$?sX7syZk8Ybx{L4}5k_FaFk>&6a`m3+^aFe$g*ZxLMxA;ucUXxIf9ZM0_A7`Fb--lkJ2 zx>&syKkC094`H7^G>qL=T;IRl+Hl>jEK#d(VE@^hYtTe&NK(z^8l6{)lPR9e5CD!t zFTDqsc^`%@lhRcfC3t|RvFnhG1>|NloHP%hDg~Yd)>viMi`y!Qk|s}|DfbcM}dms zE?K$O|G>0&Q!oBpJ~TZOjGPUp5ZU;IopFUUrVCR8c*>>YX~rvwfNa*JA;920$f`Yu zDh?u5Rz>AiWIr-iq%9XOzG7ZJ6)vQ+L8Fu>$ z%~R09vh`7+)m*f%<`Gl@rUTC=!rsgC2Cy}|NuviyNAw>(u7^Rp^`=AS(o9q9C^xGM z;K_|$DFuAb@-7z`QqqOvpmwX{;b(8qMCfFy^j^pJ351odZ13p!T~jih)NEgk{6GS1 zIS>t4TOM|;Q@8TbhH^j%!H6^%t=B;WcI)q7gwVb=oV6xuz77(H+HGfzDm7(gM@5-b zKE#fbq!-n>f(<@fO`2|BM+7%u#Yva+3tbP~)%+LU2FDX)F-fXA@^#yx0BX860`(hO z^kq4+%{@Pz-8;Iz=A(W%Ne!B3ClgS=hIPLFTF9Y=f5|cg-1s?0`0d>{8K}%cos84H zk1D&4{p(?oGdPiZqRaT<@7Z0<#PL7+og%}wl#;fTGX&%(UFe)V`NGwe%XEZwIna)8 zSL4YI=_9~GtJ0e0&j$|NHI5{s(y&XR8#mMMNi8nP0{UrxEJ+;uAuL&P0~pg2;m;NW zy;)W?36jr(Gf9@L@h;fGYX00hc7r9rh5qYIbtc4X>>!461*?Uxtk6e8?_2x2oof!V zm&7Ay*ajp3j(bq3q?HM7{|U#h541ca7Q`wJ3l(9lELL-07;fk%$$vvi76}$}+g`Vc zzfNA9IlI1GfuBj~t-Va#Iw&0egYN=~$gDc~KOiO(Uy)q6uf=uWpuC$~(~rOC#GpgC z&y`xp>eO&Z1WZ0oB!`K4!SOH0GbzbA-?hqJIVM)cp>BgI!v!_D?0?v=!%bi}mK<*k zfOwPO>G3l4Ic%FR^lE!(+*FpVe?H5#IceD~e86l1k@fB9y85X%M4Q-}g%gKW=ga@^ za_W?u&1qG0B#ey*(Smule#;ewJsk-QuuoqmB{{=d_75q>WPpxn7F*)^^Rd(()mAtC zO8UfH)jkzmWVT!bguOBrNCiW9?zAjbQn{7o|tbu}5 z?)85%%IJU8h^_xOy8Oh^6O^BZ#@Y<2V4opSPRtvP`IPGx99HdGRAz>IBzP62x3OIw zvL1z*LEyc%hG~5r9BUCT6ovbB?T>Pgk}nXN-r);WuF@$wBPit_ewki|cme9~2l27( zWSSo{x?-qxgk5AC-+4XV3&C5EN_c#`?}YX9_8$qLZx{8sxldny!`V%ur8MDW_ls>& z`s$ekHmBF#utmd9pD(jrv{~U+iaitd)eP0VgZ1M~d5QSg4L&F^Za!vT8YH^PX*A=l z2~@wf5T6su*iYc>&+r4< z=@5C;T!{M8)#+;xeBJZV#?X{(6IuAGi-Md0GEQVPvTi6b%u(ty-iY={xW@2^v(zVr z&NBi}y`ZBZQe(_bd59bFA%|r=TO+%?F_U&4{9+Cx`xdyX6HpXPaQx{Z%vV#oFc!gL zJptd}>eqV+IYM5NaXK&OZs{|&9LqLU{nWNhB(=LMCd24-wqg8QhC087^A^8C! 
z7L3ZE_rSzvkVHs9X%XRru2nmP&NslB%k@?V$m=$0e+$q>@t?a*eJUsGw~cLE>YqlgaQw14b@js)3K-dd-2RQ~jx z?>{;$U+;M|p6RCW^Efkn)5wG1LBLt) z@L0k>X0l_9ix^5FRj?8HbAKKPv6N$ldHD%Ro&d+Ns<5*%g#t8?1L{c$GTjjHF2k&G z7|FpHy%?>!x91O4Fc{dCRpoHDyZl`N1M*`r2n2BAc4{LyksZnown47HA(WkMv4I;6 z;;o~v$rh5m*6(6{G=Yy14n{Q@F7i+%e9WPowp)Dieuw-SZ_Kn$wb==Pu!|lv<>&kZ zYkID4t1fB;iW=Zh?dM8%DBVW4-J)7i_1EQoO1t6|`dmfAuUfd)t&gysd+#HDm^UD8 zF)mq~yEV?|(Sv(uB4@xXZh?Oq+`rnNt>fsqF|3ZV zLbB6>m#W8Wy$X!NR`Wxe?DhpsZ?a)&?)Tgtd2t|qs8eZ>ympLFiQj6#x-#b=OXMqB zdC;|r(aws)orT9J*D~-H8wkW*d@x?@f|f{1F@?Oy$b{ZviM@u*%5NsEL!t?)ka;w) z^W(?Oy{pv{ycnTG=oiVAH6cjMCgXR+sa?!xaes|lk}9bplnE?r*&}pF#sf(5FzXWL zcLY&|=G;QMIr03`Cd<=DA$7!=bj6qBY5SV%L1y}^`EJbv0Yu}g0Yp|{mhFngW0?OD z$*nC_vi)lX5&uh_aaUR2-gTR|BKM{fd+wx$aoU&f7)!PU>GljZnRi})5loLWl?58B zCbV`Obo?->``EJ`tI<<32EyY--SAB>U{yS<_pQM>c(3)dNTV4+O5B8|e!e)A&>X3OA z9dTb>clv)=NMMEA_HJ#)7l?53q8bzQgu0_ezz2XnDP!HSr*Q40k~bGZ0IHRi+291a zg05IVh%9*Zu)`5IYfJ(7y9`Db>^hi_7<|OwAb{8&cS9U;SN~`tK8!8fo>XBJZd|c0 zkDqM2xFlL;7br|HiJqjxD1g!1nky(9S!Ol4asjBmv%~EL`|H_4oXzfxrE3#L8w-CC zEn4;uYXpXVA@MBPyu88GDLOya?m7n;j1ax>hy80xVoeei+QL zGZoYaJkocZy$rKgbYkISI8v(Uz>!8&3=};m;F~=YH>8k!oF~MYpO8vziEoJJef|QL zp1g=eATCM1V zAKjnyaopZ<{k6dFNZxgTSSfc8Hsw=WBeX+vQV&`LN}$StpfUB9Mn8$IdQZ!BWfiyT zyh3GIjfmN-H@DvzSpe<Z;%7{AJH+%^xd?n?+N*w+l9rT$2?NHyi zOMcX?u6f6&JRFOR$S2Tz@!M{zV=$xd?PW#YbIJIvaVbYPBRrhR==j%+b8bv74`90S zs~TP6W3WEUU#i+Ea6JZ0RD`O9$26ZL!&iWJWVHzBmcz~8$64g0j+l@pB7bT#kb4iC z?GSIlYM^`Kx|iA++$> zrY^t`n&|66`O(n8xa9lvgTI2g=7Pgw{mWw42ENyckhB&jn(w7iWEbQuw>Pi!_;>h% z0I3nhW@;!kE-}S3%sCPzqP{+xgV#_#ZmawDHDR+7$daHeevBMGqZ^kpt3PD=q5&b? zk)8(cvS8h2-}SqVgXPmR)^=#_UtiK6L!syvOZ%un=H%0^SX(HqXa$RK26{nE0)VqE zA+5g3=OnzISgMXt=A2i-W*P@>LN0$dBC;bz1PiB|J`lh)iwW$G{TYoM&3WwLvLSTC z08D(F0>lkE#?1lO;qFnkCXCA)ULXM=r1W;g<*W!{3BDj7@gj^nos$8!yrCJ3$}E zNaY^elvi&H{+uF#3S$c z#w->DQbUp%;{IB>r=SU7j#nSG)TcX_k-S_e3L<$PYVii32|leWkNhsSRKTp_Rqt=0 zuqLMkPlXvyGLFUw7~u3;aj+G`{#orC)H<^JbqZ@4)9`~byb{!3dPwfeGulZ#^X!SCGV@_w>uJQgr#&#Tp zERi=Cz_lY}+MqFZmM{D)FK-=Q6Dy!b0Itn!p;eSAr8-zHF;U3HRBmw6Xk7=sskaFK z#S{jxO2D8X^5A=f|LJd!X+?uKOogn0qv%SaP17_%60$it;;o4 zGue9#=Sj@q+*W5lQq1&Fjc04w&Es=;i@u z-VUSV)LDE>P!0UybS>|9t{=w0u*k}$%s6mg?5*E|_5DUrna zJ3K%a*{s-prHi3GfRI7Z+N>V6IGOtYAL^)xJ9&h7-yJGibW13%SrE$LlQ-QDxBx^D zPgV#ICf&+|wc%n~O8xX)mYAz{6^rZt%>oP{j#_jI&#lrRqh;-Lr9B|!QUPcW=6(uO zWT!#_>VX#rpsHx0JxV085a-!<_Lk7#=0YOd|6_urH=Z$^!RHTy`u)<uiP@^(J4)XFG! 
zJ+~Jp3KM30&M1VmkXeh%!z-QK=1c}elymtGGUN$n-6>bm(fb_hKWdueGjpPMUaddl9aBU!4oc8*--go1p?RoHx z9jDp~LVDXFuXq=wMA!aLaUu_f1n7$WoP!(?X;2_FC&Cj=2>=r+4z&G&1TZaMlb=gt ze^Kvm)m-q3DfS%#fI3K4e?~m4*$3&677q3exA6*4B*&+=%n@DrCxfhHIIIkTmXH^_ zoM7<6>FF30mg_0h@aEccVU3gb73PpH*0PSthaGj&a6?{)+CGlr!4ioocb4@5Zf^Js8r?U1*!j3+LGcBzk<5SbL8Y|&hy{Nc_JPO= z3jZkf@L_iAVys*V4&XAz3y~%XR82E9sEy*|h<+4$;4iQoFzWx^1Apk%Cg*nUeIg%G zb!kJL4-M;HLDgxM#R0BmW@7-ge}L17ZXCTfusq5rKKM3jf~Iv^yE|h(9_4%h6RvE0 z?7*2VVagNjCUUOuQYSMWp+?fc9H2@Y|E?c(PGPQJ+5L8S!vc_xDKEaJ2-?aX+~LMj%d8U;C-(8{!Lj_tRCQ| z;ssPeX~@YLjTRhoV=>()E`1g!@iJ~wZ33BBZ08YviSRzvvtzA&y&_f6Ap z(GgoF3^5lcnGRlG2D%um08J#lOcfWpI1shQ4I*fjIT)NkZCWiaja5TMhdXF}A0vf^ z(?k`3=wPOp$8woqr#4N4d#@eDCx<<~GSiVY`Ing46A>{aPU^`5+3vDuHXngtuHjZf z0pnZ+FGK5URO0$k#Qjo^Oog}>ZDXw`Kzw3Ej@iu}a_>7{|rp z0t^po3y=hX`HNyyb5!6bk?q_aXxRP?3*B9jsBL7?^N{G0!w1Z3yg&>xbjw`zx7}^D+kfGF6-tL5${|jol?BxvU06}6 zTotSjX;y^Y5`s=S6Sz5#VZ``>FvS~$2-RrZSBP;rO5B_bXwGd5y4h+C&BKf;C`Y&j zxIjQ}+b(Gw9izG4%dcRm2G)f5qKOR4O*;fo)fLeZ1A8 z^6!iK4*x3qu8A0r}0Xc6T5}le%svk@MrJz zTuRyN*Vm}WcIdqhb<)oadE6Sx0v*>3ECz*VjO%U?CK>qcK!koIBX^&ncBTGPoE+f3 z+8A6NjDq&%2kJX9vl70JVDaP4~BcD?k~ocx||g$|BcZFjm+bTQaxj+QF`8@ zN_c@6Yzt*AR-+D8J1Y#y+8_XB9j>JJxgEp61cN2~##Kn>RN8?A=Tmeq+XAnPzWyX1 zB=BnsA6pH77iuuJD%@W_V+VL63++N5xtLfj^N1FGNd_GK?#12Z>yqj^lxPk1ixb)z ziTFK-IiS?6F#|gzouo!l`|m$#EM7MpX2S5}P#dFfie#Xe1utMNB;_{dOH)egg;X#l zhiOf(9V|*oA^RMC{UGGq#%HHXP*Lk^l!zKDZI$f=jo$ihvw$s}L3LO}{}I{=!~aV` z-pCWt)JAaklepFMo#JANhj04dkMpO=g+si^slP_g;{L zL3S?Bm+)YZ65F3Ui|*VhMPOw~oRbknj=H2y*)~ zrTdvmW%Kt8lE;w4{p3vsyk}@na)31t0Qw|E>3~YMdqEjxYg02z!&q8hWr(yf`9ZHR zOX!el+zY|=lSl@xYqRoHV1l@$hoQ@$f9&bz`x}19zVfuA`%)#aHI zD0?%VpmsvPnE}q+ouxzF;xFiCN{AAV+bv5>bJ!z1@wwlQYHhLEz_Fg>IpMLR7&*ET zNIxuYjSOHsp8+0>Uz>QQO}jMm5EQfrjTvuKe~w^nwY{SttQ)LFy1}Zk?aXV% zzQ>QRf>lWy#(?Zx-H3AtZIh8g_&p8(q@L@E9K}AypP^=_CLoixNEC>ol{!q5GW=mL z=DH_qfq2KU^#{clW87GzXxoEBM?nH2KpjdJ7V1}^{v0jPE=r@o1<2`B8qKQI@Vg9a zqm)hdmy~mx6e~hbnhM#q=_rQt;L#5zo|4VPUm`VR0{VXGjIduKVt!rC(!l`n3_>4;A*-ND_%qbj zHCH7Oc(CFOAx{NP@*SPIU%OJs$Kj`}Nq2O2 zK&Rh5%}wc6Qik9md}_S^gv_0k?+?ZxVZVlUQcwJGBFO#OSlg-WIIMsm(ZTQ1@JGJ~ zam9^F47v`*xe1Cty5a1FUD02iw2`5{#GLiX zh`?Hm{WxJ{{|Hz!+j4FS7o#;oiF$(~`8%K>uygMP9nbOPJ&hZ#^Ha|;D&(|37uVHx z@Z+;5z7?vzfyn(zL7rA8X83VufEVk1)2$Je1&Z3by%EQc4@`S>cM1c8P0VvrZ2u>g z*<;S+JCse)S6AY_2NwzXPP?B3DPf-PX;>r3k|GI}t^U#A@BIhv3?E&ZEbTjr0B=|W z+|4WwSs5J#6YXTKMh7$jw)+q3a@%euiGB8oETi)8`L`hH5|4#!Cj3~Z!P-!9Q+&wp z{ECfvwv4R|7CA?ZvPrw|9fMmfPSs-_$Lemy;IX|XlqMMYX@TZM1i0b5Ea9c?0!*F& z(oEB$^`T<{|8aYiC|*Tn?Vfl=;xRIziHu6^`^>}h120>X$F1;7pOJ|EJG9Q<@cOBR zfpgJ{4p)F;G+M2o<+dJCRNhzOESp2Lweqd)LK!4f?#EI#(3d{^WnYB#pqVQ$Q{W;x ziy29{m=JS{U!*bl<0oqe=KJX-y!gZ9F$;L2El5romXe>*JBUi$D@r~T^Q@-Xs893< z*zg-h?JyWdp7m#gox<8M{HJ7M&o9+YFVeOxapx}RzQ83-#6nC*T^I;Dzb2|ZeCR8) zhvO9LIF%a%A>-Q6|2$5O<_!{zd%CX&`z1|de5nlop7H!=oMeQ;gQ}mzydWF z&lG{r)T-|2CLe8tgy@WzeWZFAO}8_}1LrQ)imw~ej)$^G8 zc-acqVZKv`?ESp49@@}d-Zq42J%_lu8NI&&xuGNRrd(2_~W#_d9Q&4L0k!4_C z&qKZNmu{pVVKzKwHoPH$n1S&Fq~vPC2KuX|#+tXY)lI+_LAr3kcS71gD6c_(az|zR zy&Uae6n>2gpD8}?Orp)c@Y~_SS0wd$aPnLm`@rRI8U%6YsctSvwkg`t=kQURd);A4 zg|@wKCWMl`=R`dVxskq9@ne)0rr6Md6XKycHl0lRewipJr~mf-g*C8xIDsE9^*~Cw zNS#7EvEHnp+H<$y?UT^xb!T8dV2IQu&TDDi#l)nOTk_~OJyA?N#Qd;`)P;9_*E5}7 zYWR9IdXF$I`&in!^|?VRg;J$>-}#_0M80Q2v5AsLW$jf)jf;J`R1%J*RJWLuN>I1< zi9V;D3*F_x{Zv^y3rU(VGT1S*lYO=WE9QQBK)*%jd~M?~ooiEpNpM`L=&ZdQfXbt{ z;~V0_9<>^xRb*39Wn0K>$lZqj~*3i$Z zdJ?i%%8M0-dh@RNh7t!N8V>6=yof??iaNd_orPi&nU8r0{Jue$ zUPrpkx46wQb)G|Yg;>|Iqt{`<*5|==4fTh3$3 zuf!0&ReYgEcF!fD?+MQ;Sh&43LVYH@hZl27_Vg)N zqoSq`12T88mrvI~aaZN#VD9dfpZb8<+0&}om$YM*t$us4Sq7_JUaQd3QN#4xm-BM& 
ztg?^aSxI*Yz4r%poywAZ&`vGpV{P*@lKSComw#$w?JP^S;QZ?sI&XA1N z91TK+w@?pS?@HtFx5K9?6*_k)LSX8(=C$=kDMs{?WLF?}`Y%bihH+`i>a%uFaKdMw z!Bw=fGh@9eKg``*=k?u07e<)pulPrOI68baZP#72e>`((a~>-Tloe-xqkc5+yd;}y zxlyav@RqY#ISvozTinOCYFy^)hv(`g>>SJM^~*ySN)A*3$uTGdt+-5G4OoI+C5`Pe zq?)Vie{TZ`VQ*K{D8SmllpMkpAUd-1Y)F@rp!LH;SJJ3K5TncebGoL{h~445oH*yX zI*s)RiUCP{i}irkBY>7TWDtYK;v4#hn5b&!>9%$dLTwR&uq))6=B*Jxd{Ayc-58~{&GkceK z5WvOD@Fc465tk@g)jLbSY`0-M}EK|YUv_NT;h%dxuxixq` z29sZLp)J#&_s1k$UR*J2FT=yOmqM>GHcLfhB+I{4g$bi%UAu+htM{c}YUXU{eyRjt zW%lEkfGtkhzE(O6gmhC2)dvvHze+AHJZ6x*t!pnbe~+d~_oC84Ir&C{zEtfnkBk*x z!)9Y6(1lvXxx1hQ3#hN#YsKA(^s(z?Q<@!kZd%*H6!_DyI0SJLEzpaO`R+A3P%aKJ zewkEo-0ADmB;LjdK4@K_FO)gqM4g*bnCU};zGJ@iCuQm5Dd=t|vX9{Jo^Kt$Wo%{i zbDcq#a~lXdK?4T_xlq;++RjqvG_D$aXHkO zkK6a|kn!m^NlpB^IIM*6=}arCpOs#I=WEf!2pS(0B z-dqrhDU4?^ExYtx=n%99{(j%;`g>OO_myo* zKZ`YwshT@R>tltgyg-5U&{j&NL6UmM7aM2}g_8!G+*vZBx{qOvOhk`>zNq=M*0`AZ zBfI%>{_@j_=W*~l0%*LjtF^df&po;I$4c-#y<=eYhSj{{kN%m^f8zzc@gN>&Lv}3%}MOV0tAcg(WBNo)YK(S)Db3^Y z-4Rz2pcKEqu-3RCm(aREOYhykTn8PZmTaXOxaWHiv49gWNE7z0|FRxsrbYiw(btf11K1z%}3NgK+OcVMa^YJ6_#$nqMf1 zJ*4dc|Ccs}QM-66Mty;QB{&M2F>sD4Z<{saf;-LC6$GFsHA?^Es3~ggyBSR`AgthXOa>+4SY)LkRLTX<+GFKWhS%$KLHlVxL+Vup%|yEs zQHUt_y`C@E@vXnX#h17^RkW>VRgc+l)UX1GE6 zv+dr6W|$U0YJOp1k?d`&vwbTA^QINYA+_38t!I{A^LAJq0`jexk>rAQy}D*{FmbL* zPuutU{H%}Bze>Z$N@B6C{4W+vlMTMX8Pw%eqUuX`vC0KcZaPn2=`Z8W3t{7bsSS?8 zy2y}Cpz6jO$gZDR8d41=7gbZoMTd(WUo(`(ArfMi`h!@>FCGMC99sWlD!Rx-B1GPV z0wLeWCWo5j2dX~Aw-4I&mWq2ATU2aU z-ihboMy%CK3z#_IOS@*wX!>=NzM`qug8mYzvt3%p5Zy9X@f zbKv6kDObfS!VL`|uGx4b3TqQl8NrBwsP9`oPYwxowog#qZdO=A)6iaLb0ds;n*XLD z=q6UAAkr|1Uc_%FjV7oOHaKx$g-#SmgK8CmObuHTleiGlPO`+z9E4~@`d_5jTtjQ) z?O@$Jkro{AlF6Zo)~BF);coOlkVpF4Gakn&5$1k<={|~&l8I=f!U1cX3FQjE=m{k> z1)>oLvqQE664sWR;3#87ctC@AIMf86)_4+}p~EDM#z~ivf0EoiS}OftIBh}Q&~>7B zs=<5(I+w`i2?lI~W#K^lBQv@XuG9NtX+&SYvK}_G$vKJk z`D~&#&s7WV#O8dBeQ|}``LY5ToAE6*?7b^eZa&fq8?fx(vQ>TT^E@$Ky*safLtBb# zgAE{)|ByFmADqmx&34~=YXM?QccNDqg8R%sE7Kcq>aWXr8C^ym!_HAe(FZZ?SsYcN zIYq9IpjmK{*H&eQLKmh_A8NuyK4G%}$ci2uQU zW*L+awO$Et0li;UQUT-jKOnDRQcRL?RwjkK}1%F1J+Nb znrE%Ea*Cr7d$9B0N1O(W5lxBTHUk zV}nb>fB5l>j-{Z~?}+wQ+IVfqb_NdLw@D7~-A)Vw1Z!lOf!j|&Gr@)V^X_Usnfdt` zlP~mV!;hB=)+wrpeGCR4@r%t3>%YK7j!tQ9%ptICVA+1NVW66iN+^F z+`YZR=I}9706+Mw&8cxc&`-a(ROoMCa=0&q8qCB`$z`0zi%Py69*pXss0-sg@*jRl zMfzofDw;@yo(dq+}kZeaj>J-%>2L1fcq8MDCLb|wI3(HnZ*pgalrtKzKA+i7f zYSm*vW9GLIVWV+Mxu#^sK)TZ&Xkwf|Z?asMrzOd=5GsaU1Wlkam1%rt@g zA9U5W?%%22G)J5|;#~sP5)qdaJ%?$fyd*A0yCm5_G|H?w559MQzVnn4U7tum_nk+Z+|M z4b_Spg%2st1`tyfWE`LB`P$QhY#AtWa4#&3+XJ2S&Gdy+qZwfji7ezH2X>at3b=Pm zJ)otABwm#u`~{U1)jfZ|VL_HSY@;bhqYFILr2*W}`5DyOQloib1aXfc`HuQEYDeU-@2=R9t9yN|&keqeR&k!dDS^}FK%`3wvU-o9!H*a`{zG=^{_-5li*m`Rm*(&2* z8dh%P)W*K2*y%!|hE2`u|H~(W?NW0E;wyO~m?LeYG)>RhRK&YWt9_qQ0S9SfdCFO;5i3zCS5%`@;n=u-Ulqzg>5H?w)$IP2F>x zBR0{{feDi*a2;&o7zGXcTLT)HBKqrd+ZrKf|D|e5&vz&4dx&^86zih>-@gYx8BRUU z%NNugk0kWWpm_J6mYm*y>8k7qBrQ(2-0qf5{d{nrq_Y~Rw$z%T3wm&O{(B^%EO1fF zuLRcjN`TO$^L;$WiDk64z1{V${7W;q{6#sX=~g+r9&QEh6A%68<7d}~ASJNMRfS+mW^n6S=+EW!Xlq()0TNH6!UcWC+IkDAsO(fd8+pD`vWTow zVjgdCT^2i;OENos?nnyZe?fgmhesJ{1%$h>yCZWT%E3i}J7Tt>um0kfz8f$^VbA zcMgv9`NBsh$;RH;$;P&A+uqo=v$2hhZQHhO+qQrCe05Q`>i+ds%}jOo+db#>!E?^z zCkk<4K4y{x;%mib1smrYdd>VHQ&1qRwSV+_swNU-*iTg`iH?UNJT7w}wt~XL2(E)< z7Hr6~hg^a?6xkD8cJe8aLg<(Guq_$`SdRz_eVi#^C7an+G!W-SXsawa0^fs4&M zqZw}+HSbz`?}CdfcXJrCemG6IUS;hG;LeHb2`*b)eHP27VO?SPXS**whT=O_;z*lR zlsiBorQz4;RF|&ex#dx>MXM?o3b1A8&_=<8nk*A$7>QXEpi>XJ%aW6| zfc#JAi~~N-Dqb5fS9PH6UHJX>Cubc;yACfF@5?vB4*>vQ+ttCn?p)~K8;z%1cPoU9 z#>d0?L<(l#D`=aGtIt1HR5#J@Zo1DvgCE*k-FW3`jBLL+I4N;dYO;JF8&~xVj%fgE!J)DRX+d0WVqE#x^%O 
zj+R;e$-On#<)5TY`nGIX{)E%ykl?b})o466@T#a#c`qoD+i-k5*33;hDh6K!S|z|y zJJVM03`o$u*XB&Fa5qap-D4Q08a=T)CqwuXeT-vvk|2P$x@rp*D;TQ5+r(-!dx7T4 z@{wirh_vt>mF0R6mnegRigf9t?rzeSCmEHT9{clwHsbGLxVwkrR(b^n**0d zvPwbDpB?L8kx)?xP=3yfYx{F$krkyQ5v@w)lF^+i#Yi1F>buz6!23h*H!lv+5g2#F zb62=7>8-oRmq(}E1JY~Hju4BIL(Vj{Bv!0Tv<7I?1^)i8tBVb9a|8z4@58ytqV9`( z;(63f8Gp+e`9>$b*b4J}s)~GM7D8-R3a2#7a4S~tXH{8Ce}pu&BvHyvTpe8P;K#E4 zfc@ACQtKkOfcN3fk49#@|4jB%v|C)MWubds1_eNg**`dQ;x{+G1oAi>5YhKPoI=G=cCVssAzpveiy z);2}sB+Gk&61)^@o7JI_6SgzrsAi~UxK{UM|Kk<1vMws;8h2#6${Uais7{dOg-Y=> z&g|565hcrep4ISC9JI6L%tFee zSTLg*I>QtZA01T1{XMj`9`8eiqyQF`$8`u7N2gC$+vsMNVz5vZhWGO&DSFAZYvRnZ zU!^?w!|y?xW&=vL3#U@-^wpc}H>H^?^tkPqr~FFXkpt~YOZhwF{c2?9jHurhP9Jb9 z_RQ3;RO2!g#L%j-Q*I!G;@N?zgMQ@(fG(0I3(GIPXlAD%%oL+v1@D+HrGGJ*!%DM? zS{(~;_;V-YO22!ACV%ZX!ECHeZ|LWeu-*YYN(Za7z9Yaf#fbMX zPwgrd1c^s20&p~5WFk_ACS*;J=hKO^A^%FbAkL8kYE^iRTEq_zcVLv6RCBEljy-9O zbq|dhjPaLMFcRqcT5`LP7zpi5uFNKdwH9+Uvj&e&Ox@_H$oTSrOhb0%!7^F+h)0DJ z-WjI|f&@){_KLzJwPxnvQjRdMVrM)8z*u)}c^YqYBXW0m@KFs8^~}&GKSL_QTB8MV z2Y*wm-~{nKR#5Z%{cIs8vq+06q!z{4HuLwVKLj@5RdB1p0YqhqJm5K4eY^vzAR5=5^*bjE8^&E_Klk0b zEUv0LMpu?s{TdS)0n~Lh4M|C*OuHHCs+sPuZ^^%wtJk)8=)0zJ4Me}Gd7*M>CoyNc z$?MvFJ?v529sV9tP+8m_mFtpHEZBzLO;guMxD{@K0N)17atO!h3^?KN<#>?2C|klT zyhJYGJqc2cH3uez4->f{v4ARvCHf_n6(>OQhb8M+!+?@r6&An*0!T>L2T~5*=|wP- zuJN@@H(YNHLai<$tCJK@)%4UsxnNF+(?Acd~J`#NXu z%BfhtAOpbZz+{V~ts3;gXs&&$e7a--c*L!;elW zE$+ANuO6mn$=CvH3x93&IbSJS=XF5&jN&((M6=M8Qqh!JeqGM4fw$7=oGzSIt)VH2 z3|bt?fTX6|&w6H3uUDa4pyS5+f+h%KLZc>=NhrkLmDbNN(Zqbhz$21$v>%y7}t@u`k0dfi!gA%!l7LgIE z)w+ZnST%|3uMN8Xx|T(obqrQtP8EwPESDS3j7GWTKry*?Rsi3&-_Xo znSdD(&Q<7{z@-AMs2RRQ=z&9*RTgqcHV{jMYwY+tO<+~$SDe+NuK1JhJFM7gmt~}D zY|L&=g-%0P(3a=FBfu&=Q<|rM)Eq=EW7L zMLuhKm?Pi8$(|B&YS^rBqYqm%&durtMNiV&^r-QfrN&qzw+T!vf9<>Z&8_=VyH}0r zU{zHU5A@Dp`vEsHVCDUP;+8|ax991g zzVqvO(?Kv=NfjW?Svu`r)jcn@`DB^ka+yOw&6px()o=b3z1H<1C^-xa8+97+>d(ob zA4i=c1>j@Pe)Lrt@0Aj zL&~*3-uESJI&ze)XGEOj#l*qx)Fn$BejnUL)?s+JoeHk)y&q6#RF;(LF?V@?#B?%m zUtcM30Cck}7sk%WMv)qK_Ue$MRNakLUlEUF&jn=smU65!D#n{*>F2vJ%f_P&u>1yjfHs4KdOF&gTLAWZcii$g4>?!K4G>QlHhOz zm}vr$a>mCK#-#<>e4Xf+x0<_ZY2{O-9LWAgW~X2zkT-ClB=~kvqR%H05M5XMHU3<9 zuqx^+U>ugV)+_N7Q~})vn1B(Bl<`xomuD-p95;b=iBx5j%={ZA&QJIMM2Q!F>EKZM z+#B}D4D^&dRC@mK;SD(26cRxBDLK=DgKavW{z~K!j24p_wkW+t^k@rUpw!Hx_A|%9 z57gT4gL{xeXmr?vf}3Y`!J$&SK8I%H{HT97W;R{gp_Q`l3A`wRj^vb99*rHI9Z)om zx?OK&P6IJ5+BD!0z(R?4DoW3t;COh06I_CNDpOQd>LZ_?O6pW{I8L6Yo<=DiuG)|I z=Vol21YEEkcW`TdUtsP>GcCLYra}$n5gJ^c%O);#dD??jTOWlRhwQRG9vnHVYGaYY zxJ>#nSWuts)SuYt#u}(1x)ID|Z`I-(@-Ig@M2swCW5wNO_4TP;-UA7uo&`YlIN77X z@CeN0+vspOu36(# zVVAXt0i&yM!zgQXy2D4w41fI5lk=FTa9Lj(10c_Z%${zI3Q1oA(aFZ@Shhc&L4ng3K@{at+c|b zC2V5JmU;uA%HqpUT?dUD`Ya+DP2FT=4p^HeJ)XL=g?j3v1x#Nw>?+%?UJmHBFoa$Z zWD8d}{M>_8CYIrcx`#{L)7zyb@_}Ss>AJ)e(DdvUAll&L$4N;YmyR1)NW|yVMSEyb zqVpQ+l$_qWa?t&PeR$Eh0)x}V1z|f+vmbLl_)hyS3eteoQ}#GxH+%LzR|{d*=QwdG zQux~=Vg>0-)(V&nUBuCe)WVhxQ&m)>r4Ts$-q?@HRh?`=ACfc*x`~+S@w3Y4dYG6Q zO8eUki)af}yh`qq>7$W5$s9IR#h+|ju!YlH>i$3lDU5(^|6;e#?mE_9wSMgYsF=mF zZR}-hiI;5^U^<`;oEZIc;op@x#{<7k%>C!g=TY!|o4%U|@O|U;rEnrLgKcX@xV{TK zqW%ZfpCL>B2dW>|3f^yu)!qqMjD0>vpKSnqL-k!fAqHLbEm-&nEPr78=7mdYJ|tCd>P>$4;HCc<@pM`o@hWnt=X*(!Rq2<~tvMMs z{;G;LY$e>HmW4LJTGwvMVazd@RjJWJ=Fh*6rE_W zMEShQqFf!yXt8e#XtLn7UHy_iDd+qeES02?Z~SSlj^IVV7y}M_LBT&8+Mh)GSKVgS z2jLIl0ey^}3!yqzOM1{7ZiS#}mU~ZVLVUZ>M+IQe{}GNb{fEk@V>g+GM$efIrs>Fq zLH-A_ad)^_r<5yr>xOG;$#%T;v4!BNLu<3GcvEPc-bM4hT5jS@)Z#DypCg+iFI^Kq zsFPZ?c5i31iDR1O?qRhBw6ys}lSfp2Z#=>(I0QqG2G-{Xw&m zveNtSu}7DE`@a-vd8cFYwcDq-jJL6rMnQYq$zKaM zF;R|O4eH%D(vu>_V={x4ndVJpqt91UjT?ip?&Q`4#OE5N&(R4*L84~_ukDevz2C<-?KmQ& 
zreKNIoAAK?0Z-Q_(F*d2ljr;$qQ}J4Sa_WiAiF)Ti*?IvkhMqGGtuXk#qQ+fcK2(4 z_4wXDMZ^S?yiFNH`|8WO6B{tF5N5#)9|#%sFLV%eI7!9*@1UhGor`LXI2W$ay$T|D z(D^rwW}~o2ynVLf`1Uiv(1=0##eAZLanWa{(dX*Y---Z4rd_N>#;jTadGYqhjXoD0 zh=REdHucP0CUD)Iq{v9IWrK;oIU>gDl0;|7Rp@=sTQ}*BpHJwAEi-XT7cJpX&5lLE z&H{_x8%BsWFpI|5oO#DDh;dJTFVL*wC1Cd8%V;+qGEILAuVgAxg`9WRVF2aF)u5J# z(zy+Jz#_9!kXg3!Sxb<@&j0TMkit|{s$0bwo(R~i5hJ5NPB}#X6sa5g{re zx*elXJU|i1Bph`o9bo-W@f{M7k@%Z?&HO%ti^z;-maJif!c>*QfMfx&ply^<{`J9i zOR{~#&sIG(ErLSX-UK1KD`yrgZh%iaICuw}xg!7nUn3Hd*AvM`kmwggfWTJ5r?%A! zKW4M1Pvc6gk91mLiz5dq_tKnMsLFFm+Yi(IMg8YazPOz}5AQZ==NJdhc$yKW5=Ltc zh);|*f8pV>CHaXI-W`cn+QscD3muh*0J#oyq{ zIFp5OYJ76?S9{W<&Hx_YsN8r8d@{FK1AcRxIW5_0i{<@h`%7>!Xay&2%WohmNL?w+ zV)cf{2GUI|5@g-=2FG24s;jm}$l02qv^5rZiZ3RE16D@9FN`C*Cz}_5#4lcKW+9D- znfIloiRoXor%z3yQ(jLot)fARrv)!xJaaYzhovrfJ|-VhaAfU{wtaI*y6?aEH}o5F z$5|>DE5?Hyu(~2~@6_@Hv7Cw2=ab5zEQlSF?vxxHv+EMkm%;#(NE5k6%^hd{3F)HaunjlkzHsD zMI^%KW{NSk9_#=y&}Nf}toju-o_xkF4rD;TWooM2iM%ztHq@FxYxJImG#GXM z2xRG?jN;#B8|E!*>?K$1At17xz!`h*mT_W}{;wo^tIGor5nBRm{9G~(iPA~$Ek8kf zc~ThN2RQMeChacO_sIhO;gZsg+KA6Ma!ds0R) zC3CCGhUiu!3^`z`E6L312={CmDfvQL#j5K52bE`?z}SVLd#0R;LBD*<6%Q_3eFpUG zsHpOJ#PK0^XW0p@aFn2m8}qtH$PaifGqEOI1ufCqZ=a2wJ|l~cd7H-tc=!Yn^enTj z{no@F6w`}%EbH!<$>5I(pYFH79&CVM(8;up;EN1UfxFdjK|)~*UlNwMosFl;aVubh zy9J808kwg_+o=CWZs*d@E z9ao*b0XJ&(vw8YQEv|C?MK|*_Uld7}s{N}o0#*&Rr}sMQx0z(0IgLv}Q}I|-`e3}? z_`Ee}RP6WHsAJJz^2%>$hVG9RW3|nVV!r>y0{qz7>{-7K-3HXeh+HLF*fPCckS|0w z$tF550(HZ}$2T=DEkQKKBZIS%|fNccxqkP`YvW&m> zJ+|=bljx{NxWWJ4CAubN`y@@W8fp*{2@lQ^TDp}bEY$mf&)fsu1>){>fW>NB0(M;Oa)z^r)o+=e zT-usX#Bw)O*$NSuv8wXz6-dg%9##xlZ^cMhHh0ppBIIxpT8CQZPP@kK`nsxQ|A85Yob-0PNbo82D(1vu#~0+GuE>I7lPJm+S6 zglHsBHXweEMn8iF3>I^s=`}QCFmYylzR~mp*A!&`p>Wa_i(}2V6C@-WxaGxj)$1OgB`tv zl_fM5|KEUiNX-{VRd8yJxZqtBZyw#zE#-^HY|!9c{kBb<1)FEbH|RWB!N#i`u_+FH zWq7+=@Cc`d$uW_ydsz>z-Cv6yIXh;KZ%^!_n956W4F^4G`Lt&*|6Ond7J~JAbk5#P z9{QL|?!Gg^bcA%)t_>^~$IeRU!+1S&)4uezA6#B5f0VFV&%{C#*u&=S-}26JOxgf< zK@dmBK5@=X^6s;2Jke|9N{O5d9$VVy*fHxixZdNO7n9lHxxHxd->B zL;4-AK$!O%`Hl(sGFkj&JVv#*?!z|3q;Kc+X4=o?Y7txG!M!^XG7bVb7hYZaixq~d zY1{Y%uo}%@jGEOIYC{9^V}>gUW}v$3+9Bzp>_7jK2+kilU94~kg}%VXo#E^SL$Cq* zOR7d&#m^EBPb#kZP?y#~IKujDJZnmyn!@RvyGJi#A8_zjK}xT>-?TPAo)UkrmWjmy zy1OVdQv$sCx5wEW&@FuU68Hn3fAs10oZ-5f0@0`ex_!tRC%TuacEgcPG4*=l(bv&6 z7<;1$SK#|JvgzO?c?NU1+L$ORT0i8tn1ONH`2h~}yHR@u(!+TaR3$y-FU?&j>1=!g z0&I(3WvkFo52d2vh#UAryFv%xOvvriySVL42os(;;QRUkID@MfA&jMw*ahxq!*K<< zE&Atd5ob|{mw=e8sI5fY0`Mqfi4xUyl58i>bq0v0085I>t=k8kOW5rIU*B4;V zc$Jgj3DKRFPE$mH2`&Pn6R<_+ch<;a(bO9)6DyiVCwm}dyk*l#0&y(w3L@e(;*tN_ z85n#R%(DqOOIP9GBPd_KNsX%x7;R!nf@Xwoe*ym2&su=Z zu~-UdkRvdSo@$8&naqd(yAJvyRF^ToO6TZLjDO2&1g8n8$qn@12h3e6-+&HXf=wLw zasD)+_Bs#}0N>?6ZB?sNfAZZufM=Uvr z?;zR-ckC@?r5Ip)16p@7P>`YX4agWz3OmAS;%2xlq~>ZKX%mGTi|`QsUBA11#1z9H zMc3fmv7G}nezE6wun3^tUGKqhKS1xnhAjK~QTR7~#KGHFA~ht2yx#LI+3kewea~Ny zoK0wnhf)!|h(FdL&xq_O_TMRlXJl~j-r zJg!I5_bvka^qv_3o*0`vV&hfjFUAx}8<8MbH=9mltSW{o6e|(1WjASnsY=%xAC$qT zvk;n|a&^`Tjv{%Sf zkbF=Y=dW62x12v_N?UK~6khq3;>4Qd?$d=8=QpvVpQRFB7F>nP_kIi#UKx;W`gF}m z9Ie~-xPW7jnN-p7P>BJStLQ=CX;d$6JNk-K4K}n0bI6-tyAuGud((rc_PSwkj>kZk zpN}TnPzcj7seN>mI@EfhF#neR!HhaT5Z?Pck#!W`U&NT??l4W+>TqxwrRgW;bXFvu zcQ@;tZ1OOGx47yD_VLqzqv(}paIx?jMli3noHXdqNpRnoOb9?H7aTNzx==RIg5|I? 
z*cJ)!o)|JwcdbO0$*lzKf<_y#b2WC%i&|k&bEj+QG%#CujT$3lUWw@I%Q6jg#+qoR zq^>6{2^?c<$r=TOkB+cbCM z$jaq|Tr2H`a$3PD`SU(2>QiR9@rOljIY)viojt0~MI?^J%?za4{BXHeZ9uUDVEYR4 zzDc1BvFkH*j>H#V%RkyZuL;Y049L;d_E-Iht0_IrU!dQj6pa(bI#^Uuy`y-)%afk4 z(nTIRrb>E6N0XEq6LZ0>mPdL1k+sru40Eo+>rov!hu5lyPMUXosto#46lc6(m>({F z(AT<_#3OlMy`jIGb^AA|%0RA$SJ2OWVKrljK>$U;gmUTdR?wc;Wa~5rtNO^J5O$y@ zXa7^llYcgr&<5+e@rR>i0(%Cf^QL_PngC2q6 zYB<01otwuOe-ivdSQ5QHMj_LG&g6h&Tgb|W!{`8j#*KYJY>Gs>3y_At*>g~2VMVG1 znG1Gi9AH?=`@enCYn0A3xI~@mrp?=dRKyK_+FhYQaH!I=z=S@C_~{Z>?DMiFIO>w2 z;l~(*Yw7w2vmDeX-e}df?{X?NjfGq31%@(mbz)?@BOv_#f^leVtr&ZB$ir1@f0tB| zht{c@SpgI(v!byCODCjPaY}B%NF$X;0jHMAs>P&PR&#*9iEJyaHvjwIkY#@v#`KNzgp4F&|-C!6_q+i|6_%iUKewZ{B;RH+NQ_! ziKx8)GGHMP2U;RYUp<%p&75)gg>t24#blAK!d;V5y-mvcP~NAT+oj;Niwf zhJV{;beGvE=~yO|44~Goq9ulWh`-k(iS-f-5fp%wurWc*yWvIYT@ha0ei=cuOIE)t zsh^T#$iXzb`-{3Ul>Vs#p%Nx5!N)EDbZe8M3x`6O=BRE;(CqF5|@L5wO z(F{VVt}`RRM-Qbox?V2Sg2N(HLBd{Mf@lr)(#C!#ks^H2z{+m@M%@$5HoBMW8@IiJrGgg_y+bC#BiSe;%{Hc!uAt@rC*g?A@RL| zmel$vvgmSwG%q$ut|sUzj*fv50sOHWQa zK6?_nQg8z!!1V;np5>(%9?AmK#@L%D!NV77^=%58c_jV&IR*yJc61|vY>OX0`1H7_ znyUY`7hT|3#{~@c6Zx(nk49Cg)Y;M{#*oanZpoLh4wy$sqvA7xKVX$*D z-EZQ?)C-;jK9B^*WA;0?ax;`$GtB7+%4xlPWjxq*1_U1fxgfm>kJcmpg5UOKsdAHSp0nbOA1_=GVG9B#+C3g*2*i|%o;c!L?{&9W zJ1aPEhVUPltVg*sQ051UT|xNoAi*qury&aNV0~%h zZ!>Q)j^XnI?5RDl1bXIx4Q?PWq;~<@lWkGAsf_@>jK}xVY1|FbtZMJdQnOJ(wUz)9 zvb#7q<^_)k{+RIkgE8#k zDof!Z*8?rVP?k3V;dlq+Nw=RYQKj0nu_V6SffL>Cdy*Nl7G@}KH8KI7gK@3tzrDV9uSui`F^0i&5w0e^-i(dXISM&_EdcwjA~snC=;! z5&kMj64yJ5E(_%(d!YK&&dv=w15}ja8(SZ)E$AQB=-(z<_#Anuu#emK$JP)C(TwOV|?(>F(fgyH$y0rY;RV&Hj@I-Yimex*I}n}#N5<6fh#t9 zC}r`@?N-*zkquQ8Yl(4ee8O2t^i|CI2L&hgL{~_!r|r4efec6lpr?Oog$YR6Q45`C z6v}g7I3P1AH0GBYIfBA#G%061#YdF{uso|#w#Y*OU|Qk*Dc+9?to}ZL3-OKjQ-;%D0ZFC*==?GC)n9wDBx9uTzpLuo zAIH|p?(0_exyEz2& z0_2YP1qW_n!m$2VljNWxLu(m;7=jJNhfOmE#>l@SQd=B>4!zZx+rBjZbQUMij{`vX(J@(=Y&}-SZHuU#Rjq^ENr9jsIE6 ziDuJY0c)YOr~^=no!UFkoQ7oLqm zc(u!O<(MU}Ef!{nW{a5G|=3D_NrKFrZg z)kYC%s{-N+G;20xK)_Q}{ue;2F zRNOuGqFeJ&R$kK#=V8-c<#bu^+zu0&z&9OiE^uhCEow@=V@ZL#q~f&!FUCl1+Mo)w zDLkXAF1>lYA3xdnj~V=wUIuL^!=GLM+!1D0HsP<%bt1ugnX=c9|BQD{Lm!(tIiQ}v zy%he^POJYdKiawl6IiSDOBEYiOJ{S1`^08z`|Q=kWXXv>M_e4`)A{v>3Jd$ysp}P^ zbm!&@IYZ}K)JkhDFk9!|em3prId&RyYQp@-d1ex{$Z0(@QYM0^s=WcNno|dhd`_&g zg{|jNinI^>S!Mz3fND7Wdhq>PJr$8#~XN=u>_^ z+*BjJ`Gaz$s=9I8ELTBXXnbFOlO|pmq_hMS_m|C>@YAv5LWH=7rO|@F#6z5jmuIr& zg)IvY6FE84dzXrh%rs}*>2uQ{F{MR5b`7(2&Z&la;_(`!#iCg<_b_{ckM0)#vulQN zz*WWhzIBdLg3hKX`+d&Aw5a}rHKeKd+ft=bMBe6{Bh~=>dO7E|is;B|ZCtis!xJS! 
zbL9LzmaIh72*yi8{o~1xb$KjXvRm0v;mBrOTF6csSTYuCC(hOshBdLuFLj>miM=-{ zGJt6+&+T4k^cBr=gMCjhC=RaH_3hMKI+(M!v5&Vplr%Obyod9{;D7;$uY*}A+vlZDftOmvKzPzkcp1RC+OKQ;hkgd-CrU)P2dZAvQ&6 z&3XKw#k62reKU z=L4(bLw=f07yopSCe&euz{}%y*N583%R|LZCJ<>d9e{pB3JV}tE*#m- zS7N0(Rgvx}Mvb%^N)+nTGHPqi0|+nMXd@OhagRtJcwgm_Oa6$opDq*967*)#6$yJQ z?j4@KOGRn^Z91cC^<0dBOR(PAGlFFrXI1ICu1Xa?7HYN9kqLy2gVgiZT9>=v=W$=1 z6OHGaKTF7@#OHtbcJ~0-ie+s>Yxe=KDh>xs%XvfW1->ccHv&WM)i z$D3s*LaJl?O8%j82C&>tm*a&$9BCk~&T(;BsJv6%7nC#y`m#t_rOv^O1V5pX1sgjp zH%@B(SXTimfKB$P;Ycvv`wo%SQry1FO7sq6%7EzS*B6&I=i9q{)K@G`_q0Y%t zVMtchvDq3e$gh*-R5@X#Ge&_zeU0V%x59BwVASg^ZI=@j)3+K9dkI?jTR)`IxT{{H zQ)!1sx^;_FmwM>!5ic<1t!Avo{8^zs!qQQ-4t8Ol6ILNra%DwYu3n`5(BgccH5mwK zn!`=kq(mX$hpso++Rac3YREimQSrEnKgdHT0Ft#B@~(s|%}Ud^W+?D?P2+nYgfx2y zaz1D1S*u|gxc*P>rETy0ibaOeIit}c4Ga-W>M#rz6b=`z;7%#nS@GMWKYzH=mjLih zzCOk4)?THtS9CocCC?g$r08*Wsy%-+%DVi;<7KuUe%QzvnjVl0;^^<>b)GCl7i%@OMT?o1>Vj?slOyDaD4#{h~7q_m2I2c6(;5H}X`1MzhO| z-DU2UXNT6shkY-61m0bXHa#&8=$fXpl1|7Z&D6=t$be4iwzHP`{MN=uipjcc-H>Og z7E63hw9G|UsBjRNPo*g^h!7}`t%of5L+9i%{2R>4d51H2*ZBwEIMqujY?=q*e+8CfR8r!9zR<%VSaT-mT0L+tvc(B}dbYbcD!vFM|(Y zo^D1-Di%Uf1+tRt{1BqaX5S$Xo0N=AE2y_@GjiS*NQMAXpUre37BusBJba}>;+hI? zTO38z6miBOaJ-b&!a~H&21|8P3VRl&q0_daVqsEvtf~e7E~h3uMXvnzWAH0p1 zJx@s1>nh6|&!@(NWmdi$qIgjY+bO}=rrk9^#dyn9_%mI?{ow0)p?s2+f~9?k$G=a< zsJ}*>y8MGpP51F;HCi@yr%;J2mNNtA`OTQ+f2sY>?u(j_+4tH0KD*oQsM&_bH1Xl` zfwhbcpgV1=6fgKnOJy;alMsT$*XVsR^t#rZWc4_S$hiElw(OYngWhjmFa0hwf6_&8 zOy0b|5lr@ORa=m5hU$uy%7vT`Lm8EyPiEtkQb%X7sg=yeeL)MSeFIT1JyX7n{&o}U zgE|Y2e$xi{B;eS90oaRv&u#7g{c?M_AC?;VQH7?(rWST^izqreoCi@kc-aR&kcXD;mPOBZK;~p9@`6DVKiyeLy)(Cg=%X`N zslB_BQFYgp15gD6AO%|vGqb@kYjjs{)6%IIElQNu5)OqG90S5B<$iO<%TKZR?x`QW+yo6b8?G-hagA~JWP)X_iE`r?6;TacuPr0 z(q-Xjdqm;b9CLF60!vL3l*{-y@kHu0!>?r(UkN&mcw@N}2f*b|IAAsz2NaQUQ3_kf z&N^V3TC4nd-%uFEC$MEgLm|{u$-dI!F^%`3(NZ4KR+RH1DEfq>X)U_@5MMT{JNC+Pg;8 zJrjw)vmS)~AnjivDU8|%lX#q-94%^E{iPm!1&7#&-gc z*)K^cJmN_}0)Zy-KP2Qp+`bYSbt@BlBRz!x5CIUNuJ+l)$ z1vnC57(;cihTSSBo-hBogUT)O5n~UW26dQtz46*O-b}2OZ`7`>fCQK^L_IN&ii02Y zVpB*6okgu6hR!pO6B>F<7bQ%+ZA(!6;e}MWf}r=C(QGSN;m=B=$aiSHv96XmA+Pd~o_uEh zsvnqj_Lg9iTb`q%Zd{1?1^}qTzAO1VEVz|&HD5(?Q9MXJpfXc|)j>B#{Ux@Tq#Kc3 zXCzPhY#v6F>>h#y*0E+i$7J=MrK+QA=6t!scomAu%P|DwQBnQfgxuMF?y_)dROHaDsf$)**zf&(h|;pO7IudpNRsVXToECP@iXz5TR9Ja_W`UfO{ zz=DS`leGqGj0i}_=aFR?yCSlkdgo(%Y}uH9pKAcIO;{ZJAHIC$3B=IFKTp=D3>T5U zs6l?hynjcNOcNeZ9Ot@B-@JL#-%dsgp&?FA=v&+d^b~|Upsco9MMf8Gtxu1k?Q9Yt zXpchkm=L@6i8HBs47HE=tUU^;j8Q*4AY~_@Qg7Y{U=iawE2b1bMha-_@}y7ue>y5A z{cwho#pv8L#dOiu$j+~_?-*rRvo=*9@Ud!rw@5Mt#+w~_8zI&7q!f)(GbXDsi`1X_ zF*y`Z7())2ZYM%S!1O>cl-HzJchDsFsMdIJf4G5Ln4a^&i2RrGY5Yg|aB39mQz{DX zu$#%!PE6OV^JXb^j&#RP2Qx=Iik{k;dHibUv;#3VO>Nao`jC+%!;g8)A)-_Kyc5+B zLtM|e;#A)=uP)~iG6MX#C^E~$<(uuxujobsa;m8=xw%vei%uv}cG z9^rK%rfzHmzxsDznGpmy;3;d%SsfU#*%Ava7o(DW)pp+`UDy6=V-+TBIB|M)Y2cAX zP`i#l1IPCeItsNM-u_37hxJ;VN#?VyZqkI}rqzO`z4gu4q0>s6{lH0m3%D$#*QNc} zN9<-*7Tq?g0u}>WhFYhi%aN#Ol;^dVpRm)&w&zfl8VCO8$7I&vU-^f4P~~K;*1tUk z#79jY#uKVjAlZ<-+>CmQt98PUQI;(2RhE$&4^?8YV9(9`_y}VSw_aL|r>AJU{as1g znpunY2qO1nTY(PZ9PQzyZsb5mVvg8Ss;1JLzq1LbNgP%!Pqt@k7jpLvCt@%>+pKS6 z2@`;$Dj?u7KQa;&<`xW+`eQP)@Z0Q=MTV466o^W@}H#K-6k6DTo$#c;0(_oJ&s7lI;bUa+V@b4 zpNdNW6~PICxF6)6a;u4?*8ejkegTm=nC!utOKdc5*P}xjCb?@*Avj7!4z?3>w0PJq zdW+f=sQPxQnF?O)t3%t{`d#4b8gzBqftN=%FyehYSUCEYBKhU;;!XA#h<=| zy%!(6a-FB%6R|;~l7;h_0|x;9%Tzx59pkTXAU>?!SrvGu$1mpE&@K=F`DrwzO?#X- zQ&BHZ+O`NM99R<&V9aKU$U94H;rMKM!N;4IMo)Tw-w2G*m$sJX{P;P^!A&PGcQe(@ zkWlbx7l^zrf>5cAWzbN2h<>&4I=E`R~r!?CV2eCn?silD^$t_@K$6 zE}E1ia1ukf+q&e5R~M^g%1^aW5mq7A!y_l|`tuf{oGM zQZ$a>5c@=Loo!6Na%@v18HH~B{o7JA6i4f&hRm{@V?)8M(od<*PWugLnN1rCrm*>; 
zU**7MplK+&Q$(G-YXgSoYP@v1^%RTMq=5xD*zP8uqxW9}84d?VF)3~j)?Z3pHWk-a zyAVb}@AY@_4E>?8?pIB|>tw#aUrWU(xuqvLAezmTMaPG9+Ofx)Rp>6~inL+a_Eu~c z&%}dbLJ!m2r`OL_|G=>Rs^PMrX5zm^=;7%!7GHwuiD9dxTPYvzJJ^%P&e3q+e8Cku zB|$&WTw@tnk}kBjt-72Y7ybSOq zIbZVK+?w1xIgVn^H$KA3_e)Csve z66dziLgb0WTOb5qL9!ZzSQZ-9>vtnKzgxgyQ4J0$F(|3l8bt(rgYI1!bQyJqI-Gp= z{9`9PhE8yVe}rE;27yvzUBM~Toxzw{co6l&?Vbn=)NY`XM<63hzA%*q4L7l#*zF#_ zGy9|IK)~3NTueLKuwJH(L?J#-|3ZNL*}n46ZZ8Liy$n3FM~eU&&MV6f;0O0I4S+u z8uiX?*A3QVgD_^4SeCrIH-I8+4qq5zIgUk2No;jIz zXUhBP@!2!2*cT_Hbt@oDRjKE_*rd4vm03WCs+kQ_=0?YBUoxm1>!VtK^dHZ?oY5?Y zT`ffdVOs3XVRR&?z`*xlG2IsUlS6nsrz-BKoTq-oaU-yDj4GO$5!a5S*#2N6{1%kQ zfgi1D<~VPj@PN9Q+kC!k^H(uFv@1$T?P82p#W7dlp6svCo~qf|FK%>fV7o?k0temH z5^_kPf(;y@nDJjxf>Cm5aATNWe?S)c;X%M|p@aUUj>~lapYP-aCgnE6-t+Dpzk)Om zE132nQ!+<`7hreY`oV|4Qi+=i<1NS)xolu?#@V^WX zlip#aJA2xv!%J-b*!NI=FyR#b)wo7|)T=k>@h1ZB>)#Bfgv16Wc2gb%J5DlFG3%T^ zRX*fMFuRYw*9&U-YsIn`nQMldn;Q?C*>e9@4}L2v??~a`5F8ZlGDcHKOhwhtl^=|* zpqGJfcX}Ow^0|R*bt9~_l)eEi_uEl7>l1B^z-aQXu1Ijk*tn3t$;^};jf8>C0z%H9 zwWETuqPMkrk=4kl<##`dy)wURHDC(fVvcZ8ihPh8qb>-JO7;K+OZ`a#BO1{pQzY49 zZkm79^rtryApthTCr9Fwc`hWylWm17QYHo+-prrw_q{jvQR4jgFj~+misI;XEAUsF3Xa`Z)QJMI zDAe-UH9?tqr^GkC^5;zeR~@;B1g2;pfXmCa6J)#%wpqhXYRV$Dx0wTn;isGF(7`^FA(f!K{Ih?x2VDh*iIe$MEx3i${H({cC*? z?jbdr>Bh(smh4h_JtTj_W2w}c1Ih2;O<6M2l6VQeM$f4g#KhYA{+hPxGvTXSwHL+t zIvEJtnAlH(?+q{26% z?K3ZyweqBcbYVZb%vAPvFnl633o)gSl*;apU&dXw;i)mUrS)`6=O9yAayxNJCsX*8 z``Ey)if*G_ab`b;E>dI-Pe!%^GElvd>SN&y&_*kUvM+6m&xqR2sxlgZqh{;49=fd_ z=}!8OW;OkzSsY7c2EFEA&d$yK_n{USpE%IAwkacE`S1exXU5TGhT_lh&kZ*L?x&X2DEQR1=6A!a;wRt40scDDg--&FAkX~GkyXcPOi z!?(1(PV2n3SocRV4NCIoJTUFvuS!acDizfAkn*odLCbg57u%^YlOzfANVrwkXcqWs z{O)4iN^U9-%=6Eee-k*Gvvr6u-u=yVA^K|g#d7Qv%?mCAFDJj7Dp@rp&mZ5OQ$_e!pZ1Hhlc!RPSL7M}0-U}hg06p? ztA1AGy`LQ{^&VfKh@z-l2u>@&K&2>X7}A77`IDfS0JD2MX&+|<$TLT$_S?SH0A44M zkci2Ir&S<00bp70CS?-=1Op8g-pUJ=t60e_3MJ%!;@}TstRZ--?r3S3T0`L%oF3|7 zd)o?CZL6Ovb76}EbJEg3uTKg>RX88v{yvC|L@3yw54V{KpuGW|1%gjBugWFB0Xkd_*LNuuRq{4E6fEuRsCE~ajMWs3J(Dpi znNkZgDlVIbL^3kBtSq_vif=nlk=Ta&5r0bidRVC1GX4o9oi+XmBxR>0j_<(r-Pk;w z&3n_J06H@K&+`&k=T;(F1bzd)WykP;j=mlj(J!kpKXHwo0h}+5ojJVycw%9@H;rSFktu$So-h zi1}D~8axMEF(dh)yye(1W>+H&qoJi_38KzKM?NfyJH>KlF7}mMGc1uuKDu4+ztqEl zEr?e-GqnapN5`+Zm=wGs&ugZ34SQT6?{(UF-F{H7_2(S#)&#mAF?yS=%+qCwJ?_xx zw;`r@ICBgpv|kR>7{6Vay5&^W=*+6AfySErnL0?Y@Xl}t3&s>lnEdiL9mbICk3P2V z$hlY_8)tttnn=*`-0iROEMe0di6L9xfK~a(Dk7=Nl+PY_lc9IF0w((;_lMhR6zVJg zM0mzj&PU=66j{x}FpmJw=+TIaKr5e=-8XOU{ZH zf=}54z5%_y2~MRj-EJ?0I#T%-<_oD#Y4Tr;o=*QshB-Rm8zkD2Yjp(qNXGU;0_<*U z^(RzX^e`WQid%MzL{hdlf2oTfLvUZZo9ChfSaQRDlooq4X^L=$_s**B6e=o3Fikqh z2!(?d=(^a|WQd(sNR~%GM^grx*VZFTAl^5gZ-4r(6>YO2O2s z^4ia&@)ht+WGpIoWpi$>c5-d!-#%x7^%G55qNQ)Cz7Xxalf{n1(6K+eUQ|&sWkE6= z^5~J9N6m`o?@tNv*OIINehC8s$Pi|He}JV`zJIyk zW`vl7OGlMI9%_Dq-bm77D<8e;3Sc=sPCD7wjx|x9C|gI5VXc_;)Ny>86z&D;Opa~l zp1*9{ftc3E9)S88B}^a+g4T5Fg}Jey0Vq1sRud%QJ?Tuwk}Q2=y)8hg*K&I34}TS| zq;!vc-E37^QAw7=BIciLN{Wmg;7U4PlH{`9u%J<^YgTz+Hik6UJA#xxpOH>CBKhWE zw>+%hkG4a9E>`VZSp4Hr%<@?^)LA%d4W2Ip7kX}+CI?rL9Da&lb{`-x*cXj&WZ};% zjyC@q=cPLgXnZIRue<9Dw)9nng%hB}W+XaQWr*Bcjozs-@pDcZEyE$H-iKwL20zqU zx>>f!BZL73#FM_`i)L3iLH(LaV7l2}j(v2fNOw2?fPluEQoIIl!!jX4wLAIa2{Tf6 z10Mkymzh5|#=1kzOjQ|@Zei^%z`=UrR3m@uQbgUr;}|{n)Dm`@|aR%?O{zb4k2`ZtJc7I(P zT-wAaR;o|Jwl~uahCuBhbq9p=`D_ahMvE;vpzOzuUw~h-8w=Ah!+wbM@+DaW6+*5iWl!mg`ia?R?_Bv%ypC64a5g8c_#-|l-De&awd3mP^~aR<#mhRq(fGG2d+lWtP{kg zJ4^x=9D1-z;FW0#G1=3&40=dg0R^_FL~!|mTm3Nfk(mmT63wXpOYYf-JNcOu2_<)qdwH{ zwCy#8UKiFf(>`%7LwcIJ^UqYa9~aLnjVit`rdAualrA#}2p~1x6asZR*yNT)R?)k= zEO=-#YB*2tqLU$k3gg#4hFX)lbW?5Ap#0azQMpXqCEn{N{mGc0+66k=ojG1gwj3F) 
zOoJxaMbGFFF)B}I$iyJxoQiJEclq=p-qs71rZe!J8q*=Lr2IOwC+4v77TgWV7fA z{B=?U8>)K3LA6+Gn<0yBV?wOh95>UQjooBQ%VnuSV4NgJr{jK^cjG4_?>-#|uh0g& z8Qxf7Q8L%$O>mESFZ)N$q4=A*Bu|*GE>MBO1ckMsw)OHLZ@87W7WzAsx%~GmF4bwQ zU^1hXrG>~cc zw2GyGoGgz44ps{XT7`fADe8Lu#d9S}NOmG6z*s65yB`KT`-65P zJ5pr4&hEXJScy0v+#J{b3LNB`RK1x%9`WsQUwqKu@k{K77KVsd@@9O#v}0=Qi@bk4 z9JO8kRCqk9QRrg++%kQU9?`nL*t@LC9R&$ag|~6@*p1XBF3a4e8nfE7*ch{7Z@FGO zVu)qoY}6M^5V0Pu_^rZdDj|C?B&$t7DT|O)6nd%6@S5DhDw15*kX$e!`2`0H;rU25 zmxsQ$>2uBco@VY7<@E52g5u+4@rYI7NaxzIpw*LsML{DgMs)J2CKk+3OWZL=e5XeO z^MDK#^}HD;CD^6^>ERp@Q6NrX6`3UOWvzhqSvSZYVe5>()eHtmqaCL(RpOB}N*yDN}3Ic;mm9IyizBlZr$q7@_$7DQwcWo(JGhwZA#0@3F7 zzmk`5hf!9mbW&XJKvwMWP^Z6|p`d1`#=aI3W|vpU>H-I<}oN(!!7ZMb*oO0D*m@wy4FD29}8E! z;Ti`DCHU8HPofg8WsD~z{2BAa0=|!zB=l%P?l5{hDpp3=9Cg$mT#SuwA?X7yc0gWpbGplfQ6$azeZ)o5G|RPy9@+om2mPjZQZWcRf5~%GHua_nvq56lpZfP+EC3-eo!$V z>*-C!QJ(W*c7Pf^=i8UFl zb}8;NVyUquG#p*5G3ghen7fn6jVc|QrH_yird;L?M?#OB0AB~}2{9VyA=4^aq5+S|kVJbLHbu*rANA#_M?|>qljBcT*b#>bd4$PP^D31IkjNKwA%|GSp0Cwg> zr|Y?R*&-a5zzThjv_E>xcPbalNsjgPTnky$hv*F04F3f^+hm?81B^2V&c#1C3Lf@} z{ZHFd*E+RlQ&B+Hjpjt(gr=iTBR`AeKGkAqxe1+kV8R2#;qFpORIk?8@{x zhWxGRI4VyaND5eM*V!F&RwoD7*f;z&AjKtv>v&Y!=1ID{Sub45iuj8Wz%K+!c4hry zCnFOF*GR7t^HkxJB{SSQZ+2JI?E35ZYTQ6YVnfBrP*+v6%dFo|J@C0~Gm7F1ndb1P zg_@&SzE;yI;ASCM?i-+)hL!H-CG3T^|H4M%FU;*+@yb-*R zjcaW9)Lj|WCsA?y!DDv3VHKXXlqy`M1;he65V6RNQ8%@`D|P7(zd-oC{%-AkKYfvL zTh|5>Tp-N7W7;`;4FhJp$tk!=vtx_xNNarJZ1xU=f?4~n4%;S@|02gP_Tm0~4`y|B z-1NaW(ZG;J_C-fF0CtlKZiz8ju@XQ&Ux6}=7d7jwDXf?kxQu!o_8!~J>07crI5zSc@ z`@RmUJDgFqCAX?0t?B_f<`Z#>O^{a?p_I}K|MD;*3(x#}SNY3go(_dP=QB;G2nRuR zzS>-OPThy7_IV+Je?GXxTwQcRK1i1A5K<3RaE^C4g=n{(pJdEH1I+a!(f2x~gK2N5 zAE3SQmUB4jNcRFJL()5}mm;=wmcMx)WMY=rtfk`3rzoI%XWR0tx7QO?hCBpQB3a9* zRj*HIA|L*{$7ti4jhv>{dUNW@D1JCgLjq&yAn-`2sHj80CnvVq&SYM;! z#o#QZ^nW)`n~kW~+$f`+M{$)vp{?V_o3=iz)G=-8b6;YAGbmXRD-Jt%SW)(G^w6Fk zLIBK2x0-6NJT~w#;eAaL7i;uh8-qfNX!~m^V&49ZC>Q?_UE@bRNX`@dK>(NiPY~ft za8Nn0XifcaN$Ph>^ZTfo0um&4e$gFw3X9036zy&udcmXh+*VdXZhkoC(=wFnNG-jJ zi>gR)SAx5OH1+3owEqW8Hh!wS&{BnMPZBM58{J`FV*}8eibL-CQMjuFw7hR=&N*@@J#wQvJb=J3vw;^la4%N&)*l!9N~&L~fvpyK z@})S~(2H_6w!p85I#>bwrd1GVY0#mb}Xb4GSZqp#|Gr|`=mP5DR; zTp`Z)&QRotgDc#FUj)UJ4I&VUhPP^@m2i!ROPJ!<$V9YW2Y`m|A{wqeQ z6Bg9RGyb-GIkzwbIQ(#p_&PBw#Ki~6O*XNA1Yh;DO3?u$gUp{rPF9ZDbQ&v1j#f=J z>~`8)vz)D9gp*t(xmyayEs{J51~AGer)Os(wnB7>{}(t96!3*56)N2od`?`J#G=p? 
zaxYS}fnl{a#$(;B7(%k-sbR;f88`Ie|MrzH9|mcijYF`Kr9{rHOcynJp(0uKhc6Nh zs|;9pnu$)8pFyhgK4hiYG|EzD8b(ydYqxD?tc`W+G|GsYdamY0c-4nk>poXZHV{}D zM>#L%uN;XaJD?t{OSvzU*~^sm1xmgrkDaY@w#Nwev}HXMIJq?`>9t|f)?2QtUe!7b zP(F%}siGEUw|>1J%E$Dk^RSqF=cQ@$y0(Wcaefe*2aI-w6N<3#bA-WQZWns5j=Y>4 z5{v=5h`bWG08;#>Fhm!-(7Y|@E5-R>?4l(@^4gbipp zP7M8wG)Fe@)>$tR)o_k^8534HM#60KWKGLq*+RkjvNxKW36Yh6-UU?*&PZg&pa$kdVu+ec+X&?~^WV z=_3>Bq$#M15~N^1$T?exNeZ9x5?WI=su2U)RQ8^IVt2PxE!@T$V9*gInS4`W#`P^G z2cu9z21hctQhrs-EXT&~61~aYQB_woSXdqr!g>ZVWy>pcr%wLVI<KfFa8giMsBu{^&EUG5Fw9PCpL z683kXZ?#h$UMe%;vEHLXN*`9hkBKcQP?40#?HZRj>@BTXP3gZnI&V_){QbK;1a~tI{X>QDoxdDL)6TghEw?i#-iN;3hQl5B-Z!|^?FB8BVqS` zj)H0F++&kYJw`W1h3&elWDYKuDxqSyB6=kMu*|*6NpSUDe^N3dA9u?TR{lpaV`cNi zl%Iu+{$Dwlo{OU~Tr}&>v$OW4a$sUKgfgj-N>nryYh=X&<=JXfI*V5AcOZ4yF2`zx z<)X)!-jpVQ(?MUr!TjQ2iUO;`1HFMbGk#e$eN$#U=_&T0xAdu-ZO*qxzKFl(uo{O4 zA9u6xIGn&zxMFtlP9oY-!j9M}W5$<#^nKOEuC{%mg8LYLsV>EJeLO!}Nw=;kpRxg3 zma!o7II)6NVKP79$Dr#)_1U^ax%jH|9xTL7^Rt6K2_TD?CZ@Cafl@>yK|z1F!0L*o zpe;11{cv5~5yaok4K(S_OZ{%kX6=THB?z>*v3}~ZNciArIwBkF1|{P$HVeuRpfpN@ zHaxN4cY2c)Z1P5Os0tFtG(U(m|4hr1tnHcQ7CGz+U{LUQQ_I#Ap1VMsDimIMHn~5Z zk(@H$ISFlr*B-6uqTGWZ*qQ$l*NoUM>FO)^_8-y}lO zARwV2`6u*_#4Dj=c!VN&?2l`?n>ay+M3aqbX~7A< z+swehjqwv8i)d@+?{s<3!n)>}KE_OFEoOIxEF)pkr1QmZGsWfrOI8ERz){LGH9251 zcI*W6Q?li%oT-n7gWTwZz<}@+od`!)x{Ax;YYQnU$bqmTyfZtIfm=+*FeS;|1Oc?) zlcTo{QIXurtND3A@KK1;szXHo#MF_h(=XMU1~bF22B^-d?DcaMM#C4>Oc88nXFnvE zRSVS6rgRJKLk;PX<~-JE`RW=>@=g5Q=v`QI2NfTtk=GhiiRPRYRiN5J3iai4GPY0l z%xRI6iyxwa@&k3Pd)j`=aD^yae=YMA2^{f@O1>(ek(U8iy{_g?wT5;3sIwGRdzKJ( zWw(Jvpy~L8<*vl*?Bs)lGIlvbejFV^D*}>>0!JFmz1SdDajOegfqPFDpLSXI*k%(M zssdG5DuSCYFE!#2`;=q06*L)n1zW01r%*^x)B#uQ_S+kPzaBJC6yddpW{c)f7CGSM zhYtc!G8pm)Fmk)dskvygFf9x}ZE2k32RIRcTwRjlz{~K@dGOfXgm3|;Kk4|W!~{+0 z!wb-(mbIQrI`0U|7BBCc79#q7V1)D`-lX=`(dhkky;#x1NaT*HYTgpV`8Ws5=F5Y# zW%U5XsqPG&$VX4adb%a%ulff$%D9ma@N|d2m@VqSE+`)Z&8b{`OT1(d6bcS^ui=`k z7PW@D`RK!x#v=@8ZU{RnJ#fY#sqU;7h*&yi0(FHOnUx?Bm^@*wf9Rk+wMv@#&h z`Cy?e{n3OXn6FXGxsPM|{){1l^#il#3T;2;04B7wE1K;{AfrpVqrzpZU~`tv3?+_bdoB2_JSPn5 z?WxaLqdBKfV$^L{@#(5%awsmfDI&9`9J2e-C~rPzc~MyJ%WT>LYAkV*K1guGQ9HIM zgZxc8$+BnPIJ8?t2d=1SZUumP29VPH&Tc&lEQO~TsbR(5GSnW=ukdiG6P-(A zIU8H|1C}TNn~G0=jHbW_ff1Ouni z%CAJCC=uxJPWl~eII&*)#{a!hA;6Wc`&AAyQN96_pd0ZUu{DqeZ&c(d-;AZYV^xwn z9Dp$#zBdHKi>!S~FB$+8e$&huJ34!;JlBbis|5J%f%v6`0n#GReqBzvtq>{9qR!XW zaMv2HrLuEuwtt;(`GCaN`PSYCIg}(sVu(h0meXWJ!i(-K+4KlPkegT(U%?RJXst}` zZKdv2N!sYJbj*Hnf9}!K63fxrJDP{PKZIs7GBHbVhksL>VviDCvo|02;&p*WcZ{5Q zKts7qhH;2+Sahh% zPR5L_5!r46Dp<1&KSIXyZ9L!sx$=T7YWsAl3NmbVCbmymb7=}~S+VP5Q>j#t*wXnWGGmeN_5na8K1bal+bt}bmv=^tVJ&LjEDA3Q_SZbMUpV$3&3%|RAnK1 zO!}tvm>-kt1T(M?QVhDP8-0u0o6ev3g2PlYU&j!X2>#7Isiio zyw5{u+NWprqB;kc^S-lZHDCyFnI}qS-c5V12(29F`9JpE0?U@DSNk+GxpAX$P>z>d zjskBgvm)t92=1DsUebdVLExN&uTWw8Y`jHut^iDL`=#5s?Da4Vp7=gWv%Wm*eJ5r@ zN?L&`8RQQ_R6lX0!hS!@zMJk3AJi@TAAIB~XI>JVmFI4%9MY>TRVIuwGz-)m_Q0m2 z=Eo6%23ba;FPr{#aPy6z#r5DA-=^CHKw+)fx7A#z$+4UcGFY+V$ygYBq;6ZNQWhYm z@+)%seO+e1c-X-zq4vo0N($jB&+t=nu| zZ=NIfp(kyd%73>M9+{{zYWxjEGrsJQyz>@JT`72qp`j?&X{@InDPwJNukwamG!syD zB-fm-_<5?b_?xO>jeGP)A|#smNb(z+Tw|;xfJSZ#?baI7!N+$@L>aI>8_DI`)NEI{ zD0$_N$RX|=`3)QT7KtP65ha;txSS-)1!LUPeI^PA&ip=W%d${iQI%GWe^h_6RxX@{ z?qeUwrzthnn#+ zOHZiXN4;eTe9nUd+D>xt>7+836MpG&M$=OG`BS&6!w|Fkbn4hMb8lm9fpKRv;IUe_ zQ-T6-s0DXeb62iOkymq*ep-m}GavVJ*cn@_x!T1>6$^bR*sbskf#PF5UJ8=-!G{+= zCTy*Y3u>wIODaMptfVO+8e4zRyg2G$?=${YgeXoj`6NJ12{W|W(-7Xas%W-Hoj>DQ zXsWCcY0=~zjgn(sY8xz&212JuMZgoYBL;KSq}=>h8ssM19b7u2M-s8pbqg^0Bx&o* z#8-K!U2J6MQ6ovRi?_Z&QzA{2`fA~Myk5-)uY#^VeCgEiOC8oK=)GPVN2UpIVopK@)}xe}y!3C$B{$3;yRCibXmtWP6&rW>7=6c6H)y1mRL@=+Zmytp-}j?m;DtnnIuTP4fo_>LWn 
zA>3N1U?3=s$a$vP-vua=Du(sCs}?*^kp%nZE5+%fRDDuEn}nBC_PGbiOO~}RI$pWG zQ8-nQpK{$^BEh=s1I+50@Jn-z3F7rEy6okFhGv;+3oh^)lqRM4o%tQl6_26Pp5Kwr@%B zr42?TA<9Ou=zqpaev?f0QCCej^##<7R~1M^I_%1>8Zi0-4T;b3i$RFJ z?J_d+l#=|P;=*RZ6BN|^#=pD1rJpCBeKU5<=AvFzs+MaOD#sWd<|PcvGz*5?)ouGCIMYmykP{jWS)Y??Y|zkXE#rNj`K)y?o^5_OVcsC#*z?QoV% zBM=>zQxf)ahU>ip&sA6d1_I2&9qz2dbBP9cDbsY58JoOECJ1CLZq>wK&)Ok?1#9mw z6*~>0LRZ9vFAn6%kfbQh-QFL2`pKTfG(Wb^IlKFndPQMc#ywRQKN~SbHeKhw6X7uh zT|$$S+}(|4#48cYbK)FhmxJf=OGxi}zRuQ5S|b$u#y?1U4GVZZb_4R1<{}as8#gu3 zo802kPTAqfV^bfgQ(0`(p|rIhHSHDN-uimi{Wm|fw_E>uPgM)r#Nvp;L&(uxM2dlD& z>;P!WxuhVv{Rsmtoz}f+t6U>7^32n9{n-+82o9mm=kFeyy)D^a1KsDQ+?3y9^5KSY zPGf3lQ+3-W-)!p25^9Mrw>{tpn*%)B6e17V>*HP7V!&X#o;JItZngUyCEas66j*d* z>~$$gRC!wy?%GpM)vBWvoxSFOUi&gGE^Ls%f{FY?`d1)fGEvW5UqN)A$%NX>HV3>V zlWT$IyUsA0)mi(LeA5c^E$>?QKX#817jN>hGb8EUrjkQgDu0Dfyt?Ci-F#csBklU~ zQPqD$?aMbFqS^2A#l7ond%ol8cJeRYT>%+nEw2BOv>1q$*>fLnAclIscaD*=ZcMR* z>*68`;zz6goTuRL+2Tx*Ip=QuxrRJ1`q1H)ZNfMCdKvHF`>`Wcy!aR@ zGB+MeUJWOPG6>dVXq@_K@%(7MYVra`f}hoGtYG3m7mgmmM~DsR_L_`QVs5{V6B>fN zh!P*-;=e}<+7{&*wu@#NtaJH7q42oU$f{4!F|xj27KbEf?o+hqCCo{S08!!$#$b*A z6&;4R*@n;VK>9=P8~=#1mnGd7QT1^;%Uqpvgfs%jzrXPPOB;>4t0}WpR-MXe+mt3b zESaUTk7vG?k15dz_~$`v-jH;fH7hkRdv0-FQlhPf2$%Dp_cMk2&*wiI`S*+e>mjD( zI~^YLCPkO9U%2h&3jydGYU4!`_ULW5)*y3YV5S_ZvPWk!<@X2g;->%lzmhVV+JC%GeIl(i# z2dc_UcxZ&y&zGM_Lm*Ck&T!f{VytuZqqdCCm%w0B^w&~ZJ~yy@%7g6cbxn;;2K2U| zep`R{NY6-#e&GwS#ShD@X8}<;Q;19M|cVt?mx`NcP*YtGfVN~5W z0c0-~zhreYA{A}%mkZy(uG>c6mqg!OcBI_wfsD-U8*6aMX!1=abZ46j-M%-yUih`F z?h@SkS#~G`oRFEl4q3GpTk5<3L=ET3Ax~uSgWC*KS`4XiYeMQA~rf ze>Qyw6MY1myS`)GACVS^&^c$ggiJag5E_@?o+z!al-UWUoBh>S0r_@E6l(vt7vH?x zj8q9dQ_ocr_2(6G{SB4!q>`H*{!+_#2SxeYA4^9>)o?u30>U3HQUg$;>)kj9O^;ZC z^yg^nYvTgv!<`xqII%sE#eMlS|ZV5vUujzFVmuU&cG6i}pGfu4j;!M|EMKu&c=b4>vz@J)okH)wW_<>_a&a@m9#i~g4XRnM zpDkW8x)l?8xjop0!=0qrV=kPyh>OoCZNVm@=Z4i>BHRc_+e~9ds%Bq*0He&;VX)|| z_*cR+uAyVZ`1Fr9vg#uKk`i(+CUc0!K~c`j5wg)htkWa!khtw*AEwv>`2WQF+85~` z$p6;IoU3A2>B2T6a?t(bZEyC-1Ha^+LL9PF?5VdBdDDG=ap6F-3;VQB;*|)8NPOdF zi6UA%O!o{x}NxZf|B}${}{Es?|AqUx=cNmNju3bCb=c*J_9akouO-3+E zIoUvzUeLbtKAnzd@X`5#;28q|Zm?L9*E};M(0_Ec0fSKox7VZP8M>l(ef^3I?9f@L zp)Pu(vFtXG00xu>yxuSsdbO`hyV8D!cHA+At3{~gnsaY*MeKFN;QrjbyypT*Gtiu$ zA!+n!fMU>BSH00MC_e!3Q|&hfm}P1YZ+f(?!|9gP%mXF23$6#Gzsk0ne)cb%&aShn zEcx%nXmWX!2*XqF%6+3=_!y!Jvbjr>f=g2C_e43zk+;)Ra2PAN?7=+_mZT<79|97- zQ5Sw&H~9dO(%&8=tnXlJ;#`)@XO)L>oj-kbq8XNU6ZgnxI)nDMKpwZkD%bvq$N!9t z?HOA10&II-pIKsP#(3)dsgneJ!ws|jL~j%fboxFR_TNk zkLH0J<_yAsv^uiNECcWjP*gSpYIFKa)9<|*FFIv^z5oy|6ysOEW6(FVB^jO3@LXb0 zmje);FoU6n3oPls=~eT8L1x}rBFam|?U!pI*&`THdB70uUy+}5q1^2Kq7tvwUaWc!qj1sIl1W4SZ zxb&aHTtm$|$SHlMKnYf*CxE{gir$SIt_k3I&PK$ znFgAWz4fHFe@vxlkxXws%sL-?6_R_N*S^voFzO?S|{ zu(x{}CN|I8y|wQ&kRc{SevvoVK$S=hb+)-(spHi1Qw$c69I-;#u1BqP^NS$6tGvl6``xi zp|RwV#MyQPQdcpu%M4T;*26M-uz69dp1I|En9h6!FZ%m48S2&?bR~`y_hf57qW8rWrmk-*&bdanAuijz1>i*nA4+k&}W!&l=EDI&!Pg+KS>&a!J< zHvE%u&#v~}YT!lj$p!M~N%XL*K)n6iH-IoWDEbnJ=tSqw5T;hgpE7D7deUern+hEY ziec_B+s?Bv!wRD!hUoMc@KIO2hg(ZhR{*Q0-Hr;UgByMNQXVn0|NldkC44ccCVOJrLu|)bu zQ!fV8=@Nr2{JZMDvs%k7GsCT4^hS^&Zu$qBYF_K3(tB39N{Q-3tMG#OgJj=j*5?S( zMDU)9ihHXa_8&|?cohw)k+&h}9 z{sa)g)bprdS0WP( zp=)lnV!kP-PHl9ulC5S(MrXFC_Nv-s1h-`Lzmk=JzJ6=EE=rni>=ewFAtDw#lCIX)w&)p!%(jMuVh8Sls*iIclL$3VqC)$=?H9( z7^ld>4*3C$Y=jKX>#R6D?fC_ly05A}e`SJxkxN&p_<_|HJOA9#;pdQKsWLFW==H(e zfa(Z@3Wp$8D@K^;?GI~NPO8^6Px{y-wc^PPmd zsmID6#uXlaXD>CpK4_c0{i}5io?!vs?##5(S(a{D$k_t|f2=-M;q|P(bm2eziqTg^ z^?>!K197`MgWT7Xji>4>CIx^qd`#JE-*-`nf#elW2amJ4xPz@eU7g6|Lp*UPS4J;u zEiG?OVIrZ%9N7tn5>b95oSoQ&jRp2mLcU$vb1z}%_V_=0p5~(Mw;D5GfPuv~fPF_( 
zWfx1V!C4MFSEbeb!x7sBkmBh;g^5l<$6~Z(r)VIZvJaocqM&O%o`#Eq!UlmOPln+r z=7xyn3FJy4eBCuog#OTpDCHX{UDG>>HAct_&MP-qP>}ua$%es98zQxTQ{B?k{DwZ3 znVb5H_S6-^UDgex?b-Qf=9=9Q=q!{d=;-7k1qw8xbnas@>wbp*m4A4d*JkjeA6cB{ zx0OEyn6EflC+59jTvvj=XRq*dHe7r{+i&*M;ze%(M$ibz?8xkXVORA0&4T4YnBQ)| zcp}Mu0syoCUYv; zwQCI*;aaa*rF!7Pxqcf#z;ilgMZ>(;7fw`_ z9KtL9zQW2{^;dnw*A-Q=g_pf+#?bDw--?X}tFoIGcxlYs3?JG4#Xx z02LJ`@Qm)N&;YBT1oRG6YUaNY9%xd5@b90};4L4(oqtV{iyzU6PGqfSRoH14eQ>F@ zQM=x5BWb@qG$_-E+o{V?3l`gA<%gJ9gi(*1WxRyI+m@>F)-HF5LDOFOM_b#hAWV54 z0)T8GIUC`@L*p=TnJife>M)0`LnhHl=<9c;kGFb;c_76W+gXD0Y5usF%XLtiQ`?#4 ztu&CgGG$6pdP|8fcjEsfrWv!)FPmuWwJMftn0(G~GA?D4mgfWtA#!2X z!VlFcr*(iNJGbAOh39|l8BLI9%j!vJ&hGrueC z3h3VaCLF5KsG+E_uNFT%6w%x6-g2xNKNZ)A$e~z3hSX>(01nu6>5nKr3O_ zE?u~vr{rTwchtKl{`{WsdtZ_J;+HiZMEz6Q>7S~0p_w{}J(rgmSGYLIKz8D4cc_a1T-9PsZ_56YMG`5;~0 z+E)39DAmr)TqzlvtdAB`;gXb8^sF|_-<}oW&CAY_t#-z*BJLm$_wKOi`04eC**cvd znx6UXuBrz6=(4IG>_#t%mg5W{SX^s94!v=l+rL(g;p3Vj5zC)%f86>=>%P!Zh~sT_ zSB-|OaEI@1dq?!kwW9?$jSU6ch4uJy9l2`T?9#42le${H6`h-7_ z^xpm%p3GM?eGSAXty}Vz8hl?QB6~KZsJ6mZtr)!9Or!=y?6UlFhCOyiLv~t`t8GQw zkaJd?p*fHSQ$T6eVPFDy0Rs_bC;@qkO{hHNFSiX^_GZMk=Ye(E_I+E|NdtlH1mexY zydZ))ubp+T>ziL2&6w(oUjN}>u+F`c_@LZQrXv%`+7paG($(lP-tQW{-{I`OnhwIJ zE*kcva1tT6M`R|KX$31=KjU8yKHf7#hW2cIcYCRv)#L&Mdhwu|(0CRpe)m_=6scXW z=3%Zk1{sMW3lc&|0JD%wml#PODpMaBkQHQm8oWczKyFnO-Hst=3(IQww?ka+&BM6h z9svxeBx0F1dx#Pd#vHWvX--2@_GOFaSP?rjOJ$nABHc7+?diYPF>1z%X{aW3;1Rq-GOn0f@I8UG$Y0(2mQQ>;1bl2XDE}*f>E05qxYpgS zze2j;YA6-H#qj4+RTFriABvGbycZKS(PxA&OB;u;PtX_!6q>CZ}f z5iY?S`-9(6AoydpD{b?fN$%F`O>l3p(2Ag zRaxmL9!8b|s`wHen&mLvZoz(}r#Pbj6_LwmoACn3-J zFfa_%(88$fgMcMPtiwL`r}!6ME3POFE!783zBrt5cJLz#5Q>-n{m0;% z<+FCRjGwy7^b5B;ekflKTX@X~C}@BBnDf=H67pi4EPKo@kAEi!2|A+RmshD-FdS0|AE{sgJL9jEASd- z!V{sDac3H`F^T@s5?@>|hFrn$5P}M##WK`1Yaxd=IBLbWFzCOqTlp{S4q!B_FqZ3s zF{_OoqqVJ%aQ*{3d_Plt7ruI{QJiR&=p;!)hYB99L8WnMjmbV2=Dm7Fh`?b3M*%9z zT3%#-t-%uY|4jvGjbMfax!<$D>AX~zqz6(F`GvU*?+#%w`Egt(flgE>I^YMwA1hNj zh3AK**)VidACI83Ixvl!jiWj}9J45GGY}vo1tM4VjrHG-Cz@E?54Aho`E0YlwthdG zw*=n*?QC%(CP?{fRUHD6qsHliC$-kJ7Tg5wzsq6ho+*%}6}G-ymiclqw^LDix8-P=_l=sFN`Ov3Ou2kVsuD%Pg@y>nO z_LX$lm;^;qQXsey50Ii|H1#9n8DraI;d|YOZ%99%{W6lZ`xg&{Gd)0)4lf%PhkP$v zr@e*%;3wA36|22Jq-x=QEqRI$deJsCK$={hU^uKzJ!j<>xr=#k$nXSmC45@@-)1$9 z5iAMxNF6}iQEGXBfq z@UhLTlKbo%iP5W)3tC~g(@cxUc#s8wp z#ES>WxJRcG__ca(^~J%PfSD{YNo9G57#k&~KXV=eF)kdS@C(BwYa#FdLDfDo`+rfj zc>A~9p3=(vnj7}GWz@8a2~FG7?N#%z3^z4%QqtOP8I77Q+J&;1(=zolU8V=+aQqD< zsK)eAHC(rSo^4X}ztj0#KSg&V7;6s22GW3UKA`!P2UO2gXXMNfeT|A&G^@)$0<(#| z_SC#}aO--)&>!0;;PCmxq!r36cOz8aZuBit@G(V*Y3~-_RZr9O&yC}5oEPqlK#?hj z^hsw{81ZM6!|(qUAa)6$bzzodC*F|8FNgLdKszszbtg2nF+G>R?x_0Y_y5>stoYv& z1~Q@bL)shB-yD31c7IUoI_Uk_M8TVJ;-tU<%`wt!Z_K`F4s#7e_9Xy*W9Eeu@xm^n z2=A(&Gj=oE{JwE?U#j>m)LPIk65tR`kgL(;^WVT75{;Nsz4O^~mVHA8^zdC2`yK=D z3lW@6h`%cDGyk3o2Ggu3K={wqSCMzSyO->_#BY)5W;vDq_0%a~Sa!_Q_X-i(pl8^; zD@ECBSpPBM^H*fTN-^wCG#j$LDh*IK(F+!ekN+3&>DO6vA85ncB4@{}d$ z-66uE-*&(BYl=dq=g|uPSEs0n6N~uKU=Ny-l!A>LcgFv(sPq4pYW}Y)hJ<;P#oZZ| zA|V7O+-dmGpbUokPX^L|cGq0ud}2=V)8P#Z!H~@bm+$>AJ(q??c=QqS-uQh6spwk5 zrk#`-EkkBAVrEcnjN>u?W_;sJn?S9F`M?Ud{X++)%1i32m{y$0>h1g~B~~H^TST&k zN59;*oA$R@o+Ds|X!+&3##e@91$Mn|&xALQ-q@7m!K&ECkBL~0E)osar~Q0L(@>GU zlZVx~)^5&cOg!&7*S3aTuj?OldOo0Z-f6Zf)9RC7Z#45ZFf{9UhnZhhIBay=7kVi7+;|RuD17wj``pA zcTUmDJ+Athw6f7^Uf2d@J$&wGAybtt`JC(zZ}&;WC^gfmSzdr9|1<+I0N<|or?29v zziDDhQzzcURHbqh zHYyo+7mVXM>`?|w$sosLMu=P;Yd2LjlTi;KpeNx_EPmK2#sDFsoqcBQZ7v8YC(vK? 
zcO_MLYRlvOi3NU65!RzUQ1C5YT8u0x5C$D*Iv(Rcc$8ZfQ)cyXhJf$hM)LWdhgSaA ztVww8I}CM6@zV)5?XW8NSWeYm!|2jrsHL>cw5fBq&cx>{?6!2R#6MaSi*&S)72PI6 zy3qIf^N%q0ZS7Cjf8OoH6N!0hTF(1hIV5;lwN**dj`ey8^)FAWwPn>bXAK2&VN1ZH ztrMN!F8G5k126E6PI5h=3H_`4uZ`I+M)NtbQ638#+Fw7OLkfXELVn&|1HQviqaJxt zO#OgIfYTre>0T>Y(|Uj2#v2g?Po+E^X6p!Ft_&0(T`Rkb|*4p z5RSzCHBdb#D1-P=6YIS3qle(}&<5hN*K!34k5%4;1F`AMr3Z7P1Gw>HaUqWj8|DVh z11O#7$kKIB^_+-QUti)2_fHzM992F1U^Z;+&hCT(Mo?DC!-5tfQi4KZ|Fqp z`frCW1W-XQ<-U|erLn?fI$d4S@lMYM=_gsxLeYmw0Hio|EK2v;Jh~TFsi6S=*xIkj zjnK)Ky+)swMQ7})PST3vm*9&2LwL4hQ^;VNjR8mJiCugQM&hh!>mV3mSm1lwSVEw%S+s^4_^vc~Sc2B5Sw^iMqE80FBZCy5Y!rgft%FnHNOySg=w|ewEHiW9Id{A( zbj(Yt-bT0rwoDKE|0gCQ)`3DUy%d+qV$ININ+KR#Zi4i-MR))3|HMXgojjU|*-HI^ zU2KAUUX-oa3@L(VR=R6$(MS=0D<%2fb`60+7!t9+XI$-L)kd$-@N>GH2b)>7)dMYy zV`ku{ZpO)W|1X^>SIiIT*IL4UGlQATWe3z`{xlb=RyI2RRVLMlr2ap6G3_sVQ(N>^ z0Gz8ygk{2DW%XzvvM;~dyugpGv`77w3%qMq1+escW@JA*cCj_z7hNT21nV8S+cKWg1W*^S6()n0 z1elm@EgwicDdo47uqK%QI_m6=+S@#fy|MoI}ma z^o2-%n6XrmuCK+oQ?#ldqvK0}?~EjE;1DK+I5=-XO~#8b3D#t=9eYW4