diff --git a/.gitignore b/.gitignore index a470ff5d27..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,11 @@ *~ .*.sw? *.log -*.log.[1-9] +*-log +*.log.* +*-log.* *.pem +*.pyc .localrc.auto .localrc.password .prereqs @@ -15,13 +18,17 @@ files/*.gz files/*.vmdk files/*.rpm files/*.rpm.* +files/*.deb +files/*.deb.* files/*.qcow2 files/*.img files/images files/pip-* files/get-pip.py* files/ir-deploy* -local.conf +files/ironic-inspector* +files/etcd* +/local.conf local.sh localrc proto @@ -31,3 +38,5 @@ stack-screenrc userrc_early AUTHORS ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ diff --git a/.gitreview b/.gitreview index 570d31a987..e1bf63ba7a 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 -project=openstack-dev/devstack.git +project=openstack/devstack.git diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000000..2227f185dd --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,1140 @@ +- nodeset: + name: openstack-single-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-noble + nodes: + - name: controller + label: ubuntu-noble + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-focal + nodes: + - name: controller + label: ubuntu-focal + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-almalinux-10 + nodes: + - name: controller + label: almalinux-10-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + groups: + - name: tempest + nodes: + - controller + +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: devstack-single-node-opensuse-15 + nodes: [] + +- nodeset: + name: devstack-single-node-rockylinux-9 + nodes: + - name: controller + label: rockylinux-9 + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-rockylinux-10 + nodes: + - name: controller + label: rockylinux-10-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-two-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + - name: compute1 + label: centos-10-stream-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + 
nodes: + - compute1 + +- nodeset: + name: openstack-two-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + - name: compute1 + label: centos-9-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + - name: compute1 + label: ubuntu-jammy + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-noble + nodes: + - name: controller + label: ubuntu-noble + - name: compute1 + label: ubuntu-noble + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-three-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + - name: compute2 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking 
setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + +- nodeset: + name: openstack-three-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + - name: compute2 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + +- nodeset: + name: devstack-two-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + - name: compute1 + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- job: + name: devstack-base + parent: openstack-multinode-fips + abstract: true + description: | + Base abstract Devstack job. + + Defines plays and base variables, but it does not include any project + and it does not run any service by default. This is a common base for + all Devstack jobs, single or multinode. + Variables are defined in job.vars, which is what is then used by single + node jobs and by multi node jobs for the controller, as well as in + job.group-vars.peers, which is what is used by multi node jobs for subnode + nodes (everything but the controller). + required-projects: + - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. + - novnc/novnc + + roles: + - zuul: opendev.org/openstack/openstack-zuul-jobs + vars: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: false + VERBOSE: true + VERBOSE_NO_TIMESTAMP: true + ERROR_ON_CLONE: true + # Gate jobs can't deal with nested virt. Disable it by default. 
+ LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' + devstack_services: + # Ignore any default set by devstack. Emit a "disable_all_services". + base: false + zuul_copy_output: + '{{ devstack_conf_dir }}/local.conf': logs + '{{ devstack_conf_dir }}/localrc': logs + '{{ devstack_conf_dir }}/.localrc.auto': logs + '{{ devstack_conf_dir }}/.stackenv': logs + '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs + '{{ devstack_log_dir }}/devstacklog.txt': logs + '{{ devstack_log_dir }}/devstacklog.txt.summary': logs + '{{ devstack_log_dir }}/tcpdump.pcap': logs + '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs + '{{ devstack_full_log}}': logs + '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/performance.json': logs + '{{ stage_dir }}/apache': logs + '{{ stage_dir }}/apache_config': logs + '{{ stage_dir }}/etc': logs + /var/log/rabbitmq: logs + /var/log/postgresql: logs + /var/log/mysql: logs + /var/log/libvirt: logs + /etc/libvirt: logs + /etc/lvm: logs + /etc/sudoers: logs + /etc/sudoers.d: logs + '{{ stage_dir }}/iptables.txt': logs + '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs + '{{ stage_dir }}/pip2-freeze.txt': logs + '{{ stage_dir }}/pip3-freeze.txt': logs + '{{ stage_dir }}/dpkg-l.txt': logs + '{{ stage_dir }}/rpm-qa.txt': logs + '{{ stage_dir }}/core': logs + '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs + '{{ stage_dir }}/deprecations.log': logs + '{{ stage_dir }}/audit.log': logs + /etc/ceph: logs + /var/log/ceph: logs + /var/log/openvswitch: logs + /var/log/glusterfs: logs + /etc/glusterfs/glusterd.vol: logs + /etc/resolv.conf: logs + /var/log/unbound.log: logs + extensions_to_txt: + conf: true + log: true + localrc: true + stackenv: true + auto: true + group-vars: + subnode: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: false + VERBOSE: true + VERBOSE_NO_TIMESTAMP: true + ERROR_ON_CLONE: true + LIBVIRT_TYPE: qemu + devstack_services: + base: false + pre-run: playbooks/pre.yaml + run: playbooks/devstack.yaml + post-run: playbooks/post.yaml + irrelevant-files: &common-irrelevant-files + # Documentation related + - ^.*\.rst$ + - ^api-ref/.*$ + - ^doc/.*$ + - ^releasenotes/.*$ + # Translations + - ^.*/locale/.*po$ + # pre-commit config + - ^.pre-commit-config.yaml$ + # gitreview config + - ^.gitreview$ + +- job: + name: devstack-minimal + parent: devstack-base + description: | + Minimal devstack base job, intended for use by jobs that need + less than the normal minimum set of required-projects. 
+ nodeset: openstack-single-node-noble + required-projects: + - opendev.org/openstack/requirements + vars: + devstack_localrc: + # Multinode specific settings + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' + devstack_services: + # Shared services + dstat: false + etcd3: true + memory_tracker: true + file_tracker: true + mysql: true + rabbit: true + openstack-cli-server: true + group-vars: + subnode: + devstack_services: + # Shared services + dstat: false + memory_tracker: true + file_tracker: true + openstack-cli-server: true + devstack_localrc: + # Multinode specific settings + HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' + # Subnode specific settings + DATABASE_TYPE: mysql + RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + + +- job: + name: devstack + parent: devstack-minimal + description: | + Base devstack job for integration gate. + + This base job can be used for single node and multinode devstack jobs. + + With a single node nodeset, this job sets up an "all-in-one" (aio) + devstack with the seven OpenStack services included in the devstack tree: + keystone, glance, cinder, neutron, nova, placement, and swift. + + With a two node nodeset, this job sets up an aio + compute node. + The controller can be customised using host-vars.controller, the + sub-nodes can be customised using group-vars.subnode. + + Descendant jobs can enable / disable services, add devstack configuration + options, enable devstack plugins, configure log files or directories to be + transferred to the log server. + + The job assumes that there is only one controller node. The number of + subnodes can be scaled up seamlessly by setting a custom nodeset in + job.nodeset. + + The run playbook consists of a single role, so it can be easily rewritten + and extended. + required-projects: + - opendev.org/openstack/cinder + - opendev.org/openstack/glance + - opendev.org/openstack/keystone + - opendev.org/openstack/neutron + - opendev.org/openstack/nova + - opendev.org/openstack/placement + - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images + timeout: 7200 + vars: + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host tuning are enabled that increases + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it. 
+ configure_swap_size: 8192 + devstack_localrc: + # Common OpenStack services settings + SWIFT_REPLICAS: 1 + SWIFT_START_ALL_SERVICES: false + SWIFT_HASH: 1234123412341234 + DEBUG_LIBVIRT_COREDUMPS: true + NOVA_VNC_ENABLED: true + OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these settings will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and configures the default + # qdisc policy to pfifo_fast, which effectively disables all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 + ENABLE_ZSWAP: true + devstack_local_conf: + post-config: + $NEUTRON_CONF: + DEFAULT: + global_physnet_mtu: '{{ external_bridge_mtu }}' + devstack_services: + # Core services enabled for this branch. + # This list replaces the test-matrix. + # Shared services + dstat: false + etcd3: true + memory_tracker: true + file_tracker: true + mysql: true + rabbit: true + tls-proxy: true + # Keystone services + key: true + # Glance services + g-api: true + # Nova services + n-api: true + n-api-meta: true + n-cond: true + n-cpu: true + n-novnc: true + n-sch: true + # Placement service + placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true + # Neutron services + q-svc: true + q-ovn-agent: true + # Swift services + s-account: true + s-container: true + s-object: true + s-proxy: true + # Cinder services + c-api: true + c-bak: true + c-sch: true + c-vol: true + # Services we don't need. + # This section is not really needed, it's for readability. + horizon: false + tempest: false + # Test matrix emits ceilometer but ceilometer is not installed in the + # integrated gate, so specifying the services has no effect. + # ceilometer-*: false + group-vars: + subnode: + devstack_services: + # Core services enabled for this branch. + # This list replaces the test-matrix. + # Shared services + dstat: false + memory_tracker: true + file_tracker: true + tls-proxy: true + # Nova services + n-cpu: true + # Placement services + placement-client: true + # OVN services + ovn-controller: true + ovs-vswitchd: true + ovsdb-server: true + # Neutron services + q-ovn-agent: true + # Cinder services + c-bak: true + c-vol: true + # Services we don't run at all on subnode. + # This section is not really needed, it's for readability. + # keystone: false + # s-*: false + horizon: false + tempest: false + # Test matrix emits ceilometer but ceilometer is not installed in the + # integrated gate, so specifying the services has no effect. 
# ceilometer-*: false + devstack_localrc: + # Subnode specific settings + GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" + Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + NOVA_VNC_ENABLED: true + ENABLE_CHASSIS_AS_GW: false + # tune the host to optimize memory usage and hide io latency + # these settings will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and configures the default + # qdisc policy to pfifo_fast, which effectively disables all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 + +- job: + name: devstack-ipv6 + parent: devstack + description: | + Devstack single node job for integration gate with IPv6, + all services and tunnels using IPv6 addresses. + vars: + devstack_localrc: + SERVICE_IP_VERSION: 6 + SERVICE_HOST: "" + TUNNEL_IP_VERSION: 6 + +- job: + name: devstack-enforce-scope + parent: devstack + description: | + This job runs devstack with scope checks enabled. + vars: + devstack_localrc: + ENFORCE_SCOPE: true + +- job: + name: devstack-multinode + parent: devstack + nodeset: openstack-two-node-noble + description: | + Simple multinode test to verify multinode functionality on the devstack side. + This is not meant to be used as a parent job. + +# NOTE(ianw) Platform tests have traditionally been non-voting because +# we often have to rush things through devstack to stabilise the gate, +# and these platforms don't have the round-the-clock support to avoid +# becoming blockers in that situation. 
+- job: + name: devstack-platform-almalinux-purple-lion-ovn-source + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-centos-10-stream + parent: tempest-full-py3 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 + voting: false + +- job: + name: devstack-platform-centos-9-stream + parent: tempest-full-py3 + description: CentOS 9 Stream platform test + nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 + timeout: 9000 + voting: false + +- job: + name: devstack-platform-debian-trixie + parent: tempest-full-py3 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-rocky-blue-onyx + parent: tempest-full-py3 + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 + +- job: + name: devstack-platform-rocky-red-quartz + parent: tempest-full-py3 + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-ubuntu-jammy + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (Jammy) platform test + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 8192 + +- job: + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) + voting: false + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-ubuntu-noble-ovs + parent: tempest-full-py3 + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble + voting: false + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + 
ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + +- job: + name: devstack-no-tls-proxy + parent: tempest-full-py3 + description: | + Tempest job with tls-proxy off. + + Some gates run devstack like this and it follows different code paths. + vars: + devstack_services: + tls-proxy: false + +- job: + name: devstack-tox-base + parent: devstack + description: | + Base job for devstack-based functional tests that use tox. + + This job is not intended to be run directly. It's just here + for organizational purposes for devstack-tox-functional and + devstack-tox-functional-consumer. + post-run: playbooks/tox/post.yaml + vars: + tox_envlist: functional + tox_install_siblings: false + +- job: + name: devstack-tox-functional + parent: devstack-tox-base + description: | + Base job for devstack-based functional tests that use tox. + + Runs devstack, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + run: playbooks/tox/run-both.yaml + +- job: + name: devstack-tox-functional-consumer + parent: devstack + description: | + Base job for devstack-based functional tests for projects that + consume the devstack cloud. + + This base job should only be used by projects that are not involved + in the devstack deployment step, but are instead projects that are using + devstack to get a cloud against which they can test things. + + Runs devstack in pre-run, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + pre-run: + - playbooks/devstack.yaml + - playbooks/tox/pre.yaml + run: playbooks/tox/run.yaml + +- job: + name: devstack-unit-tests + nodeset: ubuntu-noble + description: | + Runs unit tests on devstack project. + + It runs ``run_tests.sh``. 
+ pre-run: playbooks/unit-tests/pre.yaml + run: playbooks/unit-tests/run.yaml + +- project: + templates: + - integrated-gate-py3 + - publish-openstack-docs-pti + check: + jobs: + - devstack + - devstack-ipv6 + - devstack-enforce-scope + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy + - devstack-multinode + - devstack-unit-tests + - openstack-tox-bashate + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional + - grenade: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files + - neutron-ovn-tempest-ovs-release: + voting: false + irrelevant-files: *common-irrelevant-files + - tempest-multinode-full-py3: + voting: false + irrelevant-files: *common-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *common-irrelevant-files + - tempest-ipv6-only: + irrelevant-files: *common-irrelevant-files + - nova-ceph-multistore: + irrelevant-files: *common-irrelevant-files + gate: + jobs: + - devstack + - devstack-ipv6 + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-ubuntu-noble + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx + - devstack-enforce-scope + - devstack-multinode + - devstack-unit-tests + - openstack-tox-bashate + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional + - grenade: + irrelevant-files: *common-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *common-irrelevant-files + - tempest-ipv6-only: + irrelevant-files: *common-irrelevant-files + - nova-ceph-multistore: + irrelevant-files: *common-irrelevant-files + # Please add a note on each job and conditions for the job not + # being experimental any more, so we can keep this list somewhat + # pruned. + # + # * nova-next: maintained by nova for unreleased/undefaulted + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. 
+ # * nova-multi-cell: maintained by nova and now is voting in the + # check queue for nova changes but relies on devstack configuration + + experimental: + jobs: + - nova-multi-cell + - nova-next + - devstack-plugin-ceph-tempest-py3: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr-ha-multinode-full: + irrelevant-files: *common-irrelevant-files + - cinder-tempest-lvm-multibackend: + irrelevant-files: *common-irrelevant-files + - tempest-pg-full: + irrelevant-files: *common-irrelevant-files + - devstack-no-tls-proxy + periodic: + jobs: + - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..bb511656f1 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,19 @@ +The source repository for this project can be found at: + + https://opendev.org/openstack/devstack + +Pull requests submitted through GitHub are not monitored. + +To start contributing to OpenStack, follow the steps in the contribution guide +to set up and use Gerrit: + + https://docs.openstack.org/contributors/code-and-documentation/quick-start.html + +Bugs should be filed on Launchpad: + + https://bugs.launchpad.net/devstack + +For more specific information about contributing to this repository, see the +Devstack contributor guide: + + https://docs.openstack.org/devstack/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst index d763c75b8b..6a91e0a6a8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -10,8 +10,8 @@ and so is limited to Bash (version 4 and up) and compatible shells. Shell script was chosen because it best illustrates the steps used to set up and interact with OpenStack components. -DevStack's official repository is located on git.openstack.org at -https://git.openstack.org/openstack-dev/devstack. Besides the master branch that +DevStack's official repository is located on opendev.org at +https://opendev.org/openstack/devstack. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). @@ -20,13 +20,13 @@ in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`_ contains the usual links for blueprints, bugs, etc. __ contribute_ -.. _contribute: http://docs.openstack.org/infra/manual/developers.html +.. _contribute: https://docs.openstack.org/infra/manual/developers.html __ lp_ -.. _lp: https://launchpad.net/~devstack +.. _lp: https://launchpad.net/devstack The `Gerrit review -queue `__ +queue `__ is used for all commits. The primary script in DevStack is ``stack.sh``, which performs the bulk of the @@ -47,12 +47,7 @@ The DevStack repo generally keeps all of the primary scripts at the root level. ``doc`` - Contains the Sphinx source for the documentation. -``tools/build_docs.sh`` is used to generate the HTML versions of the -DevStack scripts. A complete doc build can be run with ``tox -edocs``. - -``exercises`` - Contains the test scripts used to sanity-check and -demonstrate some OpenStack functions. 
These scripts know how to exit -early or skip services that are not enabled. +A complete doc build can be run with ``tox -edocs``. ``extras.d`` - Contains the dispatch scripts called by the hooks in ``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins @@ -79,8 +74,7 @@ of test of specific fragile functions in the ``functions`` and ``tools`` - Contains a collection of stand-alone scripts. While these may reference the top-level DevStack configuration they can generally be -run alone. There are also some sub-directories to support specific -environments such as XenServer. +run alone. Scripts @@ -150,8 +144,8 @@ follows: * Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR`` * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. - ``VOLUME_BACKING_FILE_SIZE`` (nova-compute, nova-volumes and cinder) or - ``PUBLIC_NETWORK_NAME`` (nova-network and neutron) + ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or + ``PUBLIC_NETWORK_NAME`` (only neutron but formerly nova-network too) * Variables that can not be cleanly declared in a project file due to dependency ordering, i.e. the order of sourcing the project files can not be changed for other reasons but the earlier file needs to dereference a @@ -168,7 +162,7 @@ Documentation The DevStack repo now contains all of the static pages of devstack.org in the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every -commit and updates devstack.org (now a redirect to docs.openstack.org/developer/devstack). +commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/). All of the scripts are processed with shocco_ to render them with the comments as text describing the script below. For this reason we tend to be a little @@ -183,88 +177,6 @@ The complete docs build is also handled with tox -edocs per the OpenStack project standard. -Exercises ---------- - -The scripts in the exercises directory are meant to 1) perform basic operational -checks on certain aspects of OpenStack; and b) document the use of the -OpenStack command-line clients. - -In addition to the guidelines above, exercise scripts MUST follow the structure -outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines. -These scripts are executed serially by ``exercise.sh`` in testing situations. - -* Begin and end with a banner that stands out in a sea of script logs to aid - in debugging failures, particularly in automated testing situations. If the - end banner is not displayed, the script ended prematurely and can be assumed - to have failed. - - :: - - echo "**************************************************" - echo "Begin DevStack Exercise: $0" - echo "**************************************************" - ... - set +o xtrace - echo "**************************************************" - echo "End DevStack Exercise: $0" - echo "**************************************************" - -* The scripts will generally have the shell ``xtrace`` attribute set to display - the actual commands being executed, and the ``errexit`` attribute set to exit - the script on non-zero exit codes:: - - # This script exits on an error so that errors don't compound and you see - # only the first error that occurred. - set -o errexit - - # Print the commands being run so that we can see the command that triggers - # an error. It is also useful for following allowing as the install occurs. 
- set -o xtrace - -* Settings and configuration are stored in ``exerciserc``, which must be - sourced after ``openrc`` or ``stackrc``:: - - # Import exercise configuration - source $TOP_DIR/exerciserc - -* There are a couple of helper functions in the common ``functions`` sub-script - that will check for non-zero exit codes and unset environment variables and - print a message and exit the script. These should be called after most client - commands that are not otherwise checked to short-circuit long timeouts - (instance boot failure, for example):: - - swift post $CONTAINER - die_if_error "Failure creating container $CONTAINER" - - FLOATING_IP=`euca-allocate-address | cut -f2` - die_if_not_set FLOATING_IP "Failure allocating floating IP" - -* If you want an exercise to be skipped when for example a service wasn't - enabled for the exercise to be run, you can exit your exercise with the - special exitcode 55 and it will be detected as skipped. - -* The exercise scripts should only use the various OpenStack client binaries to - interact with OpenStack. This specifically excludes any ``*-manage`` tools - as those assume direct access to configuration and databases, as well as direct - database access from the exercise itself. - -* If specific configuration needs to be present for the exercise to complete, - it should be staged in ``stack.sh``, or called from ``stack.sh``. - -* The ``OS_*`` environment variables should be the only ones used for all - authentication to OpenStack clients as documented in the CLIAuth_ wiki page. - -.. _CLIAuth: http://wiki.openstack.org/CLIAuth - -* The exercise MUST clean up after itself if successful. If it is not successful, - it is assumed that state will be left behind; this allows a chance for developers - to look around and attempt to debug the problem. The exercise SHOULD clean up - or graciously handle possible artifacts left over from previous runs if executed - again. It is acceptable to require a reboot or even a re-install of DevStack - to restore a clean test environment. - - Bash Style Guidelines ~~~~~~~~~~~~~~~~~~~~~ DevStack defines a bash set of best practices for maintaining large @@ -276,7 +188,7 @@ to enforce basic guidelines, similar to pep8 and flake8 tools for Python. The list below is not complete for what bashate checks, nor is it all checked by bashate. So many lines of code, so little time. -.. _bashate: https://pypi.python.org/pypi/bashate +.. _bashate: https://pypi.org/project/bashate/ Whitespace Rules ---------------- @@ -322,7 +234,7 @@ Variables and Functions Review Criteria -=============== +--------------- There are some broad criteria that will be followed when reviewing your change @@ -362,5 +274,25 @@ your change even years from now -- why we were motivated to make a change at the time. -* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people - that should be added to reviews of various sub-systems. + + +Making Changes, Testing, and CI +------------------------------- + +Changes to DevStack are tested by automated continuous integration jobs +that run on a variety of Linux distros using a handful of common +configurations. This means that every change to DevStack is +self-testing. One major benefit of this is that developers do not +typically need to add new non-voting test jobs to add features to +DevStack. Instead, the features can be added; if testing passes +with the feature enabled, the change is ready to merge (pending code +review). 
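+
+As a purely illustrative sketch (the job name and feature flag below
+are hypothetical, not real DevStack variables), an enabling change
+might add a Zuul job that turns the feature on via
+``devstack_localrc``::
+
+    - job:
+        name: devstack-my-new-feature
+        parent: devstack
+        vars:
+          devstack_localrc:
+            # hypothetical flag for the feature under test
+            ENABLE_MY_NEW_FEATURE: true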
+ +A concrete example of this was the switch from screen-based +service management to systemd-based service management. No new jobs were +created for this. Instead, the features were added to DevStack, tested +locally and in CI using a change that enabled the feature; then, once +the enabling change was passing and the new behavior had been communicated and +documented, it was merged. + +This process has proven to be effective and leads to +quicker implementation of desired features. diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst deleted file mode 100644 index d4968a6051..0000000000 --- a/MAINTAINERS.rst +++ /dev/null @@ -1,92 +0,0 @@ -MAINTAINERS -=========== - - -Overview --------- - -The following is a list of people known to have interests in -particular areas or sub-systems of devstack. - -It is a rather general guide intended to help seed the initial -reviewers list of a change. A +1 on a review from someone identified -as being a maintainer of its affected area is a very positive flag to -the core team for the veracity of the change. - -The ``devstack-core`` group can still be added to all reviews. - - -Format -~~~~~~ - -The format of the file is the name of the maintainer and their -gerrit-registered email. - - -Maintainers ------------ - -.. contents:: :local: - - -Ceph -~~~~ - -* Sebastien Han - -Cinder -~~~~~~ - -Fedora/CentOS/RHEL -~~~~~~~~~~~~~~~~~~ - -* Ian Wienand - -Neutron -~~~~~~~ - -MidoNet -~~~~~~~ - -* Jaume Devesa -* Ryu Ishimoto -* YAMAMOTO Takashi - -OpenDaylight -~~~~~~~~~~~~ - -* Kyle Mestery - -OpenFlow Agent (ofagent) -~~~~~~~~~~~~~~~~~~~~~~~~ - -* YAMAMOTO Takashi -* Fumihiko Kakuma - -Swift -~~~~~ - -* Chmouel Boudjnah - -SUSE -~~~~ - -* Ralf Haferkamp -* Vincent Untz - -Tempest -~~~~~~~ - -Xen -~~~ -* Bob Ball - -Zaqar (Marconi) -~~~~~~~~~~~~~~~ - -* Flavio Percoco -* Malini Kamalambal - -Oracle Linux -~~~~~~~~~~~~ -* Wiekus Beukes diff --git a/README.md b/README.rst similarity index 83% rename from README.md rename to README.rst index 4ba4619c6d..86b85da956 100644 --- a/README.md +++ b/README.rst @@ -1,8 +1,10 @@ -DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. +DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud +from git source trees. -# Goals +Goals +===== -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora +* To quickly build dev OpenStack environments in a clean Ubuntu or Rocky Linux environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) @@ -13,21 +15,22 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://docs.openstack.org/developer/devstack +Read more at https://docs.openstack.org/devstack/latest IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and will alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. -# Versions +Versions +======== The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the -following to create a juno OpenStack cloud: +following to create a Zed OpenStack cloud:: - git checkout stable/juno + git checkout stable/zed ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -35,10 +38,11 @@ You can also pick specific OpenStack project releases by setting the appropriate `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: - GLANCE_REPO=git://git.openstack.org/openstack/glance.git + GLANCE_REPO=https://opendev.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed -# Start A Dev Cloud +Start A Dev Cloud +================= Installing in a dedicated disposable VM is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for @@ -51,17 +55,18 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost:5000/v2.0/ +* Keystone: http://myhost/identity/v3/ We also provide an environment file that you can use to interact with your -cloud via CLI: +cloud via CLI:: # source openrc file to load your environment with OpenStack CLI creds . openrc # list instances - nova list + openstack server list -# DevStack Execution Environment +DevStack Execution Environment +============================== DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe @@ -81,10 +86,12 @@ check it out to see what DevStack's expectations are for the account it runs under. Many people simply use their usual login (the default 'ubuntu' login on a UEC image for example). -# Customizing +Customizing +=========== DevStack can be extensively configured via the configuration file `local.conf`. It is likely that you will need to provide and modify this file if you want anything other than the most basic setup. Start -by reading the [configuration guide](doc/source/configuration.rst) for -details of the configuration file and the many available options. +by reading the `configuration guide +`_ +for details of the configuration file and the many available options. diff --git a/clean.sh b/clean.sh index 0641bffcf8..092f557a88 100755 --- a/clean.sh +++ b/clean.sh @@ -40,17 +40,16 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy set -o xtrace @@ -64,13 +63,8 @@ if [[ -d $TOP_DIR/extras.d ]]; then done fi -# See if there is anything running... 
-# need to adapt when run_service is merged -SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') -if [[ -n "$SESSION" ]]; then - # Let unstack.sh do its thing first - $TOP_DIR/unstack.sh --all -fi +# Let unstack.sh do its thing first +$TOP_DIR/unstack.sh --all # Run extras # ========== @@ -93,8 +87,10 @@ cleanup_cinder || /bin/true cleanup_glance cleanup_keystone cleanup_nova +cleanup_placement cleanup_neutron cleanup_swift +cleanup_horizon if is_service_enabled ldap; then cleanup_ldap @@ -106,7 +102,7 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th fi # Clean out /etc -sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron /etc/openstack/ +sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/ # Clean out tgt sudo rm -f /etc/tgt/conf.d/* @@ -116,7 +112,7 @@ cleanup_rpc_backend cleanup_database # Clean out data and status -sudo rm -rf $DATA_DIR $DEST/status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async # Clean out the log file and log directories if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then @@ -125,9 +121,11 @@ fi if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then sudo rm -rf $LOGDIR fi -if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then - sudo rm -rf $SCREEN_LOGDIR -fi + +# Clean out the systemd unit files. +sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete +# Make systemd aware of the deletion. +$SYSTEMCTL daemon-reload # Clean up venvs DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" @@ -145,3 +143,6 @@ for file in $FILES_TO_CLEAN; do done rm -rf ~/.config/openstack + +# Clear any fstab entries made +sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header index 6119ab5284..576dbbd35a 100644 --- a/data/devstack-plugins-registry.header +++ b/data/devstack-plugins-registry.header @@ -1,18 +1,16 @@ -.. +.. Note to patch submitters: - Note to patch submitters: + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # - # ============================= # - # THIS FILE IS AUTOGENERATED ! # - # ============================= # + ** Plugins are found automatically and added to this list ** - ** Plugins are found automatically and added to this list ** + This file is created by a periodic proposal job. You should not + edit this file. - This file is created by a periodic proposal job. You should not - edit this file. - - You should edit the files data/devstack-plugins-registry.footer - data/devstack-plugins-registry.header to modify this text. + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. 
========================== DevStack Plugin Registry diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..7980b93ed7 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,7 @@ +pbr>=2.0.0,!=2.1.0 + +Pygments +docutils +sphinx>=2.0.0,!=2.1.0 # BSD +openstackdocstheme>=2.2.1 # Apache-2.0 +zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 index 0000000000..7730ca93f1 Binary files /dev/null and b/doc/source/assets/images/neutron-network-1.png differ diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000..919935119d Binary files /dev/null and b/doc/source/assets/images/neutron-network-2.png differ diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000..34f03ed5c9 Binary files /dev/null and b/doc/source/assets/images/neutron-network-3.png differ diff --git a/doc/source/assets/images/screen_session_1.png b/doc/source/assets/images/screen_session_1.png new file mode 100644 index 0000000000..6ad6752bb1 Binary files /dev/null and b/doc/source/assets/images/screen_session_1.png differ diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf new file mode 120000 index 0000000000..cfc2a4e9d8 --- /dev/null +++ b/doc/source/assets/local.conf @@ -0,0 +1 @@ +../../../samples/local.conf \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index 6e3ec029e9..bb0357286a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,9 +11,6 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys -import os - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -26,7 +23,22 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'oslosphinx', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] +extensions = [ + 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', +] + +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/devstack' +openstackdocs_pdf_link = True +openstackdocs_bug_project = 'devstack' +openstackdocs_bug_tag = '' +openstackdocs_auto_name = False +# This repo is not tagged, so don't set versions +openstackdocs_auto_version = False +version = '' +release = '' todo_include_todos = True @@ -75,7 +87,7 @@ show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['DevStack-doc.'] @@ -87,7 +99,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'nature' +html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -113,11 +125,6 @@ # pixels large. #html_favicon = None -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" -html_last_updated_fmt = os.popen(git_cmd).read() - # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True @@ -161,21 +168,10 @@ # -- Options for LaTeX output -------------------------------------------------- -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'DevStack-doc.tex', u'DevStack Docs', + ('index', 'doc-devstack.tex', u'DevStack Docs', u'OpenStack DevStack Team', 'manual'), ] diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 1161b344e2..3cfba716ca 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -41,6 +41,7 @@ The defined phases are: - **extra** - runs after services are started and before any files in ``extra.d`` are executed - **post-extra** - runs after files in ``extra.d`` are executed +- **test-config** - runs after tempest (and plugins) are configured The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to @@ -63,7 +64,7 @@ exists it will be used instead to preserve backward-compatibility. :: [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 + IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24 ADMIN_PASSWORD=speciale LOGFILE=$DEST/logs/stack.sh.log @@ -136,7 +137,7 @@ OS\_AUTH\_URL :: - OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 + OS_AUTH_URL=http://$SERVICE_HOST:5000/v3 KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG Set command-line client log level to ``DEBUG``. These are commented @@ -161,8 +162,8 @@ values that most often need to be set. - no logging - pre-set the passwords to prevent interactive prompts -- move network ranges away from the local network (``FIXED_RANGE`` and - ``FLOATING_RANGE``, commented out below) +- move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE`` + and ``FLOATING_RANGE``, commented out below) - set the host IP if detection is unreliable (``HOST_IP``, commented out below) @@ -173,13 +174,16 @@ values that most often need to be set. DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD - #FIXED_RANGE=172.31.1.0/24 + #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24 #FLOATING_RANGE=192.168.20.0/25 #HOST_IP=10.3.4.5 If the ``*_PASSWORD`` variables are not set here you will be prompted to enter values for them by ``stack.sh``. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + The network ranges must not overlap with any networks in use on the host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly used for both the local networking and Nova's fixed and floating ranges. @@ -195,13 +199,16 @@ will not be set if there is no IPv6 address on the default Ethernet interface. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IPV6`` is not set by default. +For architecture-specific configurations which differ from the x86 default +here, see `arch-configuration`_. 
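+
+Returning to ``HOST_IP`` and ``HOST_IPV6``: if detection is unreliable,
+both can be pinned explicitly. A minimal sketch (the addresses are
+placeholders for your environment)::
+
+    [[local|localrc]]
+    HOST_IP=10.3.4.5
+    HOST_IPV6=2001:db8::7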
+ Historical Notes ================ Historically DevStack obtained all local configuration and customizations from a ``localrc`` file. In Oct 2013 the ``local.conf`` configuration method was introduced (in `review 46768 -`__) to simplify this +`__) to simplify this process. Configuration Notes @@ -220,25 +227,22 @@ check out. These may be overridden in ``local.conf`` to pull source from a different repo for testing, such as a Gerrit branch proposal. ``GIT_BASE`` points to the primary repository server. - :: +:: - NOVA_REPO=$GIT_BASE/openstack/nova.git - NOVA_BRANCH=master + NOVA_REPO=$GIT_BASE/openstack/nova.git + NOVA_BRANCH=master To pull a branch directly from Gerrit, get the repo and branch from -the Gerrit review page: - - :: - - git fetch https://review.openstack.org/p/openstack/nova refs/changes/50/5050/1 && git checkout FETCH_HEAD +the Gerrit review page:: - The repo is the stanza following ``fetch`` and the branch is the - stanza following that: + git fetch https://review.opendev.org/openstack/nova \ + refs/changes/50/5050/1 && git checkout FETCH_HEAD - :: +The repo is the stanza following ``fetch`` and the branch is the +stanza following that:: - NOVA_REPO=https://review.openstack.org/p/openstack/nova - NOVA_BRANCH=refs/changes/50/5050/1 + NOVA_REPO=https://review.opendev.org/openstack/nova + NOVA_BRANCH=refs/changes/50/5050/1 Installation Directory @@ -251,13 +255,15 @@ By setting it early in the ``localrc`` section you can reference it in later variables. It can be useful to set it even though it is not changed from the default value. - :: +:: - DEST=/opt/stack + DEST=/opt/stack Logging ------- +.. _enable_logging: + Enable Logging ~~~~~~~~~~~~~~ @@ -267,54 +273,45 @@ runs. It can be sent to a file in addition to the console by setting timestamp will be appended to the given filename for each run of ``stack.sh``. - :: +:: - LOGFILE=$DEST/logs/stack.sh.log + LOGFILE=$DEST/logs/stack.sh.log Old log files are cleaned automatically if ``LOGDAYS`` is set to the number of days of old log files to keep. - :: +:: - LOGDAYS=1 + LOGDAYS=2 -The some of the project logs (Nova, Cinder, etc) will be colorized by -default (if ``SYSLOG`` is not set below); this can be turned off by -setting ``LOG_COLOR`` to ``False``. +Some coloring is used during the DevStack runs to make it easier to +see what is going on. This can be disabled with:: - :: + LOG_COLOR=False - LOG_COLOR=False +When using the logfile, by default logs are sent to the console and +the file. You can set ``VERBOSE`` to ``false`` if you only wish the +logs to be sent to the file (this may avoid having double-logging in +some cases where you are capturing the script output and the log +files). If ``VERBOSE`` is ``true`` you can additionally set +``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each +output line sent to the console. This can be useful in some +situations where the console output is being captured by a runner or +framework (e.g. Ansible) that adds its own timestamps. Note that the +log lines sent to the ``LOGFILE`` will still be prefixed with a +timestamp. Logging the Service Output ~~~~~~~~~~~~~~~~~~~~~~~~~~ -DevStack will log the ``stdout`` output of the services it starts. -When using ``screen`` this logs the output in the screen windows to a -file. Without ``screen`` this simply redirects stdout of the service -process to a file in ``LOGDIR``. - - :: - - LOGDIR=$DEST/logs - -Note the use of ``DEST`` to locate the main install directory; this -is why we suggest setting it in ``local.conf``. 
+By default, services run under ``systemd`` and are natively logging to
+the systemd journal.

-Enabling Syslog
-~~~~~~~~~~~~~~~
+To query the logs, use the ``journalctl`` command, such as::

-Logging all services to a single syslog can be convenient. Enable
-syslogging by setting ``SYSLOG`` to ``True``. If the destination log
-host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be used
-to direct the message stream to the log host.
-
- ::
-
- SYSLOG=True
- SYSLOG_HOST=$HOST_IP
- SYSLOG_PORT=516
+ sudo journalctl --unit devstack@*

+More examples can be found in :ref:`journalctl-examples`.

Example Logging Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -322,13 +319,12 @@
For example, non-interactive installs probably wish to save output to
a file, keep service logs and disable color in the stored files.

- ::
+::

- [[local|localrc]]
- DEST=/opt/stack/
- LOGDIR=$DEST/logs
- LOGFILE=$LOGDIR/stack.sh.log
- LOG_COLOR=False
+ [[local|localrc]]
+ DEST=/opt/stack/
+ LOGFILE=$DEST/stack.sh.log
+ LOG_COLOR=False

Database Backend
----------------
@@ -336,12 +332,10 @@
Multiple database backends are available. The available databases are
defined in the lib/databases directory. ``mysql`` is the default
database, choose a different one by putting the
-following in the ``localrc`` section:
+following in the ``localrc`` section::

- ::
-
- disable_service mysql
- enable_service postgresql
+ disable_service mysql
+ enable_service postgresql

``mysql`` is the default database.

@@ -353,61 +347,34 @@ backends may be available via external plugins.
Enabling or disabling RabbitMQ is handled via the usual service
functions and ``ENABLED_SERVICES``.

-Example disabling RabbitMQ in ``local.conf``:
-
-::
-
- disable_service rabbit
+Example disabling RabbitMQ in ``local.conf``::

+ disable_service rabbit

Apache Frontend
---------------

-The Apache web server can be enabled for wsgi services that support
-being deployed under HTTPD + mod_wsgi. By default, services that
-recommend running under HTTPD + mod_wsgi are deployed under Apache. To
-use an alternative deployment strategy (e.g. eventlet) for services
-that support an alternative to HTTPD + mod_wsgi set
-``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
-``local.conf``.
-
-Each service that can be run under HTTPD + mod_wsgi also has an
-override toggle available that can be set in your ``local.conf``.
-
-Keystone is run under Apache with ``mod_wsgi`` by default.
-
-Example (Keystone)
-
-::
-
- KEYSTONE_USE_MOD_WSGI="True"
+The Apache web server is enabled for services that support being served
+via WSGI. Today this
+means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This
+historical legacy is captured by the naming of many variables, which include
+``MOD_WSGI`` rather than ``UWSGI``.

-Example (Nova):
+Some services support alternative deployment strategies (e.g. eventlet). You
+can enable these by setting ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
+``local.conf``. In addition, each service that can be run under HTTPD +
+mod_wsgi also has an override toggle available that can be set in your
+``local.conf``. These are, however, slowly being removed as services have
+adopted standardized deployment mechanisms and more generally moved away from
+eventlet.
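A sketch of the global opt-out described above, assuming the services in question still offer an alternative strategy; the switch goes in the ``localrc`` section::

    [[local|localrc]]
    # deploy services with their alternative (non-HTTPD) strategy where available
    ENABLE_HTTPD_MOD_WSGI_SERVICES=False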
-::
-
- NOVA_USE_MOD_WSGI="True"
-
-Example (Swift):
-
-::
+Example (Swift)::

 SWIFT_USE_MOD_WSGI="True"

-Example (Heat):
-
-::
+Example (Heat)::

 HEAT_USE_MOD_WSGI="True"
-
-Example (Cinder):
-
-::
-
- CINDER_USE_MOD_WSGI="True"
-
-
Libraries from Git
------------------
@@ -419,9 +386,9 @@
system you can have devstack install it from upstream, or from local
git trees by specifying it in ``LIBS_FROM_GIT``. Multiple libraries
can be specified as a comma separated list.

- ::
+::

- LIBS_FROM_GIT=python-keystoneclient,oslo.config
+ LIBS_FROM_GIT=python-keystoneclient,oslo.config

Setting the variable to ``ALL`` will activate the download for all
libraries.

@@ -437,9 +404,9 @@
Each entry in the ``PROJECT_VENV`` array contains the directory name
of a venv to be used for the project. The array index is the project
name. Multiple projects can use the same venv if desired.

- ::
+::

- PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
+ PROJECT_VENV["glance"]=${GLANCE_DIR}.venv

``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional
packages to be installed into each venv. Often projects will not have
@@ -448,10 +415,9 @@
are 'optional' requirements, i.e. only needed for certain
configurations. By default, the enabled databases will have their
Python bindings added when they are enabled.

- ::
-
- ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
+::

+ ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"

A clean install every time
--------------------------
@@ -461,9 +427,9 @@
exist in ``$DEST``. ``stack.sh`` will freshen each repo on each run
if ``RECLONE`` is set to ``yes``. This avoids having to manually
remove repos in order to get the current branch from ``$GIT_BASE``.

- ::
+::

- RECLONE=yes
+ RECLONE=yes

Upgrade packages installed by pip
---------------------------------
@@ -474,9 +440,9 @@
requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing
required Python packages will be upgraded to the most recent version
that matches requirements.

- ::
+::

- PIP_UPGRADE=True
+ PIP_UPGRADE=True

Guest Images
------------
@@ -490,11 +456,11 @@
their testing-requirements in ``stack.sh``. Setting these default
images; in that case, you will want to populate ``IMAGE_URLS`` with
sufficient images to satisfy testing-requirements.

- ::
+::

- DOWNLOAD_DEFAULT_IMAGES=False
- IMAGE_URLS="http://foo.bar.com/image.qcow,"
- IMAGE_URLS+="http://foo.bar.com/image2.qcow"
+ DOWNLOAD_DEFAULT_IMAGES=False
+ IMAGE_URLS="http://foo.bar.com/image.qcow,"
+ IMAGE_URLS+="http://foo.bar.com/image2.qcow"

Instance Type
-------------
@@ -509,45 +475,43 @@
should be specified in the configuration file so Tempest selects the
default flavors instead.

KVM on Power with QEMU 2.4 requires 512 MB to load the firmware -
-`QEMU 2.4 - PowerPC `__ so users
+`QEMU 2.4 - PowerPC `__ so users
running instances on ppc64/ppc64le can choose one of the default
created flavors as follows:

- ::
+::

- DEFAULT_INSTANCE_TYPE=m1.tiny
+ DEFAULT_INSTANCE_TYPE=m1.tiny

IP Version
----------

-``IP_VERSION`` can be used to configure DevStack to create either an
-IPv4, IPv6, or dual-stack self service project data-network by with
+``IP_VERSION`` can be used to configure Neutron to create either an
+IPv4, IPv6, or dual-stack self-service project data-network with
either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
-respectively. This functionality requires that the Neutron networking
-service is enabled by setting the following options:
+respectively.
- ::
+::

- disable_service n-net
- enable_service q-svc q-agt q-dhcp q-l3
+ IP_VERSION=4+6

The following optional variables can be used to alter the default IPv6
behavior:

- ::
+::

- IPV6_RA_MODE=slaac
- IPV6_ADDRESS_MODE=slaac
- FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64
- IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
+ IPV6_RA_MODE=slaac
+ IPV6_ADDRESS_MODE=slaac
+ IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
+ IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1

-*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be
-configured with any valid IPv6 prefix. The default values make use of
-an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
+*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
+can be configured with any valid IPv6 prefix. The default values make
+use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.

-Service Version
-~~~~~~~~~~~~~~~
+Service IP Version
+~~~~~~~~~~~~~~~~~~

DevStack can enable service operation over either IPv4 or IPv6 by
setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or
@@ -563,11 +527,30 @@
address.

The default value for this setting is ``4``. Dual-mode support, for
example ``4+6`` is not currently supported. ``HOST_IPV6`` can
-optionally be used to alter the default IPv6 address

- ::
+optionally be used to alter the default IPv6 address::

+ HOST_IPV6=${some_local_ipv6_address}
+
+Tunnel IP Version
+~~~~~~~~~~~~~~~~~
+
+DevStack can enable tunnel operation over either IPv4 or IPv6 by
+setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or
+``TUNNEL_IP_VERSION=6`` respectively.
+
+When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints,
+for example, ``HOST_IP``.
+
+When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints,
+for example, ``HOST_IPV6``.
+
+The default value for this setting is ``4``. Dual-mode support, for
+example ``4+6``, is not supported, as this value must match the address
+family of the local tunnel endpoint IP(v6) address.

- HOST_IPV6=${some_local_ipv6_address}
+The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the
+setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP``
+when set to ``4``, and ``HOST_IPV6`` when set to ``6``.

Multi-node setup
~~~~~~~~~~~~~~~~
@@ -589,9 +572,7 @@ Swift
-----

Swift is disabled by default. When enabled, it is configured with
only one replica to avoid being IO/memory intensive on a small
-VM. When running with only one replica the account, container and
-object services will run directly in screen. The others services like
-replicator, updaters or auditor runs in background.
+VM.

If you would like to enable Swift you can add this to your ``localrc``
section:

@@ -627,37 +608,14 @@ used when adding nodes to the Swift rings.

Swift S3
++++++++

-If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will
-install the swift3 middleware emulation. Swift will be configured to
+If you are enabling ``s3api`` in ``ENABLED_SERVICES`` DevStack will
+install the s3api middleware emulation. Swift will be configured to
act as a S3 endpoint for Keystone so effectively replacing the
``nova-objectstore``.

-Only Swift proxy server is launched in the screen session all other
+Only the Swift proxy server is launched via systemd; all other
services are started in background and managed by ``swift-init`` tool.

-Heat
-~~~~
-
-Heat is disabled by default (see ``stackrc`` file).
To enable it -explicitly you'll need the following settings in your ``localrc`` -section - -:: - - enable_service heat h-api h-api-cfn h-api-cw h-eng - -Heat can also run in standalone mode, and be configured to orchestrate -on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your ``localrc`` section - -:: - - disable_all_services - enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng - HEAT_STANDALONE=True - KEYSTONE_SERVICE_HOST=... - KEYSTONE_AUTH_HOST=... - Tempest ~~~~~~~ @@ -667,7 +625,7 @@ tests can be run as follows: :: $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops + $ tox -e smoke By default tempest is downloaded and the config file is generated, but the tempest package is not installed in the system's global site-packages (the @@ -680,40 +638,54 @@ outside of tox. If you would like to install it add the following to your INSTALL_TEMPEST=True -Xenserver -~~~~~~~~~ +Cinder +~~~~~~ + +The logical volume group used to hold the Cinder-managed volumes is +set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with +``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set +with ``VOLUME_BACKING_FILE_SIZE``. -If you would like to use Xenserver as the hypervisor, please refer to -the instructions in ``./tools/xen/README.md``. +:: -Cells -~~~~~ + VOLUME_GROUP_NAME="stack-volumes" + VOLUME_NAME_PREFIX="volume-" + VOLUME_BACKING_FILE_SIZE=24G -`Cells `__ is -an alternative scaling option. To setup a cells environment add the -following to your ``localrc`` section: +When running highly concurrent tests, the default per-project quotas +for volumes, backups, or snapshots may be too small. These can be +adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, +or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for +each is 10.) -:: +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. - enable_service n-cell +Defaults use iSCSI with the LIO target manager:: -Be aware that there are some features currently missing in cells, one -notable one being security groups. The exercises have been patched to -disable functionality not supported by cells. + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 -Cinder -~~~~~~ +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: -The logical volume group used to hold the Cinder-managed volumes is -set by ``VOLUME_GROUP``, the logical volume name prefix is set with -``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set -with ``VOLUME_BACKING_FILE_SIZE``. + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 - :: +When selecting the RDMA transport protocol DevStack will create on Cinder nodes +a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined +then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. 
- VOLUME_GROUP="stack-volumes"
- VOLUME_NAME_PREFIX="volume-"
- VOLUME_BACKING_FILE_SIZE=10250M
+This Soft-RoCE device will always be created on the Nova compute side since we
+cannot tell beforehand whether there will be an RDMA connection or not.

Keystone
========
@@ -739,7 +711,6 @@ In RegionTwo:

 disable_service horizon
 KEYSTONE_SERVICE_HOST=
- KEYSTONE_AUTH_HOST=
 REGION_NAME=RegionTwo
 KEYSTONE_REGION_NAME=RegionOne
@@ -752,49 +723,84 @@
KEYSTONE_REGION_NAME to specify the region of Keystone service.
KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus
we omit it in the configuration of RegionOne.

-Disabling Identity API v2
-+++++++++++++++++++++++++
+Glance
++++++

-The Identity API v2 is deprecated as of Mitaka and it is recommended to only
-use the v3 API. It is possible to setup keystone without v2 API, by doing:
+The default image size quota of 1GiB may be too small if larger images
+are to be used. Change the default at setup time with:

::

- ENABLE_IDENTITY_V2=False
-
-Exercises
-~~~~~~~~~
-
-``exerciserc`` is used to configure settings for the exercise scripts.
-The values shown below are the default values. These can all be
-overridden by setting them in the ``localrc`` section.
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000

-* Max time to wait while vm goes from build to active state
+or at runtime via:

- ::
-
- ACTIVE_TIMEOUT==30
+::

-* Max time to wait for proper IP association and dis-association.
+ openstack --os-cloud devstack-system-admin registered limit set \
+ --service glance --default-limit 5000 --region RegionOne image_size_total

- ::
+.. _arch-configuration:

- ASSOCIATE_TIMEOUT=15
+Architectures
+-------------

-* Max time till the vm is bootable
+The upstream CI runs exclusively on nodes with x86 architectures, but
+OpenStack supports even more architectures. Some of them require
+Devstack to be configured in a certain way.

- ::
+KVM on s390x (IBM z Systems)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- BOOT_TIMEOUT=30
+KVM on s390x (IBM z Systems) is supported since the *Kilo* release. For
+an all-in-one setup, these minimal settings in the ``local.conf`` file
+are needed::

-* Max time from run instance command until it is running
+ [[local|localrc]]
+ ADMIN_PASSWORD=secret
+ DATABASE_PASSWORD=$ADMIN_PASSWORD
+ RABBIT_PASSWORD=$ADMIN_PASSWORD
+ SERVICE_PASSWORD=$ADMIN_PASSWORD

- ::
+ DOWNLOAD_DEFAULT_IMAGES=False
+ IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img"

- RUNNING_TIMEOUT=$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))
+ # Provide a custom etcd3 binary download URL and its sha256.
+ # The binary must be located under '//etcd--linux-s390x.tar.gz'
+ # on this URL.
+ # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd
+ ETCD_DOWNLOAD_URL=
+ ETCD_SHA256=

-* Max time to wait for a vm to terminate
+ enable_service n-sproxy
+ disable_service n-novnc

- ::
+ [[post-config|$NOVA_CONF]]

- TERMINATE_TIMEOUT=30
+ [serial_console]
+ base_url=ws://$HOST_IP:6083/ # optional
+
+Reasoning:
+
+* The default image of Devstack is x86 only, so we deactivate the download
+ with ``DOWNLOAD_DEFAULT_IMAGES``. The referenced guest image
+ in the code above (``IMAGE_URLS``) serves as an example. The list of
+ possible s390x guest images is not limited to that.
+
+* This platform doesn't support a graphical console like VNC or SPICE.
+ The technical reason is the missing framebuffer on the platform. This
+ means we rely on the substitute feature *serial console* which needs the
+ proxy service ``n-sproxy``.
We also disable VNC's proxy ``n-novnc`` for
+ that reason. The configuration in the ``post-config`` section is only
+ needed if you want to use the *serial console* outside of the all-in-one
+ setup.
+
+* A link to an etcd3 binary and its sha256 needs to be provided as the
+ binary for s390x is not hosted on github like it is for other
+ architectures. For more details see
+ https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be
+ built by following https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd.
+
+.. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need
+ to use a guest image which is smaller than 1GB when uncompressed.
+ The example image from above is bigger than that!
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000000..8b5a85b3df
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,57 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide `_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+Below we cover the more project-specific information you need to get started
+with Devstack.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at OFTC.
+* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Devstack Core Team
+`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Devstack features are tracked on `Launchpad BP `_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad `_.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad `__.
+More info about Launchpad usage can be found on `OpenStack docs page
+`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Devstack require two ``Code-Review +2`` votes from
+Devstack core reviewers before one of the core reviewers can approve the patch
+by giving a ``Workflow +1`` vote. There are two exceptions: patches that
+unblock the gate and patches that do not touch Devstack's core logic (for
+example, old job cleanups) can be approved by a single core reviewer.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+`_.
+
+The Release Process for QA is documented in `QA Release Process
+`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
new file mode 100644
index 0000000000..3ca0ad94b4
--- /dev/null
+++ b/doc/source/debugging.rst
@@ -0,0 +1,52 @@
+=====================
+System-wide debugging
+=====================
+
+A lot can go wrong during a devstack run, and there are a few inbuilt
+tools to help you.
+
+dstat
+-----
+
+Enable the ``dstat`` service to produce performance logs during the
+devstack run. These will be logged to the journal and also as a CSV
+file.
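These debugging helpers are switched on like any other DevStack service; a minimal sketch for ``dstat``, which applies equally to the trackers described next::

    [[local|localrc]]
    enable_service dstat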
+ +memory_tracker +-------------- + +The ``memory_tracker`` service periodically monitors RAM usage and +provides consumption output when available memory is seen to be +falling (i.e. processes are consuming memory). It also provides +output showing locked (unswappable) memory. + +file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. + +tcpdump +------- + +Enable the ``tcpdump`` service to run a background tcpdump. You must +set the ``TCPDUMP_ARGS`` variable to something suitable (there is no +default). For example, to trace iSCSI communication during a job in +the OpenStack gate and copy the result into the log output, you might +use: + +.. code-block:: yaml + + job: + name: devstack-job + parent: devstack + vars: + devstack_services: + tcpdump: true + devstack_localrc: + TCPDUMP_ARGS: "-i any tcp port 3260" + zuul_copy_output: + '{{ devstack_log_dir }}/tcpdump.pcap': logs + + + diff --git a/doc/source/development.rst b/doc/source/development.rst new file mode 100644 index 0000000000..957de9b0e1 --- /dev/null +++ b/doc/source/development.rst @@ -0,0 +1,117 @@ +========================== + Developing with Devstack +========================== + +Now that you have your nifty DevStack up and running, what can you do +with it? + +Inspecting Services +=================== + +By default most services in DevStack are running as `systemd` units +named `devstack@$servicename.service`. You can see running services +with. + +.. code-block:: bash + + sudo systemctl status "devstack@*" + +To learn more about the basics of systemd, see :doc:`/systemd` + +Patching a Service +================== + +If you want to make a quick change to a running service the easiest +way to do that is to change the code directly in /opt/stack/$service +and then restart the affected daemons. + +.. code-block:: bash + + sudo systemctl restart devstack@n-cpu.service + +If your change impacts more than one daemon you can restart by +wildcard as well. + +.. code-block:: bash + + sudo systemctl restart "devstack@n-*" + +.. warning:: + + All changes you are making are in checked out git trees that + DevStack thinks it has full control over. Uncommitted work, or + work committed to the master branch, may be overwritten during + subsequent DevStack runs. + +Testing a Patch Series +====================== + +When testing a larger set of patches, or patches that will impact more +than one service within a project, it is often less confusing to use +custom git locations, and make all your changes in a dedicated git +tree. + +In your ``local.conf`` you can add ``**_REPO``, ``**_BRANCH`` for most projects +to use a custom git tree instead of the default upstream ones. + +For instance: + +.. code-block:: bash + + [[local|localrc]] + NOVA_REPO=/home/sdague/nova + NOVA_BRANCH=fold_disk_config + +Will use a custom git tree and branch when doing any devstack +operations, such as ``stack.sh``. + +When testing complicated changes committing to these trees, then doing +``./unstack.sh && ./stack.sh`` is often a valuable way to +iterate. This does take longer per iteration than direct patching, as +the whole devstack needs to rebuild. + +You can use this same approach to test patches that are up for review +in gerrit by using the ref name that gerrit assigns to each change. + +.. 
code-block:: bash + + [[local|localrc]] + NOVA_BRANCH=refs/changes/10/353710/1 + + +Testing Changes to Libraries +============================ + +When testing changes to libraries consumed by OpenStack services (such +as oslo or any of the python-fooclient libraries) things are a little +more complicated. By default we only test with released versions of +these libraries that are on pypi. + +You must first override this with the setting ``LIBS_FROM_GIT``. This +will enable your DevStack with the git version of that library instead +of the released version. + +After that point you can also specify ``**_REPO``, ``**_BRANCH`` to use +your changes instead of just upstream master. + +.. code-block:: bash + + [[local|localrc]] + LIBS_FROM_GIT=oslo.policy + OSLOPOLICY_REPO=/home/sdague/oslo.policy + OSLOPOLICY_BRANCH=better_exception + +As libraries are not installed `editable` by pip, after you make any +local changes you will need to: + +* cd to top of library path +* sudo pip install -U . +* restart all services you want to use the new library + +You can do that with wildcards such as + +.. code-block:: bash + + sudo systemctl restart "devstack@n-*" + +which will restart all nova services. diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 7793d8eb68..8214de0f6a 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -18,6 +18,57 @@ production systems. Your best choice is probably to choose a `distribution of OpenStack `__. +Can I use DevStack as a development environment? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sure, you can. That said, there are a couple of things you should note before +doing so: + +- DevStack makes a lot of configuration changes to your system and should not + be run in your main development environment. + +- All the repositories that DevStack clones when deploying are considered + volatile by default and thus are subject to hard resets. This is necessary to + keep you in sync with the latest upstream, which is what you want in a CI + situation, but it can result in branches being overwritten and files being + removed. + + The corollary of this is that if you are working on a specific project, using + the DevStack project repository (defaulted to ``/opt/stack/``) as + the single master repository for storing all your work is not recommended. + This behavior can be overridden by setting the ``RECLONE`` config option to + ``no``. Alternatively, you can avoid running ``stack.sh`` to redeploy by + restarting services manually. In any case, you should generally ensure work + in progress is pushed to Gerrit or otherwise backed up before running + ``stack.sh``. + +- If you use DevStack within a VM, you may wish to mount a local OpenStack + directory, such as ``~/src/openstack``, inside the VM and configure DevStack + to use this as the clone location using the ``{PROJECT}_REPO`` config + variables. For example, assuming you're using Vagrant and sharing your home + directory, you should place the following in ``local.conf``: + + .. 
code-block:: shell + + NEUTRON_REPO=/home/vagrant/src/neutron + NOVA_REPO=/home/vagrant/src/nova + KEYSTONE_REPO=/home/vagrant/src/keystone + GLANCE_REPO=/home/vagrant/src/glance + SWIFT_REPO=/home/vagrant/src/swift + HORIZON_REPO=/home/vagrant/src/horizon + CINDER_REPO=/home/vagrant/src/cinder + HEAT_REPO=/home/vagrant/src/heat + TEMPEST_REPO=/home/vagrant/src/tempest + HEATCLIENT_REPO=/home/vagrant/src/python-heatclient + GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient + NOVACLIENT_REPO=/home/vagrant/src/python-novaclient + NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient + OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient + HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools + HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates + NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas + # ... + Why a shell script, why not chef/puppet/... ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -29,20 +80,20 @@ I'd like to help! ~~~~~~~~~~~~~~~~~ That isn't a question, but please do! The source for DevStack is at -`git.openstack.org -`__ and bug +`opendev.org `__ and bug reports go to `LaunchPad -`__. Contributions follow the +`__. Contributions follow the usual process as described in the `developer guide -`__. This +`__. This Sphinx documentation is housed in the doc directory. Why not use packages? ~~~~~~~~~~~~~~~~~~~~~ Unlike packages, DevStack leaves your cloud ready to develop - -checkouts of the code and services running in screen. However, many -people are doing the hard work of packaging and recipes for production +checkouts of the code and services running locally under systemd, +making it easy to hack on and test new patches. However, many people +are doing the hard work of packaging and recipes for production deployments. Why isn't $MY\_FAVORITE\_DISTRO supported? @@ -130,8 +181,8 @@ How do I run a specific OpenStack release? DevStack master tracks the upstream master of all the projects. If you would like to run a stable branch of OpenStack, you should use the corresponding stable branch of DevStack as well. For instance the -``stable/kilo`` version of DevStack will already default to all the -projects running at ``stable/kilo`` levels. +``stable/ocata`` version of DevStack will already default to all the +projects running at ``stable/ocata`` levels. Note: it's also possible to manually adjust the ``*_BRANCH`` variables further if you would like to test specific milestones, or even custom @@ -158,16 +209,6 @@ to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is often good enough for a single-node installation. And in an extreme case, use ``clean.sh`` to eradicate it and try again. -Configure ``local.conf`` thusly: - - :: - - [[local|localrc]] - HEAT_STANDALONE=True - ENABLED_SERVICES=rabbit,mysql,heat,h-api,h-api-cfn,h-api-cw,h-eng - KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= - Why are my configuration changes ignored? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/guides.rst b/doc/source/guides.rst new file mode 100644 index 0000000000..e7b46b6e55 --- /dev/null +++ b/doc/source/guides.rst @@ -0,0 +1,80 @@ +Guides +====== + +.. warning:: + + The guides are point in time contributions, and may not always be + up to date with the latest work in devstack. + +Walk through various setups used by stackers + +.. 
toctree:: + :glob: + :hidden: + :maxdepth: 1 + + guides/single-vm + guides/single-machine + guides/lxc + guides/multinode-lab + guides/neutron + guides/devstack-with-nested-kvm + guides/nova + guides/devstack-with-octavia + guides/devstack-with-ldap + +All-In-One Single VM +-------------------- + +Run :doc:`OpenStack in a VM `. The VMs launched in your cloud will be slow as +they are running in QEMU (emulation), but it is useful if you don't have +spare hardware laying around. :doc:`[Read] ` + +All-In-One Single Machine +------------------------- + +Run :doc:`OpenStack on dedicated hardware ` This can include a +server-class machine or a laptop at home. +:doc:`[Read] ` + +All-In-One LXC Container +------------------------- + +Run :doc:`OpenStack in a LXC container `. Beneficial for intermediate +and advanced users. The VMs launched in this cloud will be fully accelerated but +not all OpenStack features are supported. :doc:`[Read] ` + +Multi-Node Lab +-------------- + +Setup a :doc:`multi-node cluster ` with dedicated VLANs for VMs & Management. +:doc:`[Read] ` + +DevStack with Neutron Networking +-------------------------------- + +Building a DevStack cluster with :doc:`Neutron Networking `. +This guide is meant for building lab environments with a dedicated +control node and multiple compute nodes. + +DevStack with KVM-based Nested Virtualization +--------------------------------------------- + +Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization +`. With this setup, Nova instances +will be more performant than with plain QEMU emulation. + +Nova and devstack +-------------------------------- + +Guide to working with nova features :doc:`Nova and devstack `. + +Configure Octavia +----------------- + +Guide on :doc:`Configure Octavia `. + +Deploying DevStack with LDAP +---------------------------- + +Guide to setting up :doc:`DevStack with LDAP `. diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst deleted file mode 100644 index 0c439ad3ad..0000000000 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ /dev/null @@ -1,110 +0,0 @@ -Configure Load-Balancer Version 2 -================================= - -Starting in the OpenStack Liberty release, the -`neutron LBaaS v2 API `_ -is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference -driver is based on Octavia. - - -Phase 1: Create DevStack + 2 nova instances --------------------------------------------- - -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find useful. - -Install devstack - - :: - - git clone https://git.openstack.org/openstack-dev/devstack - cd devstack - - -Edit your ``local.conf`` to look like - - :: - - [[local|localrc]] - # Load the external LBaaS plugin. 
- enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas - enable_plugin octavia https://git.openstack.org/openstack/octavia - - # ===== BEGIN localrc ===== - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - RABBIT_PASSWORD=password - # Enable Logging - LOGFILE=$DEST/logs/stack.sh.log - VERBOSE=True - LOG_COLOR=True - SCREEN_LOGDIR=$DEST/logs - # Pre-requisite - ENABLED_SERVICES=rabbit,mysql,key - # Horizon - ENABLED_SERVICES+=,horizon - # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch - # Glance - ENABLED_SERVICES+=,g-api,g-reg - # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta - # Enable LBaaS v2 - ENABLED_SERVICES+=,q-lbaasv2 - ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api - # Cinder - ENABLED_SERVICES+=,c-api,c-vol,c-sch - # Tempest - ENABLED_SERVICES+=,tempest - # ===== END localrc ===== - -Run stack.sh and do some sanity checks - - :: - - ./stack.sh - . ./openrc - - neutron net-list # should show public and private networks - -Create two nova instances that we can use as test http servers: - - :: - - #create nova instances on private network - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1 - nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2 - nova list # should show the nova instances just created - - #add secgroup rules to allow ssh etc.. - neutron security-group-rule-create default --protocol icmp - neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22 - neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80 - -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run - - :: - - MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') - while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& - -Phase 2: Create your load balancers ------------------------------------- - - :: - - neutron lbaas-loadbalancer-create --name lb1 private-subnet - neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE. - neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 - sleep 10 # Sleep since LBaaS actions can take a few seconds depending on the environment. - neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - sleep 10 - neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1 - sleep 10 - neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1 - -Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes -(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be -reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is -"curl that-lb-ip", which should alternate between showing the IPs of the two nodes. 
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst new file mode 100644 index 0000000000..4c54723c71 --- /dev/null +++ b/doc/source/guides/devstack-with-ldap.rst @@ -0,0 +1,174 @@ +============================ +Deploying DevStack with LDAP +============================ + +The OpenStack Identity service has the ability to integrate with LDAP. The goal +of this guide is to walk you through setting up an LDAP-backed OpenStack +development environment. + +Introduction +============ + +LDAP support in keystone is read-only. You can use it to back an entire +OpenStack deployment to a single LDAP server, or you can use it to back +separate LDAP servers to specific keystone domains. Users within those domains +can authenticate against keystone, assume role assignments, and interact with +other OpenStack services. + +Configuration +============= + +To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of +``ENABLED_SERVICES`` in the ``local.conf`` file:: + + enable_service ldap + +Devstack will require a password to set up an LDAP administrator. This +administrative user is also the bind user specified in keystone's configuration +files, similar to a ``keystone`` user for MySQL databases. + +Devstack will prompt you for a password when running ``stack.sh`` if +``LDAP_PASSWORD`` is not set. You can add the following to your +``local.conf``:: + + LDAP_PASSWORD=super_secret_password + +At this point, devstack should have everything it needs to deploy OpenLDAP, +bootstrap it with a minimal set of users, and configure it to back to a domain +in keystone. You can do this by running the ``stack.sh`` script:: + + $ ./stack.sh + +Once ``stack.sh`` completes, you should have a running keystone deployment with +a basic set of users. It is important to note that not all users will live +within LDAP. Instead, keystone will back different domains to different +identity sources. For example, the ``default`` domain will be backed by MySQL. +This is usually where you'll find your administrative and services users. If +you query keystone for a list of domains, you should see a domain called +``Users``. This domain is set up by devstack and points to OpenLDAP. + +User Management +=============== + +Initially, there will only be two users in the LDAP server. The ``Manager`` +user is used by keystone to talk to OpenLDAP. The ``demo`` user is a generic +user that you should be able to see if you query keystone for users within the +``Users`` domain. Both of these users were added to LDAP using basic LDAP +utilities installed by devstack (e.g. ``ldap-utils``) and LDIFs. The LDIFs used +to create these users can be found in ``devstack/files/ldap/``. + +Listing Users +------------- + +To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP +user bootstrapped by devstack:: + + $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -b dc=openstack,dc=org + +As you can see, devstack creates an OpenStack domain called ``openstack.org`` +as a container for the ``Manager`` and ``demo`` users. + +Creating Users +-------------- + +Since keystone's LDAP integration is read-only, users must be added directly to +LDAP. Users added directly to OpenLDAP will automatically be placed into the +``Users`` domain. + +LDIFs can be used to add users via the command line. 
The following is an +example LDIF that can be used to create a new LDAP user, let's call it +``peter.ldif.in``:: + + dn: cn=peter,ou=Users,dc=openstack,dc=org + cn: peter + displayName: Peter Quill + givenName: Peter Quill + mail: starlord@openstack.org + objectClass: inetOrgPerson + objectClass: top + sn: peter + uid: peter + userPassword: im-a-better-pilot-than-rocket + +Now, we use the ``Manager`` user to create a user for Peter in LDAP:: + + $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -c -f peter.ldif.in + +We should be able to assign Peter roles on projects. After Peter has some level +of authorization, he should be able to login to Horizon by specifying the +``Users`` domain and using his ``peter`` username and password. Authorization +can be given to Peter by creating a project within the ``Users`` domain and +giving him a role assignment on that project:: + + $ openstack project create --domain Users awesome-mix-vol-1 + +-------------+----------------------------------+ + | Field | Value | + +-------------+----------------------------------+ + | description | | + | domain_id | 61a2de23107c46bea2d758167af707b9 | + | enabled | True | + | id | 7d422396d54945cdac8fe1e8e32baec4 | + | is_domain | False | + | name | awesome-mix-vol-1 | + | parent_id | 61a2de23107c46bea2d758167af707b9 | + | tags | [] | + +-------------+----------------------------------+ + $ openstack role add --user peter --user-domain Users \ + --project awesome-mix-vol-1 --project-domain Users admin + + +Deleting Users +-------------- + +We can use the same basic steps to remove users from LDAP, but instead of using +LDIFs, we can just pass the ``dn`` of the user we want to delete:: + + $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org + +Group Management +================ + +Like users, groups are considered specific identities. This means that groups +also fall under the same read-only constraints as users and they can be managed +directly with LDAP in the same way users are with LDIFs. + +Adding Groups +------------- + +Let's define a specific group with the following LDIF:: + + dn: cn=guardians,ou=UserGroups,dc=openstack,dc=org + objectClass: groupOfNames + cn: guardians + description: Guardians of the Galaxy + member: cn=peter,dc=openstack,dc=org + member: cn=gamora,dc=openstack,dc=org + member: cn=drax,dc=openstack,dc=org + member: cn=rocket,dc=openstack,dc=org + member: cn=groot,dc=openstack,dc=org + +We can create the group using the same ``ldapadd`` command as we did with +users:: + + $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -c -f guardian-group.ldif.in + +If we check the group membership in Horizon, we'll see that only Peter is a +member of the ``guardians`` group, despite the whole crew being specified in +the LDIF. Once those accounts are created in LDAP, they will automatically be +added to the ``guardians`` group. They will also assume any role assignments +given to the ``guardians`` group. + +Deleting Groups +--------------- + +Just like users, groups can be deleted using the ``dn``:: + + $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org + +Note that this operation will not remove users within that group. It will only +remove the group itself and the memberships any users had with that group. 
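As a quick sanity check, here is a sketch that reuses the bind credentials from earlier in this guide to confirm the group entry is gone while the user entries remain::

    $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
        -H ldap://localhost -b ou=UserGroups,dc=openstack,dc=org "(cn=guardians)"

An empty result set indicates the ``guardians`` group was removed.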
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 85a5656198..ba483e9ec9 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -1,3 +1,5 @@ +.. _kvm_nested_virt: + ======================================================= Configure DevStack with KVM-based Nested Virtualization ======================================================= @@ -73,7 +75,7 @@ back: :: sudo rmmod kvm-amd - sudo sh -c "echo 'options amd nested=1' >> /etc/modprobe.d/dist.conf" + sudo sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf" sudo modprobe kvm-amd Ensure the Nested KVM Kernel module parameter for AMD is enabled on the diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. + +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. + # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. 
note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . ./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. + +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst index 9549ed2974..dcaa4166c4 100644 --- a/doc/source/guides/lxc.rst +++ b/doc/source/guides/lxc.rst @@ -105,7 +105,7 @@ The commands in this section should all be run inside your container. :: - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack #. 
Configure diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c996f95743..ef339f1f5c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -24,7 +24,7 @@ Install a couple of packages to bootstrap configuration: :: - apt-get install -y git sudo || yum install -y git sudo + apt-get install -y git sudo || dnf install -y git sudo Network Configuration --------------------- @@ -73,8 +73,15 @@ Otherwise create the stack user: :: - groupadd stack - useradd -g stack -s /bin/bash -d /opt/stack -m stack + useradd -s /bin/bash -d /opt/stack -m stack + +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +:: + + chmod +x /opt/stack This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a @@ -82,7 +89,7 @@ password: :: - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack From here on use the ``stack`` user. **Logout** and **login** as the ``stack`` user. @@ -104,7 +111,7 @@ Grab the latest version of DevStack: :: - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack cd devstack Up to this point all of the steps apply to each node in the cluster. @@ -121,11 +128,8 @@ cluster controller's DevStack in ``local.conf``: [[local|localrc]] HOST_IP=192.168.42.11 - FLAT_INTERFACE=eth0 FIXED_RANGE=10.4.128.0/20 - FIXED_NETWORK_SIZE=4096 FLOATING_RANGE=192.168.42.128/25 - MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack DATABASE_PASSWORD=supersecret @@ -161,11 +165,8 @@ machines, create a ``local.conf`` with: [[local|localrc]] HOST_IP=192.168.42.12 # change this per compute node - FLAT_INTERFACE=eth0 FIXED_RANGE=10.4.128.0/20 - FIXED_NETWORK_SIZE=4096 FLOATING_RANGE=192.168.42.128/25 - MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack DATABASE_PASSWORD=supersecret @@ -176,17 +177,12 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent NOVA_VNC_ENABLED=True - NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN -**Note:** the ``n-api-meta`` service is a version of the api server -that only serves the metadata service. It's needed because the -computes created won't have a routing path to the metadata service on -the controller. - Fire up OpenStack: :: @@ -198,6 +194,64 @@ A stream of activity ensues. When complete you will see a summary of to poke at your shiny new OpenStack. The most recent log file is available in ``stack.sh.log``. +Starting in the Ocata release, Nova requires a `Cells v2`_ deployment. Compute +node services must be mapped to a cell before they can be used. + +After each compute node is stacked, verify it shows up in the +``nova service-list --binary nova-compute`` output. The compute service is +registered in the cell database asynchronously so this may require polling. 
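A sketch of that polling, run from the control node; ``compute1`` is a hypothetical subnode hostname, so substitute whatever hostname your subnode actually reports::

    # repeat until the subnode's nova-compute service is registered
    until nova service-list --binary nova-compute | grep -q compute1; do
        sleep 10
    done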
+
+Once the compute node service shows up, run the ``./tools/discover_hosts.sh``
+script from the control node to map compute hosts to the single cell.
+
+The compute service running on the primary control node will be
+discovered automatically when the control node is stacked so this really
+only needs to be performed for subnodes.
+
+.. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html
+
+Configure Tempest Node to run the Tempest tests
+-----------------------------------------------
+
+If you need to run the Tempest tests against a node other than the Cluster
+Controller node, you can do so by re-using the ``local.conf`` file from the
+Cluster Controller node, but without the controller services enabled in the
+``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest``
+as a configured service. The ``SERVICES_FOR_TEMPEST`` variable must then be
+configured to contain those services that were enabled on the Cluster
+Controller node in the ``ENABLED_SERVICES`` variable. For example, the
+``local.conf`` file could look as follows:
+
+::
+
+    [[local|localrc]]
+    HOST_IP=192.168.42.12 # change this per compute node
+    FIXED_RANGE=10.4.128.0/20
+    FLOATING_RANGE=192.168.42.128/25
+    LOGFILE=/opt/stack/logs/stack.sh.log
+    ADMIN_PASSWORD=labstack
+    DATABASE_PASSWORD=supersecret
+    RABBIT_PASSWORD=supersecret
+    SERVICE_PASSWORD=supersecret
+    DATABASE_TYPE=mysql
+    SERVICE_HOST=192.168.42.11
+    MYSQL_HOST=$SERVICE_HOST
+    RABBIT_HOST=$SERVICE_HOST
+    GLANCE_HOSTPORT=$SERVICE_HOST:9292
+    NOVA_VNC_ENABLED=True
+    NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
+    VNCSERVER_LISTEN=$HOST_IP
+    VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+    ENABLED_SERVICES=tempest
+    SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance
+
+Then just run DevStack:
+
+::
+
+    ./stack.sh
+
+
 Cleaning Up After DevStack
 --------------------------
 
@@ -225,8 +279,8 @@ this when it runs but there are times it needs to still be done by hand:
 
    sudo rm -rf /etc/libvirt/qemu/inst*
    sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy
 
-Options pimp your stack
-=======================
+Going further
+=============
 
 Additional Users
 ----------------
 
@@ -260,7 +314,7 @@ for scripting:
 
    openstack user create $NAME --password=$PASSWORD --project $PROJECT
    openstack role add Member --user $NAME --project $PROJECT # The Member role is created by stack.sh
-   # openstack role list
+   # openstack role assignment list
 
 Swift
 -----
 
@@ -287,17 +341,17 @@ Volumes
 
 DevStack will automatically use an existing LVM volume group named
 ``stack-volumes`` to store cloud-created volumes. If ``stack-volumes``
-doesn't exist, DevStack will set up a 10Gb loop-mounted file to contain
-it. This obviously limits the number and size of volumes that can be
-created inside OpenStack. The size can be overridden by setting
-``VOLUME_BACKING_FILE_SIZE`` in ``local.conf``.
+doesn't exist, DevStack will set up a loop-mounted file to contain
+it. If the default size is insufficient for the number and size of volumes
+required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in
+``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``).
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
-``VOLUME_GROUP`` in ``localrc``. ``stack.sh`` deletes all logical
-volumes in ``VOLUME_GROUP`` that begin with ``VOLUME_NAME_PREFIX`` as
+``VOLUME_GROUP_NAME`` in ``localrc``.
``stack.sh`` deletes all logical +volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as part of cleaning up from previous runs. It is recommended to not use the -root volume group as ``VOLUME_GROUP``. +root volume group as ``VOLUME_GROUP_NAME``. The details of creating the volume group depends on the server hardware involved but looks something like this: @@ -354,17 +408,6 @@ To pull glance, OpenStack Image service, from an experimental fork: Notes stuff you might need to know ================================== -Reset the Bridge ----------------- - -How to reset the bridge configuration: - -:: - - sudo brctl delif br100 eth0.926 - sudo ip link set dev br100 down - sudo brctl delbr br100 - Set MySQL Password ------------------ @@ -400,6 +443,10 @@ SSH keys need to be exchanged between each compute node: ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts +3. Verify that login via ssh works without a password:: + + ssh -i /root/.ssh/id_rsa stack@DESTINATION + In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and every compute node's public ECDSA key needs to be in every other compute diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index c5b1634f62..a7adeeff73 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -41,19 +41,8 @@ network and is on a shared subnet with other machines. The `local.conf` exhibited here assumes that 1500 is a reasonable MTU to use on that network. -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - } - } +.. image:: /assets/images/neutron-network-1.png + :alt: Network configuration for a single DevStack node DevStack Configuration @@ -76,16 +65,10 @@ serving as a hypervisor for guest instances. RABBIT_PASSWORD=secret SERVICE_PASSWORD=secret - # Do not use Nova-Network - disable_service n-net - # Enable Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 - - ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" - FIXED_RANGE="10.0.0.0/24" + IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22" Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 PUBLIC_NETWORK_GATEWAY="172.18.161.1" PUBLIC_INTERFACE=eth0 @@ -106,21 +89,8 @@ also want to do multinode testing and networking. Physical Network Setup ~~~~~~~~~~~~~~~~~~~~~~ -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - devstack-2 [ address = "172.18.161.7" ]; - } - } - +.. image:: /assets/images/neutron-network-2.png + :alt: Network configuration for multiple DevStack nodes After DevStack installs and configures Neutron, traffic from guest VMs flows out of `devstack-2` (the compute node) and is encapsulated in a @@ -228,8 +198,6 @@ connect OpenStack nodes (like `devstack-2`) together. This bridge is used so that project network traffic, using the VXLAN tunneling protocol, flows between each compute node where project instances run. - - DevStack Compute Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -250,7 +218,7 @@ The host `devstack-2` has a very minimal `local.conf`. 
## Neutron options PUBLIC_INTERFACE=eth0 - ENABLED_SERVICES=n-cpu,rabbit,q-agt + ENABLED_SERVICES=n-cpu,rabbit,q-agt,placement-client Network traffic from `eth0` on the compute nodes is then NAT'd by the controller node that runs Neutron's `neutron-l3-agent` and provides L3 @@ -274,30 +242,8 @@ to the neutron L3 service. Physical Network Setup ---------------------- -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network provider_net { - address = "203.0.113.0/24" - router [ address = "203.0.113.1" ]; - controller; - compute1; - compute2; - } - - network control_plane { - router [ address = "10.0.0.1" ] - address = "10.0.0.0/24" - controller [ address = "10.0.0.2" ] - compute1 [ address = "10.0.0.3" ] - compute2 [ address = "10.0.0.4" ] - } - } - +.. image:: /assets/images/neutron-network-3.png + :alt: Network configuration for provider networks On a compute node, the first interface, eth0 is used for the OpenStack management (API, message bus, etc) as well as for ssh for an @@ -382,31 +328,28 @@ controller node. ## Neutron options Q_USE_SECGROUP=True - ENABLE_PROJECT_VLANS=True - PROJECT_VLAN_RANGE=3001:4000 + ENABLE_TENANT_VLANS=True + TENANT_VLAN_RANGE=3001:4000 PHYSICAL_NETWORK=default OVS_PHYSICAL_BRIDGE=br-ex Q_USE_PROVIDER_NETWORKING=True - # Do not use Nova-Network - disable_service n-net - - # Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt + disable_service q-l3 ## Neutron Networking options used to create Neutron Subnets - FIXED_RANGE="203.0.113.0/24" + IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24" NETWORK_GATEWAY=203.0.113.1 PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 + USE_SUBNETPOOL=False -In this configuration we are defining FIXED_RANGE to be a +In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a publicly routed IPv4 subnet. In this specific instance we are using -the special TEST-NET-3 subnet defined in `RFC 5737 `_, -which is used for documentation. In your DevStack setup, FIXED_RANGE +the special TEST-NET-3 subnet defined in `RFC 5737 `_, +which is used for documentation. In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE would be a public IP address range that you or your organization has allocated to you, so that you could access your instances from the public internet. @@ -508,50 +451,6 @@ by default. If you want to remove all the extension drivers (even 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. -Using Linux Bridge instead of Open vSwitch ------------------------------------------- - -The configuration for using the Linux Bridge ML2 driver is fairly -straight forward. The Linux Bridge configuration for DevStack is similar -to the :ref:`Open vSwitch based single interface ` -setup, with small modifications for the interface mappings. 
- - -:: - - [[local|localrc]] - HOST_IP=172.18.161.6 - SERVICE_HOST=172.18.161.6 - MYSQL_HOST=172.18.161.6 - RABBIT_HOST=172.18.161.6 - GLANCE_HOSTPORT=172.18.161.6:9292 - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=secret - RABBIT_PASSWORD=secret - SERVICE_PASSWORD=secret - - # Do not use Nova-Network - disable_service n-net - # Enable Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 - - - ## Neutron options - Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.0/24" - FIXED_RANGE="10.0.0.0/24" - Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 - PUBLIC_NETWORK_GATEWAY="172.18.161.1" - PUBLIC_INTERFACE=eth0 - - Q_USE_PROVIDERNET_FOR_PUBLIC=True - - # Linuxbridge Settings - Q_AGENT=linuxbridge - LB_PHYSICAL_INTERFACE=eth0 - PUBLIC_PHYSICAL_NETWORK=default - LB_INTERFACE_MAPPINGS=default:eth0 - Using MacVTap instead of Open vSwitch ------------------------------------------ @@ -582,20 +481,18 @@ you do not require them. Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap Q_USE_PROVIDER_NETWORKING=True - #Enable Neutron services - disable_service n-net - enable_plugin neutron git://git.openstack.org/openstack/neutron - ENABLED_SERVICES+=,q-agt,q-svc + enable_plugin neutron https://opendev.org/openstack/neutron ## MacVTap agent options Q_AGENT=macvtap PHYSICAL_NETWORK=default - FIXED_RANGE="203.0.113.0/24" + IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24" NETWORK_GATEWAY=203.0.113.1 PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 + USE_SUBNETPOOL=False [[post-config|/$Q_PLUGIN_CONF_FILE]] [macvtap] @@ -614,14 +511,14 @@ to be configured for VLAN tenant networks. For OVS, a similar configuration like described in the :ref:`OVS Provider Network ` section can be -used. Just add the the following line to this local.conf, which also loads +used. Just add the following line to this local.conf, which also loads the MacVTap mechanism driver: :: [[local|localrc]] ... - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap ... For the MacVTap compute node, use this local.conf: @@ -639,7 +536,7 @@ For the MacVTap compute node, use this local.conf: # Services that a compute node runs disable_all_services - enable_plugin neutron git://git.openstack.org/openstack/neutron + enable_plugin neutron https://opendev.org/openstack/neutron ENABLED_SERVICES+=n-cpu,q-agt ## MacVTap agent options diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index a91e0d194c..6b8aabf8db 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -10,10 +10,10 @@ nova-serialproxy ================ In Juno, nova implemented a `spec -`_ +`_ to allow read/write access to the serial console of an instance via `nova-serialproxy -`_. +`_. The service can be enabled by adding ``n-sproxy`` to ``ENABLED_SERVICES``. Further options can be enabled via @@ -62,11 +62,75 @@ The service can be enabled by adding ``n-sproxy`` to Enabling the service is enough to be functional for a single machine DevStack. -These config options are defined in `nova.console.serial -`_ -and `nova.cmd.serialproxy -`_. +These config options are defined in `nova.conf.serial_console +`_. 
 For more information on OpenStack
-Configuration Reference
-`_
+Compute Service Configuration Reference
+`_
+
+
+Fake virt driver
+================
+
+Nova has a `fake virt driver`_ which can be used for scale testing the control
+plane services or testing "move" operations between fake compute nodes, for
+example cold/live migration, evacuate and unshelve.
+
+The fake virt driver does not communicate with any hypervisor; it just reports
+some fake resource inventory values and keeps track of the state of the
+"guests" created, moved and deleted. It is not feature-complete with the
+compute API but is good enough for most API testing, and is also used within
+the nova functional tests themselves so is fairly robust.
+
+.. _fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py
+
+Configuration
+-------------
+
+Set the following in your devstack ``local.conf``:
+
+.. code-block:: ini
+
+   [[local|localrc]]
+   VIRT_DRIVER=fake
+   NUMBER_FAKE_NOVA_COMPUTE=<number>
+
+The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake
+``nova-compute`` services to run and defaults to 1.
+
+When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in
+nova and neutron automatically. However, other services, like cinder, will
+still enforce quota limits by default.
+
+Scaling
+-------
+
+The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors
+such as:
+
+* The size of the host (physical or virtualized) on which devstack is running.
+* The number of API workers. By default, devstack will run ``max($nproc/2, 2)``
+  workers per API service. If you are running several fake compute services on
+  a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``.
+
+In addition, while quota will be disabled in neutron, there is no fake ML2
+backend for neutron so creating fake VMs will still result in real ports being
+created. To create servers without networking, you can specify ``--nic=none``
+when creating the server, for example:
+
+.. code-block:: shell
+
+   $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
+       --image cirros-0.6.3-x86_64-disk --nic none --wait test-server
+
+.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
+   required to use ``--nic=none``.
+
+To avoid overhead from other services which you may not need, disable them in
+your ``local.conf``, for example:
+
+.. code-block:: ini
+
+   disable_service horizon
+   disable_service tempest
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 011c41fbce..263fbb9d6f 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -45,31 +45,45 @@
 We need to add a user to install DevStack. (if you created a user
 during install you can skip this step and just give the user sudo
 privileges below)
 
-::
+.. code-block:: console
 
-   adduser stack
+   $ sudo useradd -s /bin/bash -d /opt/stack -m stack
+
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+ with
+``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+   $ sudo chmod +x /opt/stack
 
 Since this user will be making many changes to your system, it will
 need to have sudo privileges:
 
-::
+.. code-block:: console
 
-   apt-get install sudo -y || yum install -y sudo
-   echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+   $ apt-get install sudo -y || dnf install -y sudo
+   $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
+
+.. note:: On some systems you may need to use ``sudo visudo``.
 
 From here on you should use the user you created. **Logout** and
-**login** as that user.
+**login** as that user:
+
+.. code-block:: console
+
+   $ sudo su - stack
 
 Download DevStack
 -----------------
 
 We'll grab the latest version of DevStack via https:
 
-::
+.. code-block:: console
 
-   sudo apt-get install git -y || sudo yum install -y git
-   git clone https://git.openstack.org/openstack-dev/devstack
-   cd devstack
+   $ sudo apt-get install git -y || sudo dnf install -y git
+   $ git clone https://opendev.org/openstack/devstack
+   $ cd devstack
 
 Run DevStack
 ------------
 
@@ -81,11 +95,8 @@ do the following:
 
 - Set ``FLOATING_RANGE`` to a range not used on the local network, i.e.
   192.168.1.224/27. This configures IP addresses ending in 225-254 to
   be used as floating IPs.
-- Set ``FIXED_RANGE`` and ``FIXED_NETWORK_SIZE`` to configure the
-  internal address space used by the instances.
-- Set ``FLAT_INTERFACE`` to the Ethernet interface that connects the
-  host to your local network. This is the interface that should be
-  configured with the static IP address mentioned above.
+- Set ``FIXED_RANGE`` to configure the internal address space used by the
+  instances.
 - Set the administrative password. This password is used for the
   **admin** and **demo** accounts set up as OpenStack users.
 - Set the MySQL administrative password. The default here is a random
@@ -95,25 +106,29 @@ do the following:
 - Set the service password. This is used by the OpenStack services
   (Nova, Glance, etc) to authenticate with Keystone.
 
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
 ``local.conf`` should look something like this:
 
-::
+.. code-block:: ini
 
    [[local|localrc]]
    FLOATING_RANGE=192.168.1.224/27
    FIXED_RANGE=10.11.12.0/24
-   FIXED_NETWORK_SIZE=256
-   FLAT_INTERFACE=eth0
    ADMIN_PASSWORD=supersecret
    DATABASE_PASSWORD=iheartdatabases
    RABBIT_PASSWORD=flopsymopsy
   SERVICE_PASSWORD=iheartksl
 
+.. note:: There is a sample :download:`local.conf ` file
+   under the *samples* directory in the devstack repository.
+
 Run DevStack:
 
-::
+.. code-block:: console
 
-   ./stack.sh
+   $ ./stack.sh
 
 A seemingly endless stream of activity ensues. When complete you will
 see a summary of ``stack.sh``'s work, including the relevant URLs,
@@ -127,7 +142,3 @@
 computers on the local network. In this example that would be
 http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and
 if you give them floating IPs and security group access those VMs
 will be accessible from other machines on your network.
-
-Some examples of using the OpenStack command-line clients ``nova`` and
-``glance`` are in the shakedown scripts in ``devstack/exercises``.
-``exercise.sh`` will run all of those scripts and report on the results.
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 45b8f2dd89..4272a4b180 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -56,11 +56,11 @@ passed as the user-data file when booting the VM.
write_files: - content: | #!/bin/sh - DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo yum update -qy - DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git sudo chown stack:stack /home/stack cd /home/stack - git clone https://git.openstack.org/openstack-dev/devstack + git clone https://opendev.org/openstack/devstack cd devstack echo '[[local|localrc]]' > local.conf echo ADMIN_PASSWORD=password >> local.conf @@ -78,7 +78,7 @@ As DevStack will refuse to run as root, this configures ``cloud-init`` to create a non-root user and run the ``start.sh`` script as that user. If you are using cloud-init and you have not -`enabled custom logging <../configuration.html#enable-logging>`_ of the stack +:ref:`enabled custom logging ` of the stack output, then the stack output can be found in ``/var/log/cloud-init-output.log`` by default. diff --git a/doc/source/index.rst b/doc/source/index.rst index 68ec174f3a..a07bb84922 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,163 +1,180 @@ +.. Documentation Architecture for the devstack docs. + + It is really easy for online docs to meander over time as people + attempt to add the small bit of additional information they think + people need, into an existing information architecture. In order to + prevent that we need to be a bit strict as to what's on this front + page. + + This should *only* be the quick start narrative. Which should end + with 2 sections: what you can do with devstack once it's set up, + and how to go beyond this setup. Both should be a set of quick + links to other documents to let people explore from there. + DevStack ======== .. image:: assets/images/logo-blue.png DevStack is a series of extensible scripts used to quickly bring up a -complete OpenStack environment. It is used interactively as a -development environment and as the basis for much of the OpenStack -project's functional testing. +complete OpenStack environment based on the latest versions of +everything from git master. It is used interactively as a development +environment and as the basis for much of the OpenStack project's +functional testing. -The source is available at -``__. +The source is available at ``__. -.. toctree:: - :glob: - :maxdepth: 1 +.. warning:: - overview - configuration - plugins - plugin-registry - faq - hacking + DevStack will make substantial changes to your system during + installation. Only run DevStack on servers or virtual machines that + are dedicated to this purpose. Quick Start ------------ ++++++++++++ -#. Select a Linux Distribution +Install Linux +------------- - Only Ubuntu 14.04 (Trusty), Fedora 22 (or Fedora 23) and CentOS/RHEL - 7 are documented here. OpenStack also runs and is packaged on other - flavors of Linux such as OpenSUSE and Debian. +Start with a clean and minimal install of a Linux system. DevStack +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. -#. Install Selected OS +If you do not have a preference, Ubuntu 24.04 (Noble) is the +most tested, and will probably go the smoothest. - In order to correctly install all the dependencies, we assume a - specific minimal version of the supported distributions to make it as - easy as possible. We recommend using a minimal install of Ubuntu or - Fedora server in a VM if this is your first time. 
+Add Stack User (optional) +------------------------- -#. Download DevStack +DevStack should be run as a non-root user with sudo enabled +(standard logins to cloud images such as "ubuntu" or "cloud-user" +are usually fine). - :: +If you are not using a cloud image, you can create a separate `stack` user +to run DevStack with - git clone https://git.openstack.org/openstack-dev/devstack +.. code-block:: console - The ``devstack`` repo contains a script that installs OpenStack and - templates for configuration files + $ sudo useradd -s /bin/bash -d /opt/stack -m stack -#. Configure +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. - We recommend at least a :ref:`minimal-configuration` be set up. +.. code-block:: console -#. Add Stack User + $ sudo chmod +x /opt/stack - Devstack should be run as a non-root user with sudo enabled - (standard logins to cloud images such as "ubuntu" or "cloud-user" - are usually fine). +Since this user will be making many changes to your system, it should +have sudo privileges: - You can quickly create a separate `stack` user to run DevStack with +.. code-block:: console - :: + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack + $ sudo -u stack -i - devstack/tools/create-stack-user.sh; su stack +Download DevStack +----------------- -#. Start the install, this will take a few minutes. +.. code-block:: console - :: + $ git clone https://opendev.org/openstack/devstack + $ cd devstack - cd devstack; ./stack.sh +The ``devstack`` repo contains a script that installs OpenStack and +templates for configuration files. -Guides -====== +Create a local.conf +------------------- -Walk through various setups used by stackers +Create a ``local.conf`` file with four passwords preset at the root of the +devstack git repo. -.. toctree:: - :glob: - :maxdepth: 1 +.. code-block:: ini - guides/single-vm - guides/single-machine - guides/lxc - guides/multinode-lab - guides/neutron - guides/devstack-with-nested-kvm - guides/nova - guides/devstack-with-lbaas-v2 + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD -All-In-One Single VM --------------------- +This is the minimum required config to get started with DevStack. -Run :doc:`OpenStack in a VM `. The VMs launched in your cloud will be slow as -they are running in QEMU (emulation), but it is useful if you don't have -spare hardware laying around. :doc:`[Read] ` +.. note:: There is a sample :download:`local.conf ` file + under the *samples* directory in the devstack repository. -All-In-One Single Machine -------------------------- +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. -Run :doc:`OpenStack on dedicated hardware ` This can include a -server-class machine or a laptop at home. -:doc:`[Read] ` +Start the install +----------------- -All-In-One LXC Container -------------------------- +.. code-block:: console -Run :doc:`OpenStack in a LXC container `. Beneficial for intermediate -and advanced users. The VMs launched in this cloud will be fully accelerated but -not all OpenStack features are supported. :doc:`[Read] ` + $ ./stack.sh -Multi-Node Lab --------------- +This will take 15 - 30 minutes, largely depending on the speed of +your internet connection. 
Many git trees and packages will be +installed during this process. -Setup a :doc:`multi-node cluster ` with dedicated VLANs for VMs & Management. -:doc:`[Read] ` +Profit! +------- -DevStack with Neutron Networking --------------------------------- +You now have a working DevStack! Congrats! -Building a DevStack cluster with :doc:`Neutron Networking `. -This guide is meant for building lab environments with a dedicated -control node and multiple compute nodes. +Your devstack will have installed ``keystone``, ``glance``, ``nova``, +``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs +will be available, guests have access to the external world. -DevStack with KVM-based Nested Virtualization ---------------------------------------------- +You can access horizon to experience the web interface to +OpenStack, and manage vms, networks, volumes, and images from +there. -Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization -`. With this setup, Nova instances -will be more performant than with plain QEMU emulation. +You can ``source openrc`` in your shell, and then use the +``openstack`` command line tool to manage your devstack. -Nova and devstack --------------------------------- +You can :ref:`create a VM and SSH into it `. -Guide to working with nova features :doc:`Nova and devstack `. +You can ``cd /opt/stack/tempest`` and run tempest tests that have +been configured to work with your devstack. -DevStack Documentation -====================== +You can :doc:`make code changes to OpenStack and validate them +`. -Overview --------- +Going further +------------- -:doc:`An overview of DevStack goals and priorities ` +Learn more about our :doc:`configuration system ` to +customize devstack for your needs. Including making adjustments to the +default :doc:`networking `. -Configuration -------------- +Read :doc:`guides ` for specific setups people have (note: +guides are point in time contributions, and may not always be kept +up to date to the latest devstack). -:doc:`Configuring and customizing the stack ` +Enable :doc:`devstack plugins ` to support additional +services, features, and configuration not present in base devstack. -Plugins -------- +Use devstack in your CI with :doc:`Ansible roles ` and +:doc:`Jobs ` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul +V3 with this full migration :doc:`how-to `. -:doc:`Extending DevStack with new features ` +Get :doc:`the big picture ` of what we are trying to do +with devstack, and help us by :doc:`contributing to the project +`. -FAQ ---- +If you are a new contributor to devstack please refer: :doc:`contributor/contributing` -:doc:`The DevStack FAQ ` +.. toctree:: + :hidden: + + contributor/contributing -Contributing ------------- +Contents +++++++++ -:doc:`Pitching in to make DevStack a better place ` +.. toctree:: + :glob: + :maxdepth: 2 + * diff --git a/doc/source/networking.rst b/doc/source/networking.rst new file mode 100644 index 0000000000..10e1c3ff2c --- /dev/null +++ b/doc/source/networking.rst @@ -0,0 +1,238 @@ +===================== + DevStack Networking +===================== + +An important part of the DevStack experience is networking that works +by default for created guests. This might not be optimal for your +particular testing environment, so this document tries its best to +explain what's going on. 
+
+Defaults
+========
+
+If you don't specify any configuration you will get the following:
+
+* neutron (including l3 with openvswitch)
+* private project networks for each openstack project
+* a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
+* the demo project configured with fixed ips on a subnet allocated from
+  the 10.0.0.0/22 range
+* a ``br-ex`` interface controlled by neutron for all its networking
+  (this is not connected to any physical interfaces).
+* DNS resolution for guests based on the resolv.conf for your host
+* an ip masq rule that allows created guests to route out
+
+This creates an environment which is isolated to the single
+host. Guests can get to the external network for package
+updates. Tempest tests will work in this environment.
+
+.. note::
+
+   By default all OpenStack environments have security group rules
+   which block all inbound packets to guests. If you want to be able
+   to ssh / ping your created guests you should run the following.
+
+   .. code-block:: bash
+
+      openstack security group rule create --proto icmp --dst-port 0 default
+      openstack security group rule create --proto tcp --dst-port 22 default
+
+Locally Accessible Guests
+=========================
+
+If you want to make your guests accessible from other machines on your
+network, you have to connect ``br-ex`` to a physical interface.
+
+Dedicated Guest Interface
+-------------------------
+
+If you have 2 or more interfaces on your devstack server, you can
+allocate an interface to neutron to fully manage. This **should not**
+be the same interface you use to ssh into the devstack server itself.
+
+This is done by setting the ``PUBLIC_INTERFACE`` attribute.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth1
+
+That will put all layer 2 traffic from your guests onto the main
+network. When running in this mode the ip masq rule is **not** added
+in your devstack; you are responsible for making routing work on your
+local network.
+
+Shared Guest Interface
+----------------------
+
+.. warning::
+
+   This is not a recommended configuration. Because of interactions
+   between OVS and bridging, if you reboot your box with active
+   networking you may lose network connectivity to your system.
+
+If you need your guests accessible on the network, but only have 1
+interface (using something like a NUC), you can share your one
+network. But in order for this to work you need to manually set a lot
+of addresses, and have them all exactly correct.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth0
+   HOST_IP=10.42.0.52
+   FLOATING_RANGE=10.42.0.0/24
+   PUBLIC_NETWORK_GATEWAY=10.42.0.1
+   Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
+
+In order for this scenario to work the floating ip network must match
+the default networking on your server. This breaks HOST_IP detection,
+as we exclude the floating range by default, so you have to specify
+that manually.
+
+The ``PUBLIC_NETWORK_GATEWAY`` is the gateway that the server would normally
+use to get off the network. ``Q_FLOATING_ALLOCATION_POOL`` controls
+the range of floating ips that will be handed out. As we are sharing
+your existing network, you'll want to give it a slice that your local
+dhcp server is not allocating. Otherwise you could easily have
+conflicting ip addresses, and cause havoc with your local network.
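+
+Before stacking it is worth double-checking these values against the
+actual network (a quick sanity check; ``eth0`` and the addresses above
+are only the illustrative values from this example):
+
+.. code-block:: bash
+
+   # the inet address shown here should match HOST_IP
+   ip -4 addr show eth0
+   # the "default via ..." address should match PUBLIC_NETWORK_GATEWAY
+   ip route show default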
+
+
+Private Network Addressing
+==========================
+
+The private network addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE``
+and the ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify one
+single variable of safe internal IPs to use that will be referenced whether or
+not subnetpools are in use.
+
+For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to
+the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly.
+
+For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of
+``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller,
+``FIXED_RANGE_V6`` will just use the value of that directly.
+``SUBNETPOOL_PREFIX_V6`` will just default to the value of
+``IPV6_ADDRS_SAFE_TO_USE`` directly.
+
+.. _ssh:
+
+SSH access to instances
+=======================
+
+To validate connectivity, you can create an instance using the
+``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP
+using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach
+this floating IP to the instance:
+
+.. code-block:: shell
+
+   openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair
+   openstack server create --network private --key-name test-keypair ... test-server
+   fip_id=$(openstack floating ip create public -f value -c id)
+   openstack server add floating ip test-server ${fip_id}
+
+Once done, ensure you have enabled SSH and ICMP (ping) access for the security
+group used for the instance. You can either create a custom security group and
+specify it when creating the instance or add it after creation, or you can
+modify the ``default`` security group created by default for each project.
+Let's do the latter:
+
+.. code-block:: shell
+
+   openstack security group rule create --proto icmp --dst-port 0 default
+   openstack security group rule create --proto tcp --dst-port 22 default
+
+Finally, SSH into the instance. If you used the Cirros instance uploaded by
+default, then you can run the following:
+
+.. code-block:: shell
+
+   openstack server ssh test-server -- -l cirros
+
+This will connect using the ``cirros`` user and the keypair you configured when
+creating the instance.
+
+Remote SSH access to instances
+==============================
+
+You can also SSH to created instances on your DevStack host from other hosts.
+This can be helpful if you are e.g. deploying DevStack in a VM on an existing
+cloud and wish to do development on your local machine. There are a few ways to
+do this.
+
+.. rubric:: Configure instances to be locally accessible
+
+The most obvious way is to configure guests to be locally accessible, as
+described above. This has the advantage of
+requiring no further effort on the client. However, it is more involved and
+requires either support from your cloud or some inadvisable workarounds.
+
+.. rubric:: Use your DevStack host as a jump host
+
+You can choose to use your DevStack host as a jump host. To SSH to an instance
+this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh``
+command. For example:
+
+.. code-block::
+
+   openstack server ssh test-server -- -l cirros -J username@devstack-host
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously <ssh>`, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+This can also be configured via your ``~/.ssh/config`` file, making it rather
+effortless. However, it only allows SSH access. If you want to access e.g. a
+web application on the instance, you will need to configure an SSH tunnel and
+forward select ports using the ``-L`` option. For example, to forward HTTP
+traffic:
+
+.. code-block::
+
+   openstack server ssh test-server -- -l cirros -J username@devstack-host -L 8080:localhost:80
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously <ssh>`, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+As you can imagine, this can quickly get out of hand, particularly for more
+complex guest applications with multiple ports.
+
+.. rubric:: Use a proxy or VPN tool
+
+You can use a proxy or VPN tool to enable tunneling for the floating IP
+address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``)
+defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many
+such tools available to do this. For example, we could use a useful utility
+called `sshuttle`__. To enable tunneling using ``sshuttle``, first ensure you
+have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S)
+traffic is necessary so you can use the OpenStack APIs remotely. How you do
+this will depend on where your DevStack host is running. Once this is done,
+install ``sshuttle`` on your localhost:
+
+.. code-block:: bash
+
+   sudo apt-get install sshuttle || sudo dnf install sshuttle
+
+Finally, start ``sshuttle`` on your localhost using the floating IP address
+range. For example, assuming you are using the default value for
+``$FLOATING_RANGE``, you can do:
+
+.. code-block:: bash
+
+   sshuttle -r username@devstack-host 172.24.4.0/24
+
+(where ``username`` and ``devstack-host`` are the username and hostname of your
+DevStack host).
+
+You should now be able to create an instance and SSH into it:
+
+.. code-block:: bash
+
+   openstack server ssh test-server -- -l cirros
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously <ssh>`)
+
+.. __: https://github.com/sshuttle/sshuttle
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index d245035a1a..c978e8d2cf 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -20,16 +20,15 @@ Base OS
 
 *The OpenStack Technical Committee (TC) has defined the current CI
 strategy to include the latest Ubuntu release and the latest RHEL
-release (for Python 2.6 testing).*
+release.*
 
 - Ubuntu: current LTS release plus current development release
-- Fedora: current release plus previous release
-- RHEL: current major release
+- RHEL/CentOS/RockyLinux: current major release
 - Other OS platforms may continue to be included but the maintenance of
   those platforms shall not be assumed simply due to their presence.
   Having a listed point-of-contact for each additional OS will greatly
   increase its chance of being well-maintained.
-- Patches for Ubuntu and/or Fedora will not be held up due to
+- Patches for Ubuntu and/or RockyLinux will not be held up due to
   side-effects on other OS platforms.
 
 Databases
 ---------
@@ -38,7 +37,6 @@ Databases
 
 *As packaged by the host OS*
 
 - MySQL
-- PostgreSQL
 
 Queues
 ------
@@ -46,7 +44,6 @@ Queues
 
 *As packaged by the host OS*
 
 - Rabbit
-- Qpid
 
 Web Server
 ----------
@@ -55,22 +52,13 @@ Web Server
 
 - Apache
 
-OpenStack Network
------------------
-
-*Defaults to nova network, optionally use neutron*
-
-- Nova Network: FlatDHCP
-- Neutron: A basic configuration approximating the original FlatDHCP
-  mode using linuxbridge or OpenVSwitch.
-
 Services
 --------
 
 The default services configured by DevStack are Identity (keystone),
-Object Storage (swift), Image Service (glance), Block Storage (cinder),
-Compute (nova), Networking (nova), Dashboard (horizon), Orchestration
-(heat)
+Object Storage (swift), Image Service (glance), Block Storage
+(cinder), Compute (nova), Placement (placement),
+Networking (neutron), Dashboard (horizon).
 
 Additional services not included directly in DevStack can be tied in to
 ``stack.sh`` using the :doc:`plugin mechanism <plugins>` to call
@@ -80,13 +68,4 @@ Node Configurations
 -------------------
 
 - single node
-- multi-node is not tested regularly by the core team, and even then
-  only minimal configurations are reviewed
-
-Exercises
----------
-
-The DevStack exercise scripts are no longer used as integration and gate
-testing as that job has transitioned to Tempest. They are still
-maintained as a demonstrations of using OpenStack from the command line
-and for quick operational testing.
+- multi-node configurations, as tested by the gate
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 5b6622e75c..9185263443 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -1,18 +1,16 @@
-..
+.. Note to patch submitters:
 
-   Note to patch submitters:
+   # ============================= #
+   # THIS FILE IS AUTOGENERATED ! #
+   # ============================= #
 
-   # ============================= #
-   # THIS FILE IS AUTOGENERATED ! #
-   # ============================= #
+   ** Plugins are found automatically and added to this list **
 
-   ** Plugins are found automatically and added to this list **
+   This file is created by a periodic proposal job. You should not
+   edit this file.
 
-   This file is created by a periodic proposal job. You should not
-   edit this file.
-
-   You should edit the files data/devstack-plugins-registry.footer
-   data/devstack-plugins-registry.header to modify this text.
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
 
 ==========================
  DevStack Plugin Registry
 ==========================
 
 available DevStack plugins. This includes, but is not limited to,
 official OpenStack projects.
-====================================== === -Plugin Name URL -====================================== === -aodh `git://git.openstack.org/openstack/aodh `__ -app-catalog-ui `git://git.openstack.org/openstack/app-catalog-ui `__ -astara `git://git.openstack.org/openstack/astara `__ -barbican `git://git.openstack.org/openstack/barbican `__ -bilean `git://git.openstack.org/openstack/bilean `__ -blazar `git://git.openstack.org/openstack/blazar `__ -broadview-collector `git://git.openstack.org/openstack/broadview-collector `__ -ceilometer `git://git.openstack.org/openstack/ceilometer `__ -ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm `__ -cerberus `git://git.openstack.org/openstack/cerberus `__ -cloudkitty `git://git.openstack.org/openstack/cloudkitty `__ -collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin `__ -congress `git://git.openstack.org/openstack/congress `__ -cue `git://git.openstack.org/openstack/cue `__ -designate `git://git.openstack.org/openstack/designate `__ -devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos `__ -devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 `__ -devstack-plugin-bdd `git://git.openstack.org/openstack/devstack-plugin-bdd `__ -devstack-plugin-ceph `git://git.openstack.org/openstack/devstack-plugin-ceph `__ -devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs `__ -devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs `__ -devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka `__ -devstack-plugin-mariadb `git://git.openstack.org/openstack/devstack-plugin-mariadb `__ -devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs `__ -devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika `__ -devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog `__ -devstack-plugin-zmq `git://git.openstack.org/openstack/devstack-plugin-zmq `__ -dragonflow `git://git.openstack.org/openstack/dragonflow `__ -drbd-devstack `git://git.openstack.org/openstack/drbd-devstack `__ -ec2-api `git://git.openstack.org/openstack/ec2-api `__ -freezer `git://git.openstack.org/openstack/freezer `__ -freezer-api `git://git.openstack.org/openstack/freezer-api `__ -freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui `__ -gce-api `git://git.openstack.org/openstack/gce-api `__ -gnocchi `git://git.openstack.org/openstack/gnocchi `__ -group-based-policy `git://git.openstack.org/openstack/group-based-policy `__ -higgins `git://git.openstack.org/openstack/higgins `__ -ironic `git://git.openstack.org/openstack/ironic `__ -ironic-inspector `git://git.openstack.org/openstack/ironic-inspector `__ -ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers `__ -kingbird `git://git.openstack.org/openstack/kingbird `__ -kuryr `git://git.openstack.org/openstack/kuryr `__ -magnum `git://git.openstack.org/openstack/magnum `__ -magnum-ui `git://git.openstack.org/openstack/magnum-ui `__ -manila `git://git.openstack.org/openstack/manila `__ -mistral `git://git.openstack.org/openstack/mistral `__ -monasca-analytics `git://git.openstack.org/openstack/monasca-analytics `__ -monasca-api `git://git.openstack.org/openstack/monasca-api `__ -monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer `__ -monasca-log-api `git://git.openstack.org/openstack/monasca-log-api `__ 
-monasca-transform `git://git.openstack.org/openstack/monasca-transform `__ -murano `git://git.openstack.org/openstack/murano `__ -networking-6wind `git://git.openstack.org/openstack/networking-6wind `__ -networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe `__ -networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn `__ -networking-brocade `git://git.openstack.org/openstack/networking-brocade `__ -networking-calico `git://git.openstack.org/openstack/networking-calico `__ -networking-cisco `git://git.openstack.org/openstack/networking-cisco `__ -networking-fortinet `git://git.openstack.org/openstack/networking-fortinet `__ -networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch `__ -networking-huawei `git://git.openstack.org/openstack/networking-huawei `__ -networking-infoblox `git://git.openstack.org/openstack/networking-infoblox `__ -networking-l2gw `git://git.openstack.org/openstack/networking-l2gw `__ -networking-midonet `git://git.openstack.org/openstack/networking-midonet `__ -networking-mlnx `git://git.openstack.org/openstack/networking-mlnx `__ -networking-nec `git://git.openstack.org/openstack/networking-nec `__ -networking-odl `git://git.openstack.org/openstack/networking-odl `__ -networking-ofagent `git://git.openstack.org/openstack/networking-ofagent `__ -networking-onos `git://git.openstack.org/openstack/networking-onos `__ -networking-ovn `git://git.openstack.org/openstack/networking-ovn `__ -networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk `__ -networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid `__ -networking-powervm `git://git.openstack.org/openstack/networking-powervm `__ -networking-sfc `git://git.openstack.org/openstack/networking-sfc `__ -networking-vsphere `git://git.openstack.org/openstack/networking-vsphere `__ -neutron `git://git.openstack.org/openstack/neutron `__ -neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing `__ -neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas `__ -neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas `__ -neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard `__ -neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas `__ -nova-docker `git://git.openstack.org/openstack/nova-docker `__ -nova-lxd `git://git.openstack.org/openstack/nova-lxd `__ -nova-powervm `git://git.openstack.org/openstack/nova-powervm `__ -octavia `git://git.openstack.org/openstack/octavia `__ -osprofiler `git://git.openstack.org/openstack/osprofiler `__ -panko `git://git.openstack.org/openstack/panko `__ -python-freezerclient `git://git.openstack.org/openstack/python-freezerclient `__ -rally `git://git.openstack.org/openstack/rally `__ -sahara `git://git.openstack.org/openstack/sahara `__ -sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard `__ -scalpels `git://git.openstack.org/openstack/scalpels `__ -searchlight `git://git.openstack.org/openstack/searchlight `__ -searchlight-ui `git://git.openstack.org/openstack/searchlight-ui `__ -senlin `git://git.openstack.org/openstack/senlin `__ -smaug `git://git.openstack.org/openstack/smaug `__ -smaug-dashboard `git://git.openstack.org/openstack/smaug-dashboard `__ -solum `git://git.openstack.org/openstack/solum `__ -tacker `git://git.openstack.org/openstack/tacker `__ -tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service `__ -tricircle `git://git.openstack.org/openstack/tricircle `__ 
-trove `git://git.openstack.org/openstack/trove `__ -trove-dashboard `git://git.openstack.org/openstack/trove-dashboard `__ -vitrage `git://git.openstack.org/openstack/vitrage `__ -vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard `__ -vmware-nsx `git://git.openstack.org/openstack/vmware-nsx `__ -watcher `git://git.openstack.org/openstack/watcher `__ -watcher-dashboard `git://git.openstack.org/openstack/watcher-dashboard `__ -zaqar `git://git.openstack.org/openstack/zaqar `__ -zaqar-ui `git://git.openstack.org/openstack/zaqar-ui `__ -====================================== === +======================================== === +Plugin Name URL +======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ +openstack/aodh `https://opendev.org/openstack/aodh `__ +openstack/barbican `https://opendev.org/openstack/barbican `__ +openstack/blazar `https://opendev.org/openstack/blazar `__ +openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ +openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ +openstack/cyborg `https://opendev.org/openstack/cyborg `__ +openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ +openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ +openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ +openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ +openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ +openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ +openstack/freezer `https://opendev.org/openstack/freezer `__ +openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ +openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ +openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ +openstack/heat `https://opendev.org/openstack/heat `__ +openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ +openstack/ironic `https://opendev.org/openstack/ironic `__ +openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ +openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ +openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ +openstack/keystone `https://opendev.org/openstack/keystone `__ +openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ +openstack/magnum `https://opendev.org/openstack/magnum `__ +openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ +openstack/manila `https://opendev.org/openstack/manila `__ +openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-tempest-plugin `__ +openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ +openstack/masakari `https://opendev.org/openstack/masakari `__ +openstack/mistral `https://opendev.org/openstack/mistral `__ +openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ +openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ 
+openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ +openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ +openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ +openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ +openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ +openstack/neutron `https://opendev.org/openstack/neutron `__ +openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ +openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ +openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ +openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova `https://opendev.org/openstack/nova `__ +openstack/octavia `https://opendev.org/openstack/octavia `__ +openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ +openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ +openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ +openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ +openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ +openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ +openstack/shade `https://opendev.org/openstack/shade `__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ +openstack/storlets `https://opendev.org/openstack/storlets `__ +openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ +openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ +openstack/trove `https://opendev.org/openstack/trove `__ +openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ +openstack/vitrage `https://opendev.org/openstack/vitrage `__ +openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ +openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ +openstack/watcher `https://opendev.org/openstack/watcher `__ +openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard `__ +openstack/whitebox-tempest-plugin `https://opendev.org/openstack/whitebox-tempest-plugin `__ +openstack/zaqar `https://opendev.org/openstack/zaqar `__ +openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui `__ +openstack/zun `https://opendev.org/openstack/zun `__ +openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ +performa/os-faults `https://opendev.org/performa/os-faults `__ +starlingx/config `https://opendev.org/starlingx/config `__ +starlingx/fault `https://opendev.org/starlingx/fault `__ +starlingx/ha `https://opendev.org/starlingx/ha `__ +starlingx/integ `https://opendev.org/starlingx/integ `__ +starlingx/metal `https://opendev.org/starlingx/metal `__ 
+starlingx/nfv `https://opendev.org/starlingx/nfv `__ +starlingx/update `https://opendev.org/starlingx/update `__ +vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ +x/almanach `https://opendev.org/x/almanach `__ +x/bilean `https://opendev.org/x/bilean `__ +x/broadview-collector `https://opendev.org/x/broadview-collector `__ +x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ +x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos `__ +x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin-glusterfs `__ +x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ +x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ +x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ +x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ +x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ +x/fenix `https://opendev.org/x/fenix `__ +x/gce-api `https://opendev.org/x/gce-api `__ +x/glare `https://opendev.org/x/glare `__ +x/group-based-policy `https://opendev.org/x/group-based-policy `__ +x/gyan `https://opendev.org/x/gyan `__ +x/horizon-mellanox `https://opendev.org/x/horizon-mellanox `__ +x/ironic-staging-drivers `https://opendev.org/x/ironic-staging-drivers `__ +x/kingbird `https://opendev.org/x/kingbird `__ +x/meteos `https://opendev.org/x/meteos `__ +x/meteos-ui `https://opendev.org/x/meteos-ui `__ +x/mixmatch `https://opendev.org/x/mixmatch `__ +x/mogan `https://opendev.org/x/mogan `__ +x/mogan-ui `https://opendev.org/x/mogan-ui `__ +x/networking-6wind `https://opendev.org/x/networking-6wind `__ +x/networking-ansible `https://opendev.org/x/networking-ansible `__ +x/networking-arista `https://opendev.org/x/networking-arista `__ +x/networking-brocade `https://opendev.org/x/networking-brocade `__ +x/networking-cisco `https://opendev.org/x/networking-cisco `__ +x/networking-cumulus `https://opendev.org/x/networking-cumulus `__ +x/networking-dpm `https://opendev.org/x/networking-dpm `__ +x/networking-fortinet `https://opendev.org/x/networking-fortinet `__ +x/networking-hpe `https://opendev.org/x/networking-hpe `__ +x/networking-huawei `https://opendev.org/x/networking-huawei `__ +x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ +x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ +x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ +x/networking-nec `https://opendev.org/x/networking-nec `__ +x/networking-omnipath `https://opendev.org/x/networking-omnipath `__ +x/networking-opencontrail `https://opendev.org/x/networking-opencontrail `__ +x/networking-ovs-dpdk `https://opendev.org/x/networking-ovs-dpdk `__ +x/networking-plumgrid `https://opendev.org/x/networking-plumgrid `__ +x/networking-spp `https://opendev.org/x/networking-spp `__ +x/networking-vpp `https://opendev.org/x/networking-vpp `__ +x/networking-vsphere `https://opendev.org/x/networking-vsphere `__ +x/neutron-classifier `https://opendev.org/x/neutron-classifier `__ +x/nova-dpm `https://opendev.org/x/nova-dpm `__ +x/nova-mksproxy `https://opendev.org/x/nova-mksproxy `__ +x/oaktree `https://opendev.org/x/oaktree `__ +x/omni `https://opendev.org/x/omni `__ +x/os-xenapi `https://opendev.org/x/os-xenapi `__ +x/picasso `https://opendev.org/x/picasso `__ 
+x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nova `__
+x/scalpels `https://opendev.org/x/scalpels `__
+x/slogging `https://opendev.org/x/slogging `__
+x/stackube `https://opendev.org/x/stackube `__
+x/tatu `https://opendev.org/x/tatu `__
+x/trio2o `https://opendev.org/x/trio2o `__
+x/valet `https://opendev.org/x/valet `__
+x/vmware-nsx `https://opendev.org/x/vmware-nsx `__
+x/vmware-vspc `https://opendev.org/x/vmware-vspc `__
+x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__
+======================================== ===

diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 70469d6876..fe567e2277 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -12,6 +12,15 @@ tree. They are called through a strong contract, so these plugins can be sure that they will continue to work in the future as DevStack evolves.
+Prerequisites
+=============
+
+If you are planning to create a plugin that is going to host a service in the
+service catalog (that is, your plugin will use the command
+``get_or_create_service``), please make sure that you apply to the `service
+types authority`_ to reserve a valid service-type. This will help to make sure
+that all deployments of your service use the same service-type.
+
 Plugin Interface
 ================
@@ -45,6 +54,31 @@ directory. Inside this directory there can be 3 files.
   default value only if the variable is unset or empty; e.g. in bash syntax
   ``FOO=${FOO:-default}``.
+
+  The file should include a ``define_plugin`` line to indicate the
+  plugin's name, which is the name that should be used by users on
+  "enable_plugin" lines. It should generally be the last component of
+  the git repo path (e.g., if the plugin's repo is
+  openstack/foo, then the name here should be "foo") ::
+
+    define_plugin <YOUR PLUGIN NAME>
+
+  If your plugin depends on another plugin, indicate it in this file
+  with one or more lines like the following::
+
+    plugin_requires <YOUR PLUGIN NAME> <OTHER PLUGIN NAME>
+
+  For a complete example, if the plugin "foo" depends on "bar", the
+  ``settings`` file should include::
+
+    define_plugin foo
+    plugin_requires foo bar
+
+  Devstack does not currently use this dependency information, so it's
+  important that users continue to add enable_plugin lines in the
+  correct order in ``local.conf``. However, adding this information
+  allows other tools to consider dependency information when
+  automatically generating ``local.conf`` files.
+
 - ``plugin.sh`` - the actual plugin. It is executed by devstack at
   well defined points during a ``stack.sh`` run. The plugin.sh
   internal structure is discussed below.
@@ -65,7 +99,7 @@ They are added in the following format::
 An example would be as follows::
-  enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
+  enable_plugin ec2-api https://opendev.org/openstack/ec2-api
 plugin.sh contract
 ==================
@@ -99,7 +133,7 @@ The current full list of ``mode`` and ``phase`` are:
   should exist at this point.
 - **extra** - Called near the end after layer 1 and 2 services have
   been started.
- - **test-config** Called at the end of devstack used to configure tempest
+ - **test-config** - Called at the end of devstack, used to configure tempest
   or any other test environments
 - **unstack** - Called by ``unstack.sh`` before other services are shut
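Taken together, the contract above amounts to a ``plugin.sh`` that dispatches on the mode and phase arguments it is sourced with. A minimal sketch might look like the following; the ``template`` service name is a hypothetical placeholder, and only ``is_service_enabled`` and ``echo_summary`` are real DevStack helpers.

.. code-block:: bash

   # Sketch of devstack/plugin.sh; DevStack sources this file with the
   # mode as $1 ("stack", "unstack", "clean") and, for "stack", the
   # phase as $2. The "template" service name is hypothetical.
   if is_service_enabled template; then
       if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
           # Set up system services or extra package repos before install
           :
       elif [[ "$1" == "stack" && "$2" == "install" ]]; then
           # Install the service source
           echo_summary "Installing template"
       elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
           # Layer 1 and 2 services have been configured by this point
           echo_summary "Configuring template"
       elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
           # Layer 1 and 2 services are running; initialize and start
           echo_summary "Starting template"
       fi

       if [[ "$1" == "unstack" ]]; then
           # Shut the service down
           :
       fi

       if [[ "$1" == "clean" ]]; then
           # Remove transient state; clean.sh calls unstack.sh first
           :
       fi
   fi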
@@ -114,7 +148,7 @@ An example plugin would look something as follows.
 ``devstack/settings``::
-    # settings file for template
+    # settings file for template
     enable_service template
@@ -188,23 +222,62 @@ dependency mechanism is beyond the scope of the current work.
 System Packages
 ===============
-Devstack provides a framework for getting packages installed at an early
-phase of its execution. These packages may be defined in a plugin as files
-that contain new-line separated lists of packages required by the plugin
-Supported packaging systems include apt and yum across multiple distributions.
-To enable a plugin to hook into this and install package dependencies, packages
-may be listed at the following locations in the top-level of the plugin
-repository:
+
+Devstack based
+--------------
+
+Devstack provides a custom framework for getting packages installed at
+an early phase of its execution. These packages may be defined in a
+plugin as files that contain new-line separated lists of packages
+required by the plugin.
+
+Supported packaging systems include apt and dnf across multiple
+distributions. To enable a plugin to hook into this and install
+package dependencies, packages may be listed at the following
+locations in the top-level of the plugin repository:
 - ``./devstack/files/debs/$plugin_name`` - Packages to install when running
-  on Ubuntu, Debian or Linux Mint.
+  on Ubuntu or Debian.
 - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
-  on Red Hat, Fedora, CentOS or XenServer.
+  on Red Hat, Fedora, or CentOS.
-- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
-  running on SUSE Linux or openSUSE.
+Although there are no plans to remove this method of installing
+packages, plugins should consider it deprecated in favor of the
+``bindep`` support described below.
+
+bindep
+------
+
+The `bindep <https://docs.openstack.org/infra/bindep/>`__ project has
+become the de facto standard for OpenStack projects to specify binary
+dependencies.
+
+A plugin may provide a ``./devstack/files/bindep.txt`` file, which
+will be called with the *default* profile to install packages. For
+details on the syntax, etc., see the bindep documentation.
+
+It is also possible to use the ``bindep.txt`` of projects that are
+being installed from source with the ``-bindep`` flag available in
+install functions. For example
+
+.. code-block:: bash
+
+   if use_library_from_git "diskimage-builder"; then
+       GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
+       GITDIR["diskimage-builder"]=$DEST/diskimage-builder
+       GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF
+       git_clone_by_name "diskimage-builder"
+       setup_dev_lib -bindep "diskimage-builder"
+   fi
+
+will result in any packages required by the ``bindep.txt`` of the
+``diskimage-builder`` project being installed. Note however that jobs
+that switch projects between source and released/pypi installs
+(e.g. with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both
+released dependencies and master versions) will have to deal with
+``bindep.txt`` being unavailable without the source directory.
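For orientation, a plugin's ``./devstack/files/bindep.txt`` under the default profile might look like the following sketch; the package names are illustrative and not taken from any real plugin::

   # devstack/files/bindep.txt (illustrative)
   # Lines without a profile belong to the "default" profile, which is
   # the one DevStack installs. The platform: selectors pick the right
   # package name per distro family.
   libffi-dev [platform:dpkg]
   libffi-devel [platform:rpm]
   libxml2-dev [platform:dpkg]
   libxml2-devel [platform:rpm]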
 Using Plugins in the OpenStack Gate
@@ -230,10 +303,12 @@ integration of alternate RPC systems (e.g. zmq, qpid). In these
 cases the best practice is to build a dedicated
 ``openstack/devstack-plugin-FOO`` project.
+Legacy project-config jobs
+--------------------------
+
 To enable a plugin to be used in a gate job, the following lines will
 be needed in your ``jenkins/jobs/.yaml`` definition in
-`project-config
-`_::
+`project-config <https://opendev.org/openstack/project-config>`_::
     # Because we are testing a non standard project, add the
     # our project repository. This makes zuul do the right
@@ -243,10 +318,17 @@ be needed in your ``jenkins/jobs/.yaml`` definition in
     # note the actual url here is somewhat irrelevant because it
     # caches in nodepool, however make it a valid url for
     # documentation purposes.
-    export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
+    export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api"
+
+Zuul v3 jobs
+------------
+
+See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`.
 See Also
 ========
 For additional inspiration on devstack plugins you can check out the
-`Plugin Registry `_.
+:doc:`Plugin Registry <plugin-registry>`.
+
+.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/

diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
new file mode 100644
index 0000000000..78535202d8
--- /dev/null
+++ b/doc/source/systemd.rst
@@ -0,0 +1,222 @@
+===========================
+ Using Systemd in DevStack
+===========================
+
+By default, DevStack runs all of its services as systemd units.
+Systemd is now the default init system for nearly every Linux
+distro, and it solves many of the problems related to poorly
+supervised processes.
+
+Why this instead of screen?
+===========================
+
+The screen model for DevStack was invented when the number of services
+that a DevStack user was going to run was typically fewer than 10,
+which made it easy to jump around with screen hot keys. However, the
+landscape has changed: not all services can be stopped from screen
+(some run under Apache), and there are typically at least 20 services.
+
+There is also a common developer workflow of changing code in more
+than one service, and needing to restart a bunch of services for that
+to take effect.
+
+Unit Structure
+==============
+
+.. note::
+
+   Originally we actually wanted to do this as user units; however,
+   there are issues with running these under non-interactive
+   shells. For now, we'll be running as system units. Some user unit
+   code is left in place in case we can switch back later.
+
+All DevStack units are created as a part of the DevStack slice and
+given the name ``devstack@$servicename.service``. This makes it easy
+to understand which services are part of the devstack run, and lets us
+disable / stop them in a single command.
+
+Manipulating Units
+==================
+
+The examples below use the ``n-cpu`` unit to keep things clear.
+
+Enable a unit (allows it to be started)::
+
+  sudo systemctl enable devstack@n-cpu.service
+
+Disable a unit::
+
+  sudo systemctl disable devstack@n-cpu.service
+
+Start a unit::
+
+  sudo systemctl start devstack@n-cpu.service
+
+Stop a unit::
+
+  sudo systemctl stop devstack@n-cpu.service
+
+Restart a unit::
+
+  sudo systemctl restart devstack@n-cpu.service
+
+See status of a unit::
+
+  sudo systemctl status devstack@n-cpu.service
+
+Operating on more than one unit at a time
+-----------------------------------------
+
+Systemd supports wildcarding for unit operations. To restart every
+service in devstack you can do the following::
+
+  sudo systemctl restart devstack@*
+
+Or to see the status of all Nova processes you can do::
+
+  sudo systemctl status devstack@n-*
+
+We'll eventually make the unit names a bit more meaningful so that
+it's easier to understand what you are restarting.
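Wildcards also work with other ``systemctl`` verbs; for example, one way to list every unit in the DevStack slice, running or not (the quotes keep the shell from expanding the pattern itself)::

  sudo systemctl list-units --all 'devstack@*'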
+.. _journalctl-examples:
+
+Querying Logs
+=============
+
+One of the other major things that comes with systemd is journald, a
+consolidated way to access logs (including querying through structured
+metadata). This is accessed by the user via the ``journalctl`` command,
+which has powerful query facilities. We'll start with some common
+options.
+
+Follow logs for a specific service::
+
+  sudo journalctl -f --unit devstack@n-cpu.service
+
+Follow logs for multiple services simultaneously::
+
+  sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service
+
+or you can even use wildcards to follow all the nova services::
+
+  sudo journalctl -f --unit devstack@n-*
+
+Use higher precision time stamps::
+
+  sudo journalctl -f -o short-precise --unit devstack@n-cpu.service
+
+By default, journalctl strips out "unprintable" characters, including
+ASCII color codes. To keep the color codes (which can be interpreted by
+an appropriate terminal/pager - e.g. ``less``, the default)::
+
+  sudo journalctl -a --unit devstack@n-cpu.service
+
+When outputting to the terminal using the default pager, long lines
+will be truncated, but horizontal scrolling is supported via the
+left/right arrow keys. You can override this by setting the
+``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``.
+
+You can pipe the output to another tool, such as ``grep``. For
+example, to find a server instance UUID in the nova logs::
+
+  sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6
+
+See ``man 1 journalctl`` for more.
+
+Debugging
+=========
+
+Using pdb
+---------
+
+In order to break into a regular pdb session on a systemd-controlled
+service, you need to invoke the process manually - that is, take it out
+of systemd's control.
+
+Discover the command systemd is using to run the service::
+
+  systemctl show devstack@n-sch.service -p ExecStart --no-pager
+
+Stop the systemd service::
+
+  sudo systemctl stop devstack@n-sch.service
+
+Inject your breakpoint in the source, e.g.::
+
+  import pdb; pdb.set_trace()
+
+Invoke the command manually::
+
+  /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+Some executables, such as :program:`nova-compute`, will need to be executed
+with a particular group. This will be shown in the systemd unit file::
+
+  sudo systemctl cat devstack@n-cpu.service | grep Group
+
+::
+
+  Group = libvirt
+
+Use the :program:`sg` tool to execute the command as this group::
+
+  sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf'
+
+Using remote-pdb
+----------------
+
+`remote-pdb`_ works while the process is under systemd control.
+
+Make sure you have remote-pdb installed::
+
+  sudo pip install remote-pdb
+
+Inject your breakpoint in the source, e.g.::
+
+  import remote_pdb; remote_pdb.set_trace()
+
+Restart the relevant service::
+
+  sudo systemctl restart devstack@n-api.service
+
+The remote-pdb code configures the telnet port when ``set_trace()`` is
+invoked. Do whatever it takes to hit the instrumented code path, and
+inspect the logs for a message displaying the listening port::
+
+  Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ...
+
+Telnet to that port to enter the pdb session::
+
+  telnet 127.0.0.1 46771
+
+See the `remote-pdb`_ home page for more options.
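If grepping the journal for the randomly chosen port is inconvenient, remote-pdb can instead be pinned to a fixed address through its class-based interface; a small sketch, where the port number is an arbitrary example::

  # Listen on a fixed, known port rather than a random one
  from remote_pdb import RemotePdb
  RemotePdb('127.0.0.1', 4444).set_trace()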
+.. _`remote-pdb`: https://pypi.org/project/remote-pdb/
+
+Future Work
+===========
+
+user units
+----------
+
+It would be great if we could run services as user units, so that there
+is a clear separation between code running as root and code that is
+not, to ensure running as root never accidentally gets baked in as an
+assumption to services. However, user units interact poorly with
+devstack-gate and the way that commands are run as users with Ansible
+and su.
+
+Maybe someday we can figure that out.
+
+References
+==========
+
+- Arch Linux Wiki - https://wiki.archlinux.org/index.php/Systemd/User
+- Python interface to journald -
+  https://www.freedesktop.org/software/systemd/python-systemd/journal.html
+- Systemd documentation on service files -
+  https://www.freedesktop.org/software/systemd/man/systemd.service.html
+- Systemd documentation on exec (can be used to impact service runs) -
+  https://www.freedesktop.org/software/systemd/man/systemd.exec.html

diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst
new file mode 100644
index 0000000000..65dd5b16b2
--- /dev/null
+++ b/doc/source/tempest.rst
@@ -0,0 +1,25 @@
+=======
+Tempest
+=======
+
+`Tempest`_ is the OpenStack integration test suite. It is installed by default
+and is used to provide integration testing for many of the OpenStack services.
+Just like DevStack itself, it is possible to extend Tempest with plugins. In
+fact, many Tempest plugin packages also include a DevStack plugin to do things
+like pre-create required static resources.
+
+The `Tempest documentation <https://docs.openstack.org/tempest/latest/>`_
+provides a thorough guide to using Tempest. However, if you simply wish to run
+the standard set of Tempest tests against an existing deployment, you can do
+the following:
+
+.. code-block:: shell
+
+   cd /opt/stack/tempest
+   /opt/stack/data/venv/bin/tempest run ...
+
+The above assumes you have installed DevStack in the default location
+(configured via the ``DEST`` configuration variable) and have enabled
+virtualenv-based installation in the standard location (configured via the
+``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively).
+
+.. _Tempest: https://docs.openstack.org/tempest/latest/
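The trailing ``...`` above stands for ``tempest run`` arguments. As one hypothetical example, the following would run only the smoke-tagged tests with four workers; both choices are arbitrary:

.. code-block:: shell

   cd /opt/stack/tempest
   # --smoke selects only tests tagged as smoke; --concurrency caps workers
   /opt/stack/data/venv/bin/tempest run --smoke --concurrency 4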
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
new file mode 100644
index 0000000000..c43603ea17
--- /dev/null
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -0,0 +1,320 @@
+===============================
+Migrating Zuul V2 CI jobs to V3
+===============================
+
+The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved
+to the new CI system. All jobs have been migrated automatically to a format
+compatible with Zuul v3; however, the jobs produced this way are suboptimal
+and do not use the capabilities introduced by Zuul v3, which allow for re-use
+of job parts, in the form of Ansible roles, as well as inheritance between
+jobs.
+
+DevStack hosts a set of roles, plays and jobs that can be used by other
+repositories to define their DevStack based jobs. To benefit from them, jobs
+must be migrated from the legacy v2 format into the v3 native format.
+
+This document provides guidance and examples to make the migration process as
+painless and smooth as possible.
+
+Where to host the job definitions
+=================================
+
+In Zuul v3, jobs can be defined in the repository that contains the code they
+exercise. If you are writing CI jobs for an OpenStack service, you can define
+your DevStack based CI jobs in one of the repositories that host the code for
+your service. If you have a branchless repo, like a Tempest plugin, that is
+a convenient choice to host the job definitions since job changes do not have
+to be backported. For example, see the beginning of the ``.zuul.yaml`` from
+the sahara Tempest plugin repo:
+
+.. code:: yaml
+
+   # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml:
+   - job:
+       name: sahara-tests-tempest
+       description: |
+         Run Tempest tests from the Sahara plugin.
+       parent: devstack-tempest
+
+Which base job to start from
+============================
+
+If your job needs an OpenStack cloud deployed via DevStack, but you don't plan
+on running Tempest tests, you can start from one of the base
+:doc:`jobs <zuul_jobs>` defined in the DevStack repo.
+
+The ``devstack`` job can be used for both single-node jobs and multi-node
+jobs, and it includes the list of services used in the integrated gate
+(keystone, glance, nova, cinder, neutron and swift). Different topologies can
+be achieved by switching the nodeset used in the child job.
+
+The ``devstack-base`` job is similar to ``devstack`` but it does not specify
+any required repos or services to be run in DevStack. It can be useful for
+setting up child jobs that use a very narrow DevStack setup.
+
+If your job needs an OpenStack cloud deployed via DevStack, and you do plan
+on running Tempest tests, you can start from one of the base jobs defined in
+the Tempest repo.
+
+The ``devstack-tempest`` job can be used for both single-node jobs and
+multi-node jobs. Different topologies can be achieved by switching the nodeset
+used in the child job.
+
+Jobs can be customized as follows without writing any Ansible code:
+
+- add and/or remove DevStack services
+- add or modify DevStack and services configuration
+- install DevStack plugins
+- extend the number of sub-nodes (multinode only)
+- define extra log files and/or directories to be uploaded on logs.o.o
+- define extra log file extensions to be rewritten to .txt for ease of access
+
+Tempest jobs can be further customized as follows:
+
+- define the Tempest tox environment to be used
+- define the test concurrency
+- define the test regular expression
+
+By writing Ansible code, or importing existing custom roles, jobs can be
+further extended:
+
+- adding pre and/or post playbooks
+- overriding the run playbook and adding custom roles
+
+The (partial) example below extends the single-node Tempest base job
+``devstack-tempest`` in the Kuryr repository. The parent job name is defined
+in ``job.parent``.
+
+.. code:: yaml
+
+   # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml:
+   - job:
+       name: kuryr-kubernetes-tempest-base
+       parent: devstack-tempest
+       description: Base kuryr-kubernetes-job
+       required-projects:
+         - openstack/devstack-plugin-container
+         - openstack/kuryr
+         - openstack/kuryr-kubernetes
+         - openstack/kuryr-tempest-plugin
+         - openstack/neutron-lbaas
+       vars:
+         tempest_test_regex: '^(kuryr_tempest_plugin.tests.)'
+         tox_envlist: 'all'
+         devstack_localrc:
+           KURYR_K8S_API_PORT: 8080
+         devstack_services:
+           kubernetes-api: true
+           kubernetes-controller-manager: true
+           kubernetes-scheduler: true
+           kubelet: true
+           kuryr-kubernetes: true
+           (...)
+         devstack_plugins:
+           kuryr-kubernetes: https://opendev.org/openstack/kuryr
+           devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container
+           neutron-lbaas: https://opendev.org/openstack/neutron-lbaas
+         tempest_plugins:
+           - kuryr-tempest-plugin
+       (...)
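Since topology comes from the nodeset, a multi-node variant of a job like the one above can stay small. A hypothetical sketch follows; the job name is invented, and the nodeset is assumed to be one of the multi-node nodesets defined in the DevStack repo:

.. code:: yaml

   - job:
       name: kuryr-kubernetes-tempest-multinode  # hypothetical child job
       parent: kuryr-kubernetes-tempest-base
       # swapping the nodeset is all it takes to change the topology
       nodeset: openstack-two-node-jammy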
+
+Job variables
+=============
+
+Variables can be added to the job in three different places:
+
+- job.vars: these are global variables available to all nodes in the nodeset
+- job.host-vars.[HOST]: these are variables available only to the specified
+  HOST
+- job.group-vars.[GROUP]: these are variables available only to the specified
+  GROUP
+
+Zuul merges dict variables through job inheritance. Host and group variables
+override variables with the same name defined as global variables.
+
+In the example below, for the sundaes job, hosts that are not part of the
+subnode group will run vanilla and chocolate. Hosts in the subnode group will
+run stracciatella and strawberry.
+
+.. code:: yaml
+
+   - job:
+       name: ice-creams
+       vars:
+         devstack_service:
+           vanilla: true
+           chocolate: false
+       group-vars:
+         subnode:
+           devstack_service:
+             pistacchio: true
+             stracciatella: true
+
+   - job:
+       name: sundaes
+       parent: ice-creams
+       vars:
+         devstack_service:
+           chocolate: true
+       group-vars:
+         subnode:
+           devstack_service:
+             strawberry: true
+             pistacchio: false
+
+
+DevStack Gate Flags
+===================
+
+The old CI system worked using a combination of DevStack, Tempest and
+devstack-gate to set up a test environment and run tests against it. With
+Zuul v3, the logic that used to live in devstack-gate has moved into
+different repos, including DevStack, Tempest and grenade.
+
+DevStack-gate exposes an interface for job definition based on a number of
+DEVSTACK_GATE_* environment variables, or flags. This guide shows how to map
+DEVSTACK_GATE flags into the new system.
+
+The repo column indicates which repository hosts the code that replaces the
+devstack-gate flag. The new implementation column explains how to reproduce
+the same or a similar behaviour in Zuul v3 jobs. For localrc settings,
+devstack-gate defined a default value. In Ansible jobs, the default is either
+the value defined in the parent job or the default from DevStack, if any.
+
+.. list-table:: **DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Repo
+     - New implementation
+   * - OVERRIDE_ZUUL_BRANCH
+     - zuul
+     - override-checkout: [branch] in the job definition.
+   * - DEVSTACK_GATE_NET_OVERLAY
+     - zuul-jobs
+     - A bridge called br-infra is set up for all jobs that inherit
+       from multinode with a dedicated `bridge role
+       `_.
+   * - DEVSTACK_CINDER_VOLUME_CLEAR
+     - devstack
+     - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the
+       job vars.
+   * - DEVSTACK_GATE_NEUTRON
+     - devstack
+     - True by default. To disable, disable all neutron services in
+       devstack_services in the job definition.
+   * - DEVSTACK_GATE_CONFIGDRIVE
+     - devstack
+     - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_INSTALL_TESTONLY
+     - devstack
+     - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in
+       the job vars.
+   * - DEVSTACK_GATE_VIRT_DRIVER
+     - devstack
+     - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_LIBVIRT_TYPE
+     - devstack
+     - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_TEMPEST
+     - devstack and tempest
+     - Defined by the job that is used. The ``devstack`` job only runs
+       devstack. The ``devstack-tempest`` one triggers a Tempest run
+       as well.
+   * - DEVSTACK_GATE_TEMPEST_FULL
+     - tempest
+     - *tox_envlist: full* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL
+     - tempest
+     - *tox_envlist: all* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS
+     - tempest
+     - *tox_envlist: all-plugin* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_SCENARIOS
+     - tempest
+     - *tox_envlist: scenario* in the job vars.
+   * - TEMPEST_CONCURRENCY
+     - tempest
+     - *tempest_concurrency: [value]* in the job vars. This is
+       available only on jobs that inherit from ``devstack-tempest``
+       down.
+   * - DEVSTACK_GATE_TEMPEST_NOTESTS
+     - tempest
+     - *tox_envlist: venv-tempest* in the job vars. This will create
+       the Tempest virtual environment but run no tests.
+   * - DEVSTACK_GATE_SMOKE_SERIAL
+     - tempest
+     - *tox_envlist: smoke-serial* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION
+     - tempest
+     - *tox_envlist: full-serial* in the job vars.
+       *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in
+       the job vars.
+
+
+The following flags have not been migrated yet or are legacy and won't be
+migrated at all.
+
+.. list-table:: **Not Migrated DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Status
+     - Details
+   * - DEVSTACK_GATE_TOPOLOGY
+     - WIP
+     - The topology depends on the base job that is used and more
+       specifically on the nodeset attached to it. The new job format
+       allows projects to define the variables to be passed to every
+       node/node-group that exists in the topology. Named topologies
+       that include the nodeset and the matching variables can be
+       defined in the form of base jobs.
+   * - DEVSTACK_GATE_GRENADE
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - GRENADE_BASE_BRANCH
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - DEVSTACK_GATE_NEUTRON_DVR
+     - TBD
+     - Depends on multinode support.
+   * - DEVSTACK_GATE_EXERCISES
+     - TBD
+     - Can be done on request.
+   * - DEVSTACK_GATE_IRONIC
+     - TBD
+     - This will probably be implemented on the ironic side.
+   * - DEVSTACK_GATE_IRONIC_DRIVER
+     - TBD
+     - This will probably be implemented on the ironic side.
+   * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK
+     - TBD
+     - This will probably be implemented on the ironic side.
+   * - DEVSTACK_GATE_POSTGRES
+     - Legacy
+     - This flag exists in d-g but the only thing that it does is
+       capture postgres logs. This is already supported by the roles
+       in post, so the flag is useless in the new jobs. Postgres
+       itself can be enabled via the devstack_service job variable.
+   * - DEVSTACK_GATE_ZEROMQ
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_MQ_DRIVER
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS
+     - Legacy
+     - Stress is not in Tempest anymore.
+   * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW
+     - Legacy
+     - This is not used anywhere.
+   * - DEVSTACK_GATE_CELLS
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT
+     - Legacy
+     - This has no effect in d-g.

diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst
new file mode 100644
index 0000000000..cf203a8973
--- /dev/null
+++ b/doc/source/zuul_jobs.rst
@@ -0,0 +1,4 @@
+Zuul CI Jobs
+============
+
+.. zuul:autojobs::

diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst
new file mode 100644
index 0000000000..4939281057
--- /dev/null
+++ b/doc/source/zuul_roles.rst
@@ -0,0 +1,4 @@
+Zuul CI Roles
+=============
+
+.. zuul:autoroles::

diff --git a/exercise.sh b/exercise.sh
deleted file mode 100755
index 90670333a1..0000000000
--- a/exercise.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# **exercise.sh**
-
-# Keep track of the current DevStack directory.
-TOP_DIR=$(cd $(dirname "$0") && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -# Run everything in the exercises/ directory that isn't explicitly disabled - -# comma separated list of script basenames to skip -# to refrain from exercising foo.sh use ``SKIP_EXERCISES=foo`` -SKIP_EXERCISES=${SKIP_EXERCISES:-""} - -# comma separated list of script basenames to run -# to run only foo.sh use ``RUN_EXERCISES=foo`` -basenames=${RUN_EXERCISES:-""} - -EXERCISE_DIR=$TOP_DIR/exercises - -if [[ -z "${basenames}" ]]; then - # Locate the scripts we should run - basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) -else - # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``. - SKIP_EXERCISES= -fi - -# Track the state of each script -passes="" -failures="" -skips="" - -# Loop over each possible script (by basename) -for script in $basenames; do - if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then - skips="$skips $script" - else - echo "=====================================================================" - echo Running $script - echo "=====================================================================" - $EXERCISE_DIR/$script.sh - exitcode=$? - if [[ $exitcode == 55 ]]; then - skips="$skips $script" - elif [[ $exitcode -ne 0 ]]; then - failures="$failures $script" - else - passes="$passes $script" - fi - fi -done - -# Output status of exercise run -echo "=====================================================================" -for script in $skips; do - echo SKIP $script -done -for script in $passes; do - echo PASS $script -done -for script in $failures; do - echo FAILED $script -done -echo "=====================================================================" - -if [[ -n "$failures" ]]; then - exit 1 -fi diff --git a/exerciserc b/exerciserc deleted file mode 100644 index 978e0b3791..0000000000 --- a/exerciserc +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# source exerciserc -# -# Configure the DevStack exercise scripts -# For best results, source this _after_ stackrc/localrc as it will set -# values only if they are not already set. - -# Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time to wait for proper IP association and dis-association. 
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} - -# Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time from run instance command until it is running -export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} - -# Max time to wait for a vm to terminate -export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} - -# The size of the volume we want to boot from; some storage back-ends -# do not allow a disk resize, so it's important that this can be tuned -export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh deleted file mode 100755 index 808ef76e2f..0000000000 --- a/exercises/aggregates.sh +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env bash - -# **aggregates.sh** - -# This script demonstrates how to use host aggregates: -# -# * Create an Aggregate -# * Updating Aggregate details -# * Testing Aggregate metadata -# * Testing Aggregate delete -# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) -# * Testing add/remove hosts (with one host) - -echo "**************************************************" -echo "Begin DevStack Exercise: $0" -echo "**************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Test as the admin user -# note this imports stackrc/functions, etc -. $TOP_DIR/openrc admin admin - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Cells does not support aggregates. 
-is_service_enabled n-cell && exit 55 - -# Create an aggregate -# =================== - -AGGREGATE_NAME=test_aggregate_$RANDOM -AGGREGATE2_NAME=test_aggregate_$RANDOM -AGGREGATE_A_ZONE=nova - -function exit_if_aggregate_present { - aggregate_name=$1 - - if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then - echo "SUCCESS $aggregate_name not present" - else - die $LINENO "found aggregate: $aggregate_name" - exit -1 - fi -} - -exit_if_aggregate_present $AGGREGATE_NAME - -AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE" - -AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE" - -# check aggregate created -nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created" - - -# Ensure creating a duplicate fails -# ================================= - -if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then - die $LINENO "could create duplicate aggregate" -fi - - -# Test aggregate-update (and aggregate-details) -# ============================================= -AGGREGATE_NEW_NAME=test_aggregate_$RANDOM - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - - -# Test aggregate-set-metadata -# =========================== -META_DATA_1_KEY=asdf -META_DATA_2_KEY=foo -META_DATA_3_KEY=bar - -#ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep 123 - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY - -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - - -# Test aggregate-add/remove-host -# ============================== -if [ "$VIRT_DRIVER" == "xenserver" ]; then - echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" -fi -FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) -# Make sure can add two aggregates to same host -nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST -nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST -if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then - die $LINENO "could add duplicate host to single aggregate" -fi -nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST -nova 
aggregate-remove-host $AGGREGATE_ID $FIRST_HOST - -# Test aggregate-delete -# ===================== -nova aggregate-delete $AGGREGATE_ID -nova aggregate-delete $AGGREGATE2_ID -exit_if_aggregate_present $AGGREGATE_NAME - -set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh deleted file mode 100755 index 84ac08f017..0000000000 --- a/exercises/boot_from_volume.sh +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env bash - -# **boot_from_volume.sh** - -# This script demonstrates how to boot from a volume. It does the following: -# -# * Create a bootable volume -# * Boot a volume-backed instance - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled cinder || exit 55 - -# Ironic does not support boot from volume. -[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-bfv-inst} -VOL_NAME=${VOL_NAME:-ex-vol-bfv} - - -# Launching a server -# ================== - -# List servers for project: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi - -# Setup Keypair -KEY_NAME=test_key -KEY_FILE=key.pem -nova keypair-delete $KEY_NAME || true -nova keypair-add $KEY_NAME > $KEY_FILE -chmod 600 $KEY_FILE - -# Set up volume -# ------------- - -# Delete any old volume -cinder delete $VOL_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi - -# Create the bootable volume -start_time=$(date +%s) -cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Boot instance -# ------------- - -# Boot using the --block-device-mapping param. The format of mapping is: -# =::: -# Leaving the middle two fields blank appears to do-the-right-thing -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Clean up -# -------- - -# Delete volume backed instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $VM_NAME not deleted" - exit 1 -fi - -# Wait for volume to be released -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not released" - exit 1 -fi - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME" -if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/client-args.sh b/exercises/client-args.sh deleted file mode 100755 index 2c8fe81390..0000000000 --- a/exercises/client-args.sh +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env bash - -# **client-args.sh** - -# Test OpenStack client authentication arguments handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME - -# Save the known variables for later -export x_PROJECT_NAME=$OS_PROJECT_NAME -export x_USERNAME=$OS_USERNAME -export x_PASSWORD=$OS_PASSWORD -export x_AUTH_URL=$OS_AUTH_URL - -# Unset the usual variables to force argument processing -unset OS_PROJECT_NAME -unset OS_USERNAME -unset OS_PASSWORD -unset OS_AUTH_URL - -# Common authentication args -PROJECT_ARG="--os-project-name=$x_PROJECT_NAME" -ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if openstack $PROJECT_ARG $ARGS catalog show identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova $PROJECT_ARG $ARGS flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder $PROJECT_ARG $ARGS list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ 
"$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if openstack $PROJECT_ARG $ARGS image list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift $PROJECT_ARG $ARGS stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh deleted file mode 100755 index 6ab4d08715..0000000000 --- a/exercises/client-env.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env bash - -# **client-env.sh** - -# Test OpenStack client environment variable handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc admin - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME - -for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do - is_set $i - if [[ $? 
-ne 0 ]]; then - echo "$i expected to be set" - ABORT=1 - fi -done -if [[ -n "$ABORT" ]]; then - exit 1 -fi - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if openstack endpoint show identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if openstack image list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh deleted file mode 100755 index 485208ba30..0000000000 --- a/exercises/floating_ips.sh +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env bash - -# **floating_ips.sh** - using the cloud can be fun - -# Test instance connectivity with the ``nova`` command from ``python-novaclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-test_secgroup} - -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public} - -# Additional floating IP pool and range -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} - -# Instance name -VM_NAME="ex-float" - -# Cells does not support floating ips API calls -is_service_enabled n-cell && exit 55 - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -# Create a secgroup -if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - die $LINENO "Security group not created" - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" - exit 1 -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" 
-fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Floating IPs -# ------------ - -# Allocate a floating IP from the default pool -FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) -die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" - -# List floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" -fi - -# Add floating IP to our server -nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME" - -# Test that we can ping our floating IP within ASSOCIATE_TIMEOUT seconds -ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" - -if ! is_service_enabled neutron; then - # Allocate an IP from second floating pool - TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) - die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" - - # list floating addresses - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" - fi -fi - -# Disallow ICMP traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ - die $LINENO "Failure deleting security group rule from $SECGROUP" - -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - die $LINENO "Security group rule not deleted from $SECGROUP" -fi - -# FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # Test that we aren't able to ping our floating IP within ASSOCIATE_TIMEOUT seconds - ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail -fi - -# Clean up -# -------- - -if ! is_service_enabled neutron; then - # Delete second floating IP - nova floating-ip-delete $TEST_FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP" -fi - -# Delete the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $FLOATING_IP" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -# Wait for termination -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -# Delete secgroup -nova secgroup-delete $SECGROUP || \ - die $LINENO "Failure deleting security group $SECGROUP" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh deleted file mode 100755 index 81150061ca..0000000000 --- a/exercises/neutron-adv-test.sh +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2012, Cisco Systems -# Copyright 2012, VMware, Inc. -# Copyright 2012, NTT MCL, Inc.
-# -# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com -# -# **neutron-adv-test.sh** - -# Perform integration testing of Nova and other components with Neutron. - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error (via the ERR trap set below) so that errors -# don't compound and you see only the first error that occurred. - -set -o errtrace - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - -# Environment -# ----------- - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import neutron functions -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# If neutron is not enabled we exit with exitcode 55, which means the exercise is skipped. -neutron_plugin_check_adv_test_requirements || exit 55 - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Neutron Settings -# ---------------- - -PROJECTS="DEMO1" -# TODO (nati) Test public network -#PROJECTS="DEMO1,DEMO2" - -PUBLIC_NAME="admin" -DEMO1_NAME="demo1" -DEMO2_NAME="demo2" - -PUBLIC_NUM_NET=1 -DEMO1_NUM_NET=1 -DEMO2_NUM_NET=2 - -PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.10.0.0/24" -DEMO2_NET1_CIDR="10.20.0.0/24" -DEMO2_NET2_CIDR="10.20.1.0/24" - -PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.10.0.1" -DEMO2_NET1_GATEWAY="10.20.0.1" -DEMO2_NET2_GATEWAY="10.20.1.1" - -PUBLIC_NUM_VM=1 -DEMO1_NUM_VM=1 -DEMO2_NUM_VM=2 - -PUBLIC_VM1_NET='admin-net1' -DEMO1_VM1_NET='demo1-net1' -# Multinic settings.
This will fail without a NIC setting in the OS image -DEMO2_VM1_NET='demo2-net1' -DEMO2_VM2_NET='demo2-net2' - -PUBLIC_NUM_ROUTER=1 -DEMO1_NUM_ROUTER=1 -DEMO2_NUM_ROUTER=1 - -PUBLIC_ROUTER1_NET="admin-net1" -DEMO1_ROUTER1_NET="demo1-net1" -DEMO2_ROUTER1_NET="demo2-net1" - -# Various functions -# ----------------- - -function foreach_project { - COMMAND=$1 - for PROJECT in ${PROJECTS//,/ };do - eval ${COMMAND//%PROJECT%/$PROJECT} - done -} - -function foreach_project_resource { - COMMAND=$1 - RESOURCE=$2 - for PROJECT in ${PROJECTS//,/ };do - eval 'NUM=$'"${PROJECT}_NUM_$RESOURCE" - for i in `seq $NUM`;do - local COMMAND_LOCAL=${COMMAND//%PROJECT%/$PROJECT} - COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i} - eval $COMMAND_LOCAL - done - done -} - -function foreach_project_vm { - COMMAND=$1 - foreach_project_resource "$COMMAND" 'VM' -} - -function foreach_project_net { - COMMAND=$1 - foreach_project_resource "$COMMAND" 'NET' -} - -function get_image_id { - local IMAGE_ID - IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) - die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" - echo "$IMAGE_ID" -} - -function get_project_id { - local PROJECT_NAME=$1 - local PROJECT_ID - PROJECT_ID=`openstack project list | grep " $PROJECT_NAME " | head -n 1 | get_field 1` - die_if_not_set $LINENO PROJECT_ID "Failure retrieving PROJECT_ID for $PROJECT_NAME" - echo "$PROJECT_ID" -} - -function get_user_id { - local USER_NAME=$1 - local USER_ID - USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'` - die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" - echo "$USER_ID" -} - -function get_role_id { - local ROLE_NAME=$1 - local ROLE_ID - ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'` - die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" - echo "$ROLE_ID" -} - -function get_network_id { - local NETWORK_NAME="$1" - local NETWORK_ID - NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` - echo $NETWORK_ID -} - -function get_flavor_id { - local INSTANCE_TYPE=$1 - local FLAVOR_ID - FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` - die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" - echo "$FLAVOR_ID" -} - -function confirm_server_active { - local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!"
- false - fi -} - -function neutron_debug_admin { - local os_username=$OS_USERNAME - local os_project_id=$OS_PROJECT_ID - source $TOP_DIR/openrc admin admin - neutron-debug $@ - source $TOP_DIR/openrc $os_username $os_project_id -} - -function add_project { - openstack project create $1 - openstack user create $2 --password ${ADMIN_PASSWORD} --project $1 - openstack role add Member --project $1 --user $2 -} - -function remove_project { - local PROJECT=$1 - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - openstack project delete $PROJECT_ID -} - -function remove_user { - local USER=$1 - local USER_ID - USER_ID=$(get_user_id $USER) - openstack user delete $USER_ID -} - -function create_projects { - source $TOP_DIR/openrc admin admin - add_project demo1 demo1 demo1 - add_project demo2 demo2 demo2 - source $TOP_DIR/openrc demo demo -} - -function delete_projects_and_users { - source $TOP_DIR/openrc admin admin - remove_user demo1 - remove_project demo1 - remove_user demo2 - remove_project demo2 - echo "removed all projects" - source $TOP_DIR/openrc demo demo -} - -function create_network { - local PROJECT=$1 - local GATEWAY=$2 - local CIDR=$3 - local NUM=$4 - local EXTRA=$5 - local NET_NAME="${PROJECT}-net$NUM" - local ROUTER_NAME="${PROJECT}-router${NUM}" - source $TOP_DIR/openrc admin admin - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - source $TOP_DIR/openrc $PROJECT $PROJECT - local NET_ID - NET_ID=$(neutron net-create --project-id $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip-version 4 --project-id $PROJECT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR - neutron_debug_admin probe-create --device-owner compute $NET_ID - source $TOP_DIR/openrc demo demo -} - -function create_networks { - foreach_project_net 'create_network ${%PROJECT%_NAME} ${%PROJECT%_NET%NUM%_GATEWAY} ${%PROJECT%_NET%NUM%_CIDR} %NUM% ${%PROJECT%_NET%NUM%_EXTRA}' - #TODO(nati) test security group function - # allow ICMP for both project's security groups - #source $TOP_DIR/openrc demo1 demo1 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 - #source $TOP_DIR/openrc demo2 demo2 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_vm { - local PROJECT=$1 - local NUM=$2 - local NET_NAMES=$3 - source $TOP_DIR/openrc $PROJECT $PROJECT - local NIC="" - for NET_NAME in ${NET_NAMES//,/ };do - NIC="$NIC --nic net-id="`get_network_id $NET_NAME` - done - #TODO (nati) Add multi-nic test - #TODO (nati) Add public-net test - local VM_UUID - VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - $NIC \ - $PROJECT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set $LINENO VM_UUID "Failure launching $PROJECT-server$NUM" - confirm_server_active $VM_UUID -} - -function create_vms { - foreach_project_vm 'create_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}' -} - -function ping_ip { - # Test agent connection. 
Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $IP $BOOT_TIMEOUT $NET_NAME -} - -function check_vm { - local PROJECT=$1 - local NUM=$2 - local VM_NAME="$PROJECT-server$NUM" - local NET_NAME=$3 - source $TOP_DIR/openrc $PROJECT $PROJECT - ping_ip $VM_NAME $NET_NAME - # TODO (nati) test ssh connection - # TODO (nati) test interconnection between VMs - # TODO (nati) test dhcp host routes - # TODO (nati) test multi-nic -} - -function check_vms { - foreach_project_vm 'check_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}' -} - -function shutdown_vm { - local PROJECT=$1 - local NUM=$2 - source $TOP_DIR/openrc $PROJECT $PROJECT - VM_NAME=${PROJECT}-server$NUM - nova delete $VM_NAME -} - -function shutdown_vms { - foreach_project_vm 'shutdown_vm ${%PROJECT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "Some VMs failed to shutdown" - fi -} - -function delete_network { - local PROJECT=$1 - local NUM=$2 - local NET_NAME="${PROJECT}-net$NUM" - source $TOP_DIR/openrc admin admin - local PROJECT_ID - PROJECT_ID=$(get_project_id $PROJECT) - #TODO(nati) comment out until l3-agent merged - #for res in port subnet net router;do - for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do - delete_probe $net_id - neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete % - neutron net-delete $net_id - done - source $TOP_DIR/openrc demo demo -} - -function delete_networks { - foreach_project_net 'delete_network ${%PROJECT%_NAME} %NUM%' - # TODO(nati) add security group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_all { - create_projects - create_networks - create_vms -} - -function delete_all { - shutdown_vms - delete_networks - delete_projects_and_users -} - -function all { - create_all - check_vms - delete_all -} - -# Test functions -# -------------- - -function test_functions { - IMAGE=$(get_image_id) - echo $IMAGE - - PROJECT_ID=$(get_project_id demo) - echo $PROJECT_ID - - FLAVOR_ID=$(get_flavor_id m1.tiny) - echo $FLAVOR_ID - - NETWORK_ID=$(get_network_id admin) - echo $NETWORK_ID -} - -# Usage and main -# -------------- - -function usage { - echo "$0: [-h]" - echo " -h, --help Display help message" - echo " -t, --project Create projects" - echo " -n, --net Create networks" - echo " -v, --vm Create vms" - echo " -c, --check Check connection" - echo " -x, --delete-projects Delete projects" - echo " -y, --delete-nets Delete networks" - echo " -z, --delete-vms Delete vms" - echo " -T, --test Test functions" -} - -function main { - - echo Description - - if [ $# -eq 0 ] ; then - # if no args are provided, run all tests - all - else - - while [ "$1" != "" ]; do - case $1 in - -h | --help ) usage - exit - ;; - -n | --net ) create_networks - exit - ;; - -v | --vm ) create_vms - exit - ;; - -t | --project ) create_projects - exit - ;; - -c | --check ) check_vms - exit - ;; - -T | --test ) test_functions - exit - ;; - -x | --delete-projects ) delete_projects_and_users - exit - ;; - -y | --delete-nets ) delete_networks - exit - ;; - -z | --delete-vms ) shutdown_vms - exit - ;; - -a | --all ) all - exit - ;; - * ) usage - exit 1 - esac - shift - done - fi
-} - -trap failed ERR -function failed { - local r=$? - set +o errtrace - set +o xtrace - echo "Failed to execute" - echo "Starting cleanup..." - delete_all - echo "Finished cleanup" - exit $r -} - -# Kick off script -# --------------- - -echo $* -main $* - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh deleted file mode 100755 index 5f8b0a4d5d..0000000000 --- a/exercises/sec_groups.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -# **sec_groups.sh** - -# Test security groups via the command line - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - - -# Testing Security Groups -# ======================= - -# List security groups -nova secgroup-list - -# Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" -nova secgroup-create $SEC_GROUP_NAME 'a test security group' - -# Add some rules to the secgroup -RULES_TO_ADD=( 22 3389 5900 ) - -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Check to make sure rules were added -SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) -die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME" -for i in "${RULES_TO_ADD[@]}"; do - skip= - for j in "${SEC_GROUP_RULES[@]}"; do - [[ $i == $j ]] && { skip=1; break; } - done - [[ -n $skip ]] || exit 1 -done - -# Delete rules and secgroup -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Delete secgroup -nova secgroup-delete $SEC_GROUP_NAME || \ - die $LINENO "Failure deleting security group $SEC_GROUP_NAME" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh deleted file mode 100755 index 4a41e0f1ed..0000000000 --- a/exercises/swift.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -# **swift.sh** - -# Test swift via the ``python-openstackclient`` command line - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This
script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If swift is not enabled we exit with exitcode 55 which means the -# exercise is skipped. -is_service_enabled s-proxy || exit 55 - -# Container name -CONTAINER=ex-swift -OBJECT=/etc/issue - - -# Testing Swift -# ============= - -# Check that we have access to swift via keystone -openstack object store account show || die $LINENO "Failure getting account status" - -# We start by creating a test container -openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER" - -# add a file into it. -openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER" - -# list the objects -openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER" - -# delete the object first -openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER" - -# delete the container -openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/volumes.sh b/exercises/volumes.sh deleted file mode 100755 index 0de1226fee..0000000000 --- a/exercises/volumes.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash - -# **volumes.sh** - -# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 which means the -# exercise is skipped. -is_service_enabled cinder || exit 55 - -# Ironic does not currently support volume attachment.
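# NOTE(editor): the check below, like the service checks above, relies on the
# exercises' shared convention that exit code 55 means "skipped": a missing
# prerequisite ends the script early without counting as a failure. A caller
# consuming these scripts would separate the outcomes roughly like this (an
# illustrative sketch only, not DevStack's actual runner):
#
#   ./volumes.sh; rc=$?
#   case $rc in
#       0)  echo "PASS volumes.sh" ;;
#       55) echo "SKIP volumes.sh" ;;
#       *)  echo "FAIL volumes.sh (rc=$rc)" ;;
#   esac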
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-vol_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-vol-inst} -VOL_NAME="ex-vol-$(openssl rand -hex 4)" - - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -openstack image list - -# Grab the id of the image to launch -IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" - -# Volumes -# ------- - -# Verify it doesn't exist -if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then - die $LINENO "Volume $VOL_NAME already exists" -fi - -# Create a new volume -start_time=$(date +%s) -cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not created" -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Attach to server -DEVICE=/dev/vdb -start_time=$(date +%s) -nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not attached to $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-attach in $((end_time - start_time)) seconds" - -VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) -die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status" -if [[ "$VOL_ATTACH" != $VM_UUID ]]; then - die $LINENO "Volume not attached to correct instance" -fi - -# Clean up -# -------- - -# Detach volume -start_time=$(date +%s) -nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not detached from $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-detach in $((end_time - start_time)) seconds" - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not deleted" -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh deleted file mode 100644 index cc90128176..0000000000 --- a/extras.d/60-ceph.sh +++ /dev/null @@ -1,75 +0,0 @@ -# ceph.sh - DevStack extras script to install Ceph - -if is_service_enabled ceph; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/ceph - elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - echo_summary "Installing Ceph" - check_os_support_ceph - if [ "$REMOTE_CEPH" = "False" ]; then - install_ceph - echo_summary "Configuring Ceph" - configure_ceph - # NOTE (leseb): Do everything here because we need to have Ceph started before the main - # OpenStack components. Ceph OSD must start here otherwise we can't upload any images. 
- echo_summary "Initializing Ceph" - init_ceph - start_ceph - else - install_ceph_remote - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - if is_service_enabled glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_glance - fi - if is_service_enabled nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_nova - fi - if is_service_enabled cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_cinder - fi - if is_service_enabled n-cpu; then - # NOTE (leseb): the part below is a requirement to attach Ceph block devices - echo_summary "Configuring libvirt secret" - import_libvirt_secret_ceph - fi - - if [ "$REMOTE_CEPH" = "False" ]; then - if is_service_enabled glance; then - echo_summary "Configuring Glance for Ceph" - configure_ceph_embedded_glance - fi - if is_service_enabled nova; then - echo_summary "Configuring Nova for Ceph" - configure_ceph_embedded_nova - fi - if is_service_enabled cinder; then - echo_summary "Configuring Cinder for Ceph" - configure_ceph_embedded_cinder - fi - fi - fi - - if [[ "$1" == "unstack" ]]; then - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - cleanup_ceph_embedded - stop_ceph - fi - cleanup_ceph_general - fi - - if [[ "$1" == "clean" ]]; then - if [ "$REMOTE_CEPH" = "True" ]; then - cleanup_ceph_remote - else - cleanup_ceph_embedded - fi - cleanup_ceph_general - fi -fi diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 6a3d121497..06c73ec763 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -6,18 +6,22 @@ if is_service_enabled tempest; then source $TOP_DIR/lib/tempest elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Tempest" - install_tempest + async_runfunc install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running : elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Tempest config must come after all other plugins are run + : + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # local.conf Tempest option overrides + : + elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest echo_summary "Initializing Tempest" configure_tempest echo_summary "Installing Tempest Plugins" install_tempest_plugins - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # local.conf Tempest option overrides - : fi if [[ "$1" == "unstack" ]]; then diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template index e1246f11b6..e401803abc 100644 --- a/files/apache-cinder-api.template +++ b/files/apache-cinder-api.template @@ -6,21 +6,13 @@ Listen %PUBLICPORT% WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/c-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - + Require all granted diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template deleted file mode 100644 index ab33c66f7e..0000000000 --- a/files/apache-heat-api-cfn.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api-cfn - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - 
AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template deleted file mode 100644 index 06c91bbdb1..0000000000 --- a/files/apache-heat-api-cloudwatch.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api-cloudwatch - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template deleted file mode 100644 index 4924b3978b..0000000000 --- a/files/apache-heat-api.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup heat-api - WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/heat-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template deleted file mode 100644 index d88ac3e35a..0000000000 --- a/files/apache-heat-pip-repo.template +++ /dev/null @@ -1,15 +0,0 @@ -Listen %HEAT_PIP_REPO_PORT% - - - DocumentRoot %HEAT_PIP_REPO% - - DirectoryIndex index.html - Require all granted - Order allow,deny - allow from all - - - ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log - LogLevel warn - CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined - diff --git a/files/apache-horizon.template b/files/apache-horizon.template index bfd75678e3..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -1,5 +1,5 @@ - WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi + WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP} WSGIApplicationGroup %{GLOBAL} @@ -21,22 +21,13 @@ Options Indexes FollowSymLinks MultiViews AllowOverride None - # Apache 2.4 uses mod_authz_host for access control now (instead of - # "Allow") - - Order allow,deny - Allow from all - - = 2.4> - Require all granted - + Require all granted - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 8a4b0f0c43..d99e8e6ce0 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,5 +1,4 @@ Listen %PUBLICPORT% -Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" 
keystone_combined @@ -7,7 +6,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" - WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-public WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} @@ -20,19 +19,12 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLKEYFILE% - - WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public @@ -43,13 +35,3 @@ Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - -Alias /identity_v2_admin %KEYSTONE_BIN%/keystone-wsgi-admin - - SetHandler wsgi-script - Options +ExecCGI - - WSGIProcessGroup keystone-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/files/apache-neutron.template b/files/apache-neutron.template new file mode 100644 index 0000000000..358e87f5da --- /dev/null +++ b/files/apache-neutron.template @@ -0,0 +1,37 @@ +Listen %PUBLICPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined + + + Require all granted + + + + WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup neutron-server + WSGIScriptAlias / %NEUTRON_BIN%/neutron-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/neutron.log + CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% + +Alias /networking %NEUTRON_BIN%/neutron-api + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup neutron-server + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index bcf406edf3..66fcf73cf2 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template index 6231c1ced8..64be03166e 100644 --- a/files/apache-nova-metadata.template +++ b/files/apache-nova-metadata.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log %SSLENGINE% 
%SSLCERTFILE% diff --git a/files/apts b/files/apts deleted file mode 120000 index ef926de053..0000000000 --- a/files/apts +++ /dev/null @@ -1 +0,0 @@ -debs/ \ No newline at end of file diff --git a/files/debs/cinder b/files/debs/cinder index 3595e011da..5d390e24bf 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -1,5 +1,4 @@ lvm2 -open-iscsi -open-iscsi-utils # Deprecated since quantal dist:precise qemu-utils tgt # NOPRIME +thin-provisioning-tools diff --git a/files/debs/dstat b/files/debs/dstat index 2b643b8b1b..40d00f4aa4 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1 +1,2 @@ -dstat +dstat # dist:bionic +pcp diff --git a/files/debs/general b/files/debs/general index a1f2a4b159..1e63e4f582 100644 --- a/files/debs/general +++ b/files/debs/general @@ -1,8 +1,11 @@ +apache2 +apache2-dev bc -bridge-utils bsdmainutils curl +default-jre-headless # NOPRIME g++ +gawk gcc gettext # used for compiling message catalogs git @@ -10,22 +13,21 @@ graphviz # needed for docs iputils-ping libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libmysqlclient-dev # MySQL-python libpq-dev # psycopg2 libssl-dev # for pyOpenSSL +libsystemd-dev # for systemd-python libxml2-dev # lxml libxslt1-dev # lxml libyaml-dev lsof # useful when debugging -openjdk-7-jre-headless # NOPRIME openssh-server openssl pkg-config psmisc -python2.7 -python-dev -python-gdbm # needed for testr -screen +python3-dev +python3-pip +python3-systemd +python3-venv tar tcpdump unzip diff --git a/files/debs/heat b/files/debs/heat deleted file mode 100644 index 1ecbc780b1..0000000000 --- a/files/debs/heat +++ /dev/null @@ -1 +0,0 @@ -gettext # dist:trusty diff --git a/files/debs/horizon b/files/debs/horizon index 1f45b54f7c..48332893b1 100644 --- a/files/debs/horizon +++ b/files/debs/horizon @@ -1,3 +1,2 @@ apache2 # NOPRIME libapache2-mod-wsgi # NOPRIME -libpcre3-dev # pyScss diff --git a/files/debs/keystone b/files/debs/keystone index fd0317b9b6..1cfa6ffa38 100644 --- a/files/debs/keystone +++ b/files/debs/keystone @@ -2,5 +2,5 @@ libkrb5-dev libldap2-dev libsasl2-dev memcached -python-mysqldb +python3-mysqldb sqlite3 diff --git a/files/debs/ldap b/files/debs/ldap index aa3a934d95..54896bb845 100644 --- a/files/debs/ldap +++ b/files/debs/ldap @@ -1,3 +1,3 @@ ldap-utils -python-ldap +python3-ldap slapd diff --git a/files/debs/n-api b/files/debs/n-api deleted file mode 100644 index 0928cd56b9..0000000000 --- a/files/debs/n-api +++ /dev/null @@ -1 +0,0 @@ -fping diff --git a/files/debs/n-cpu b/files/debs/n-cpu index 69ac430290..54d6fa3fd1 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -1,9 +1,11 @@ cryptsetup +dosfstools genisoimage gir1.2-libosinfo-1.0 lvm2 # NOPRIME +netcat-openbsd open-iscsi -python-guestfs # NOPRIME +python3-guestfs # NOPRIME qemu-utils sg3-utils sysfsutils diff --git a/files/rpms-suse/q-agt b/files/debs/neutron-agent similarity index 100% rename from files/rpms-suse/q-agt rename to files/debs/neutron-agent diff --git a/files/debs/neutron b/files/debs/neutron-common similarity index 59% rename from files/debs/neutron rename to files/debs/neutron-common index 2307fa54d5..f6afc5bf55 100644 --- a/files/debs/neutron +++ b/files/debs/neutron-common @@ -1,14 +1,14 @@ acl dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:precise +dnsmasq-utils # for dhcp_release ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils-arping iputils-ping -libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all -python-mysqldb +python3-mysqldb 
rabbitmq-server # NOPRIME radvd # NOPRIME sqlite3 diff --git a/files/debs/neutron-l3 b/files/debs/neutron-l3 new file mode 100644 index 0000000000..106a6a35aa --- /dev/null +++ b/files/debs/neutron-l3 @@ -0,0 +1,3 @@ +conntrack +conntrackd +keepalived diff --git a/files/debs/nova b/files/debs/nova index 58dad411a8..5c00ad72d9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,22 +1,18 @@ conntrack curl -dnsmasq-base -dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports -libmysqlclient-dev -libvirt-bin # NOPRIME +libvirt-clients # NOPRIME +libvirt-daemon-system # NOPRIME libvirt-dev # NOPRIME mysql-server # NOPRIME parted pm-utils -python-mysqldb -qemu # dist:wheezy,jessie NOPRIME +python3-mysqldb qemu-kvm # NOPRIME rabbitmq-server # NOPRIME socat # used by ajaxterm diff --git a/files/debs/os-brick b/files/debs/os-brick new file mode 100644 index 0000000000..4148b0c421 --- /dev/null +++ b/files/debs/os-brick @@ -0,0 +1,3 @@ +lsscsi +open-iscsi +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/debs/ovn b/files/debs/ovn new file mode 100644 index 0000000000..81eea5e633 --- /dev/null +++ b/files/debs/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-controller-vtep +ovn-host diff --git a/files/debs/q-agt b/files/debs/q-agt deleted file mode 100644 index ea8819e884..0000000000 --- a/files/debs/q-agt +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/debs/q-agt b/files/debs/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/debs/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/debs/q-l3 b/files/debs/q-l3 deleted file mode 100644 index 106a6a35aa..0000000000 --- a/files/debs/q-l3 +++ /dev/null @@ -1,3 +0,0 @@ -conntrack -conntrackd -keepalived diff --git a/files/debs/q-l3 b/files/debs/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/debs/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/debs/swift b/files/debs/swift index 4b8ac3d793..67c6c8ddb4 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -2,5 +2,6 @@ curl liberasurecode-dev make memcached +rsync sqlite3 xfsprogs diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy index dce9c07d3f..5bd8e213a2 100644 --- a/files/debs/tls-proxy +++ b/files/debs/tls-proxy @@ -1 +1 @@ -stud +apache2 diff --git a/files/debs/zookeeper b/files/debs/zookeeper deleted file mode 100644 index f41b559007..0000000000 --- a/files/debs/zookeeper +++ /dev/null @@ -1 +0,0 @@ -zookeeperd diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 diff --git a/files/ebtables.workaround b/files/ebtables.workaround deleted file mode 100644 index c8af51fad5..0000000000 --- a/files/ebtables.workaround +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# This is a terrible, terrible, truly terrible work around for -# environments that have libvirt < 1.2.11. ebtables requires that you -# specifically tell it you would like to not race and get punched in -# the face when 2 run at the same time with a --concurrent flag. - -flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real $@ diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index 2f1f1395ee..d3b9be8b6e 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,4 +1,4 @@ -dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config changetype: modify replace: olcSuffix olcSuffix: ${BASE_DN} diff --git a/files/ldap/user.ldif.in b/files/ldap/user.ldif.in new file mode 100644 index 0000000000..16a980757d --- /dev/null +++ b/files/ldap/user.ldif.in @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Demo LDAP user +dn: cn=demo,ou=Users,${BASE_DN} +cn: demo +displayName: demo +givenName: demo +mail: demo@openstack.org +objectClass: inetOrgPerson +objectClass: top +sn: demo +uid: demo +userPassword: demo diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + +[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..47fbfc5e17 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
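# NOTE(editor): the client above and the server below share one wire format
# on the Unix socket: an ASCII byte count, a newline, then exactly that many
# bytes of UTF-8 JSON. A self-contained sketch of that framing; socketpair()
# stands in for the real /tmp/openstack.sock connection, the helper names are
# illustrative, and makefile() is a simplification of the recv_into()/
# memoryview loop the two scripts actually use:
#
#   import json, socket
#
#   def send_msg(sock, doc):
#       payload = json.dumps(doc).encode('utf-8')
#       sock.sendall(b'%d\n' % len(payload))   # length prefix + delimiter
#       sock.sendall(payload)                  # then the JSON body
#
#   def recv_msg(sock):
#       f = sock.makefile('rb')
#       length = int(f.readline())             # read up to the newline
#       return json.loads(f.read(length))      # then exactly `length` bytes
#
#   a, b = socket.socketpair()
#   send_msg(a, {"app": "openstack", "argv": ["server", "list"]})
#   print(recv_msg(b))                         # round-trips the same dict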
+ +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph deleted file mode 100644 index 8c4955df90..0000000000 --- a/files/rpms-suse/ceph +++ /dev/null @@ -1,3 +0,0 @@ -ceph # NOPRIME -lsb -xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index 189a232fa7..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,4 +0,0 @@ -lvm2 -open-iscsi -qemu-tools -tgt # NOPRIME diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index 3b19071770..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,30 +0,0 @@ -bc -bridge-utils -ca-certificates-mozilla -curl -gcc -gcc-c++ -git-core -graphviz # docs -iputils -libffi-devel # pyOpenSSL -libjpeg8-devel # Pillow 3.0.0 -libmysqlclient-devel # MySQL-python -libopenssl-devel # to rebuild pyOpenSSL if needed -libxslt-devel # lxml -lsof # useful when 
debugging -make -net-tools -openssh -openssl -postgresql-devel # psycopg2 -psmisc -python-cmd2 # dist:opensuse-12.3 -python-devel # pyOpenSSL -screen -tar -tcpdump -unzip -util-linux -wget -zlib-devel diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index 753ea76e04..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,2 +0,0 @@ -apache2-mod_wsgi # NOPRIME -apache2 # NOPRIME diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 66cfc23423..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,4 +0,0 @@ -cyrus-sasl-devel -memcached -openldap2-devel -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index af5ac2fc54..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1,2 +0,0 @@ -fping -python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 9ece11534d..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,8 +0,0 @@ -cryptsetup -genisoimage -libosinfo -lvm2 -open-iscsi -sg3_utils -# Stuff for diablo volumes -sysfsutils diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-novnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-spice +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron deleted file mode 100644 index e9abc6eca6..0000000000 --- a/files/rpms-suse/neutron +++ /dev/null @@ -1,12 +0,0 @@ -acl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -iptables -iputils -mariadb # NOPRIME -rabbitmq-server # NOPRIME -radvd # NOPRIME -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova deleted file mode 100644 index ae115d2138..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,23 +0,0 @@ -conntrack-tools -curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -gawk -genisoimage # required for config_drive -iptables -iputils -kpartx -kvm # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -mariadb # NOPRIME -parted -polkit -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch deleted file mode 100644 index 53f8bb22cf..0000000000 --- a/files/rpms-suse/openvswitch +++ /dev/null @@ -1,3 +0,0 @@ - -openvswitch -openvswitch-switch diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 3663b98545..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,6 +0,0 @@ -curl -liberasurecode-devel -memcached -sqlite3 -xfsprogs -xinetd diff --git a/files/rpms/ceph b/files/rpms/ceph index 64befc5f00..19f158fd57 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core +redhat-lsb-core # not:rhel9,openEuler-22.03 xfsprogs diff --git a/files/rpms/cinder b/files/rpms/cinder index 0274642fd6..375f93e090 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,4 +1,3 @@ -iscsi-initiator-utils lvm2 qemu-img -scsi-target-utils # NOPRIME +targetcli diff --git 
a/files/rpms/dstat b/files/rpms/dstat index 2b643b8b1b..6524bed607 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1 +1 @@ -dstat +pcp-system-tools diff --git a/files/rpms/general b/files/rpms/general index ee2e8a058b..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,32 +1,38 @@ bc -bridge-utils curl dbus +gawk gcc gcc-c++ gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 graphviz # needed only for docs -iptables-services # NOPRIME f22,f23,f24 -java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f22,f23,f24 +httpd +httpd-devel +iptables-nft # dist:rhel9,rhel10 +iptables-services +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel -mariadb-devel # MySQL-python +mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc -pyOpenSSL # version in pip uses too much memory -python-devel -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 -screen +python3-devel +python3-pip # not:openEuler-22.03 +python3-systemd +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/horizon b/files/rpms/horizon index aeb2cb5c96..a88552bc84 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,5 +1,2 @@ -Django httpd # NOPRIME mod_wsgi # NOPRIME -pcre-devel # pyScss -pyxattr diff --git a/files/rpms/keystone b/files/rpms/keystone index 170308373a..5f19c6f70c 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,4 +1,3 @@ memcached mod_ssl -MySQL-python sqlite diff --git a/files/rpms/n-api b/files/rpms/n-api deleted file mode 100644 index 0928cd56b9..0000000000 --- a/files/rpms/n-api +++ /dev/null @@ -1 +0,0 @@ -fping diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 26c5ced196..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,8 +1,9 @@ cryptsetup -genisoimage +dosfstools iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils +xorriso diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-novnc +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/n-spice b/files/rpms/n-spice deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-spice +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/neutron-agent b/files/rpms/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/rpms/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/rpms/neutron b/files/rpms/neutron-common similarity index 70% rename from files/rpms/neutron rename to files/rpms/neutron-common index 2e49a0cf93..fe25f57ea6 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron-common @@ -2,11 +2,9 @@ acl dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils -mysql-devel -MySQL-python -mysql-server # NOPRIME openvswitch # NOPRIME rabbitmq-server # NOPRIME radvd # NOPRIME diff --git a/files/rpms-suse/q-l3 b/files/rpms/neutron-l3 similarity index 100% rename from files/rpms-suse/q-l3 rename to files/rpms/neutron-l3 
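The rpms files above rely on in-line annotations (`# NOPRIME`, `# dist:...`, and the newly introduced `# not:...`) that DevStack's get_packages machinery parses when deciding what to install on a given distro. A rough Python sketch of that filtering, for illustration only; the real implementation is the `_parse_package_files` bash function shown further down, which uses regex substring matching rather than the exact comma-split used here:

```python
import re

def parse_package_file(lines, distro):
    """Yield package names applicable to `distro`, honoring the
    # NOPRIME, # dist:..., and # not:... annotations."""
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        package = line.split('#')[0].strip()
        if not package:
            continue
        if 'NOPRIME' in line:
            continue  # installation deferred to a later stage of stack.sh
        m = re.search(r'#.*dist:([^ ]*)', line)
        if m and distro.lower() not in m.group(1).lower().split(','):
            continue  # package limited to other distros
        m = re.search(r'#.*not:([^ ]*)', line)
        if m and distro.lower() in m.group(1).lower().split(','):
            continue  # package explicitly excluded on this distro
        yield package

# Example: targetcli installs everywhere; the annotated entries are skipped.
pkgs = ["targetcli",
        "dnsmasq-utils  # dist:opensuse-12.3,opensuse-13.1",
        "ceph  # NOPRIME"]
print(list(parse_package_file(pkgs, 'rhel9')))  # -> ['targetcli']
```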
diff --git a/files/rpms/nova b/files/rpms/nova index 594393e733..c323224279 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,27 +1,13 @@ conntrack-tools curl -dnsmasq # for nova-network -dnsmasq-utils # for dhcp_release ebtables -gawk -genisoimage # required for config_drive +genisoimage iptables iputils -kernel-modules # dist:f22,f23,f24 +kernel-modules # not:openEuler-22.03 kpartx -kvm # NOPRIME -libvirt-bin # NOPRIME -libvirt-devel # NOPRIME -libvirt-python # NOPRIME -libxml2-python -m2crypto -mysql-devel -MySQL-python -mysql-server # NOPRIME -numpy # needed by websockify for spice console parted polkit -qemu-kvm # NOPRIME rabbitmq-server # NOPRIME sqlite sudo diff --git a/files/rpms/os-brick b/files/rpms/os-brick new file mode 100644 index 0000000000..14ff870557 --- /dev/null +++ b/files/rpms/os-brick @@ -0,0 +1,2 @@ +iscsi-initiator-utils +lsscsi diff --git a/files/rpms/ovn b/files/rpms/ovn new file mode 100644 index 0000000000..698e57b0de --- /dev/null +++ b/files/rpms/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-host +ovn-vtep diff --git a/files/rpms/q-agt b/files/rpms/q-agt deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms/q-agt +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms/q-agt b/files/rpms/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/rpms/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms/q-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/rpms/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/rpms/swift b/files/rpms/swift index 1e05167bcf..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,8 +1,6 @@ curl liberasurecode-devel memcached -pyxattr -rsync-daemon # dist:f22,f23,f24 +rsync-daemon sqlite xfsprogs -xinetd diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper deleted file mode 100644 index 1bfac538a2..0000000000 --- a/files/rpms/zookeeper +++ /dev/null @@ -1 +0,0 @@ -zookeeper diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c670531b31..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -4,76 +4,76 @@ log file = %SWIFT_DATA_DIR%/logs/rsyncd.log pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 -[account6012] +[account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6612.lock -[account6022] +[account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6622.lock -[account6032] +[account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6632.lock -[account6042] +[account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6642.lock -[container6011] +[container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = 
false -lock file = %SWIFT_DATA_DIR%/run/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6611.lock -[container6021] +[container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6621.lock -[container6031] +[container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6631.lock -[container6041] +[container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6641.lock -[object6010] +[object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6613.lock -[object6020] +[object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6623.lock -[object6030] +[object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6633.lock -[object6040] +[object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/files/zookeeper/environment b/files/zookeeper/environment deleted file mode 100644 index afa2d2f89f..0000000000 --- a/files/zookeeper/environment +++ /dev/null @@ -1,36 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
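The rsyncd.conf hunk above moves every Swift rsync module onto a new naming scheme and drops the extra per-module `/node/` path component. The new names encode a 6600 base, the node number, and a per-service final digit; a small helper that reproduces the pattern (the base and offsets are read off the diff itself, not from any Swift API):

```python
# Per-service final digit in the new naming scheme, as seen in the diff:
# container -> 1, account -> 2, object -> 3.
SERVICE_DIGIT = {'container': 1, 'account': 2, 'object': 3}

def rsync_module(service, node):
    """Return the rsync module name for a service on node 1..4."""
    return '%s%d' % (service, 6600 + 10 * node + SERVICE_DIGIT[service])

assert rsync_module('account', 1) == 'account6612'
assert rsync_module('container', 3) == 'container6631'
assert rsync_module('object', 4) == 'object6643'
```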
-# - -# Modified from http://packages.ubuntu.com/saucy/zookeeperd -NAME=zookeeper -ZOOCFGDIR=/etc/zookeeper/conf - -# seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar" - -ZOOCFG="$ZOOCFGDIR/zoo.cfg" -ZOO_LOG_DIR=/var/log/zookeeper -USER=$NAME -GROUP=$NAME -PIDDIR=/var/run/$NAME -PIDFILE=$PIDDIR/$NAME.pid -SCRIPTNAME=/etc/init.d/$NAME -JAVA=/usr/bin/java -ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" -ZOO_LOG4J_PROP="INFO,ROLLINGFILE" -JMXLOCALONLY=false -JAVA_OPTS="" diff --git a/files/zookeeper/log4j.properties b/files/zookeeper/log4j.properties deleted file mode 100644 index 6c45a4aad9..0000000000 --- a/files/zookeeper/log4j.properties +++ /dev/null @@ -1,69 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# From http://packages.ubuntu.com/saucy/zookeeperd - -# ZooKeeper Logging Configuration -# - -# Format is " (, )+ - -log4j.rootLogger=${zookeeper.root.logger} - -# Example: console appender only -# log4j.rootLogger=INFO, CONSOLE - -# Example with rolling log file -#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE - -# Example with rolling log file and tracing -#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE - -# -# Log INFO level and above messages to the console -# -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.Threshold=INFO -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n - -# -# Add ROLLINGFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender -log4j.appender.ROLLINGFILE.Threshold=WARN -log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log - -# Max log file size of 10MB -log4j.appender.ROLLINGFILE.MaxFileSize=10MB -# uncomment the next line to limit number of backup files -#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 - -log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout -log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n - - -# -# Add TRACEFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.TRACEFILE=org.apache.log4j.FileAppender -log4j.appender.TRACEFILE.Threshold=TRACE -log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log - -log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout -### Notice we are including log4j's NDC here (%x) -log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/files/zookeeper/myid b/files/zookeeper/myid deleted file mode 100644 
index c227083464..0000000000 --- a/files/zookeeper/myid +++ /dev/null @@ -1 +0,0 @@ -0 \ No newline at end of file diff --git a/files/zookeeper/zoo.cfg b/files/zookeeper/zoo.cfg deleted file mode 100644 index b8f55827e3..0000000000 --- a/files/zookeeper/zoo.cfg +++ /dev/null @@ -1,74 +0,0 @@ -# -# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html - -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. -dataDir=/var/lib/zookeeper -# Place the dataLogDir to a separate physical disc for better performance -# dataLogDir=/disk2/zookeeper - -# the port at which the clients will connect -clientPort=2181 - -# Maximum number of clients that can connect from one client -maxClientCnxns=60 - -# specify all zookeeper servers -# The fist port is used by followers to connect to the leader -# The second one is used for leader election - -server.0=127.0.0.1:2888:3888 - -# To avoid seeks ZooKeeper allocates space in the transaction log file in -# blocks of preAllocSize kilobytes. The default block size is 64M. One reason -# for changing the size of the blocks is to reduce the block size if snapshots -# are taken more often. (Also, see snapCount). -#preAllocSize=65536 - -# Clients can submit requests faster than ZooKeeper can process them, -# especially if there are a lot of clients. To prevent ZooKeeper from running -# out of memory due to queued requests, ZooKeeper will throttle clients so that -# there is no more than globalOutstandingLimit outstanding requests in the -# system. The default limit is 1,000.ZooKeeper logs transactions to a -# transaction log. After snapCount transactions are written to a log file a -# snapshot is started and a new transaction log file is started. The default -# snapCount is 10,000. -#snapCount=1000 - -# If this option is defined, requests will be will logged to a trace file named -# traceFile.year.month.day. -#traceFile= - -# Leader accepts client connections. Default value is "yes". The leader machine -# coordinates updates. For higher update throughput at thes slight expense of -# read throughput the leader can be configured to not accept clients and focus -# on coordination. -#leaderServes=yes - -# Autopurge every hour to avoid using lots of disk in bursts -# Order of the next 2 properties matters. -# autopurge.snapRetainCount must be before autopurge.purgeInterval. 
-autopurge.snapRetainCount=3
-autopurge.purgeInterval=1
\ No newline at end of file
diff --git a/functions b/functions
index 46a7d414a1..829fc86c55 100644
--- a/functions
+++ b/functions
@@ -12,14 +12,16 @@
 # ensure we don't re-source this in the same environment
 [[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0
-declare -r _DEVSTACK_FUNCTIONS=1
+declare -r -g _DEVSTACK_FUNCTIONS=1
 
 # Include the common functions
 FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 source ${FUNC_DIR}/functions-common
 source ${FUNC_DIR}/inc/ini-config
+source ${FUNC_DIR}/inc/meta-config
 source ${FUNC_DIR}/inc/python
 source ${FUNC_DIR}/inc/rootwrap
+source ${FUNC_DIR}/inc/async
 
 # Save trace setting
 _XTRACE_FUNCTIONS=$(set +o | grep xtrace)
@@ -45,6 +47,79 @@ function short_source {
 # export it so child shells have access to the 'short_source' function also.
 export -f short_source
 
+# Download a file from a URL
+#
+# Will check cache (in $FILES) or download given URL.
+#
+# Argument is the URL to the remote file
+#
+# Will echo the local path to the file as the output. Will die on
+# failure to download.
+#
+# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS
+# and tools/image_list.sh
+function get_extra_file {
+    local file_url=$1
+
+    file_name=$(basename "$file_url")
+    if [[ $file_url != file* ]]; then
+        # If the file isn't cached, download it
+        if [[ ! -f $FILES/$file_name ]]; then
+            wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name
+            if [[ $? -ne 0 ]]; then
+                die "$file_url could not be downloaded"
+            fi
+        fi
+        echo "$FILES/$file_name"
+        return
+    else
+        # just strip the file:// bit and that's the path to the file
+        echo $file_url | sed 's/^file:\/\///g'
+    fi
+}
+
+# Generate image property arguments for OSC
+#
+# Arguments: properties, one per, like propname=value
+#
+# Result is --property propname1=value1 --property propname2=value2
+function _image_properties_to_arg {
+    local result=""
+    for property in $*; do
+        result+=" --property $property"
+    done
+    echo $result
+}
+
+# Upload an image to glance using the configured mechanism
+#
+# Arguments:
+#  image name
+#  container format
+#  disk format
+#  path to image file
+#  optional properties (format of propname=value)
+#
+function _upload_image {
+    local image_name="$1"
+    shift
+    local container="$1"
+    shift
+    local disk="$1"
+    shift
+    local image="$1"
+    shift
+    local properties
+    local useimport
+
+    properties=$(_image_properties_to_arg $*)
+
+    if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+        useimport="--import"
+    fi
+
+    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}")
+}
 
 # Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
@@ -58,17 +133,29 @@ function upload_image {
     local image image_fname image_name
 
+    local max_attempts=5
+
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
     image_fname=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
         # Downloads the image (uec ami+aki style), then extracts it.
         if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
-            wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
-            if [[ $? -ne 0 ]]; then
-                echo "Not found: $image_url"
-                return
-            fi
+            for attempt in `seq $max_attempts`; do
+                local rc=0
+                wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$?
+ if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + # Signal failure to download to the caller, so they can fail early + return 1 + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi image="$FILES/${image_fname}" else @@ -86,7 +173,7 @@ function upload_image { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then image_name="${image_fname%.tar.gz}" - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format ami --disk-format ami < "${image}" + _upload_image "$image_name" ami ami "$image" return fi @@ -200,42 +287,8 @@ function upload_image { vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}" - return - fi - - # XenServer-vhd-ovf-format images are provided as .vhd.tgz - # and should not be decompressed prior to loading - if [[ "$image_url" =~ '.vhd.tgz' ]]; then - image_name="${image_fname%.vhd.tgz}" - local force_vm_mode="" - if [[ "$image_name" =~ 'cirros' ]]; then - # Cirros VHD image currently only boots in PV mode. - # Nova defaults to PV for all VHD images, but - # the glance setting is needed for booting - # directly from volume. - force_vm_mode="--property vm_mode=xen" - fi - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=ovf --disk-format=vhd \ - $force_vm_mode < "${image}" - return - fi + _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter" - # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. - # and should not be decompressed prior to loading. - # Setting metadata, so PV mode is used. - if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - image_name="${image_fname%.xen-raw.tgz}" - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=tgz --disk-format=raw \ - --property vm_mode=xen < "${image}" return fi @@ -246,12 +299,7 @@ function upload_image { die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image" fi - openstack \ - --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \ - image create \ - "$image_name" --public \ - --container-format=bare --disk-format=ploop \ - --property vm_mode=$vm_mode < "${image}" + _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode return fi @@ -261,6 +309,15 @@ function upload_image { local container_format="" local unpack="" local img_property="" + + # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model + # to libvirt in the image properties. 
+ if [[ "$VIRT_DRIVER" == "libvirt" ]]; then + if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then + img_property="hw_rng_model=virtio" + fi + fi + case "$image_fname" in *.tar.gz|*.tgz) # Extract ami and aki files @@ -309,6 +366,17 @@ function upload_image { disk_format=qcow2 container_format=bare ;; + *.qcow2.xz) + image_name=$(basename "$image" ".qcow2.xz") + disk_format=qcow2 + container_format=bare + unpack=unxz + ;; + *.raw) + image_name=$(basename "$image" ".raw") + disk_format=raw + container_format=bare + ;; *.iso) image_name=$(basename "$image" ".iso") disk_format=iso @@ -317,7 +385,7 @@ function upload_image { *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz) local extension="${image_fname#*.}" image_name=$(basename "$image" ".$extension") - disk_format=vhd + disk_format=$(echo $image_fname | grep -oP '(?<=\.)vhdx?(?=\.|$)') container_format=bare if [ "${image_fname##*.}" == "gz" ]; then unpack=zcat @@ -327,20 +395,30 @@ function upload_image { esac if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then - img_property="--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0" + img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0" fi if is_arch "aarch64"; then - img_property="--property hw_machine_type=virt --property hw_cdrom_bus=virtio --property os_command_line='console=ttyAMA0'" + img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'" fi if [ "$container_format" = "bare" ]; then if [ "$unpack" = "zcat" ]; then - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") + _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property elif [ "$unpack" = "bunzip2" ]; then - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}") + _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property + elif [ "$unpack" = "unxz" ]; then + # NOTE(brtknr): unxz the file first and cleanup afterwards to + # prevent timeout while Glance tries to upload image (e.g. to Swift). + local tmp_dir + local image_path + tmp_dir=$(mktemp -d) + image_path="$tmp_dir/$image_name" + unxz -cv "${image}" > "$image_path" + _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property + rm -rf $tmp_dir else - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}" + _upload_image "$image_name" $container_format $disk_format "$image" $img_property fi else # Use glance client to add the kernel the root filesystem. @@ -348,12 +426,12 @@ function upload_image { # kernel for use when uploading the root filesystem. 
         local kernel_id="" ramdisk_id="";
         if [ -n "$kernel" ]; then
-            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id)
         fi
         if [ -n "$ramdisk" ]; then
-            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id)
         fi
-        openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+        _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
     fi
 }
@@ -401,9 +479,39 @@
 EOF
     return $rval
 }
 
+function wait_for_compute {
+    local timeout=$1
+    local rval=0
+    local compute_hostname
+    time_start "wait_for_service"
+    compute_hostname=$(iniget $NOVA_CONF DEFAULT host)
+    if [[ -z $compute_hostname ]]; then
+        compute_hostname=$(hostname)
+    fi
+    timeout $timeout bash -x <<EOF || rval=$?
+        ID=""
+        while [[ "\$ID" == "" ]]; do
+            sleep 1
+            ID=\$(openstack --os-cloud devstack-admin compute service list --host "$compute_hostname" --service nova-compute -c ID -f value)
+        done
+EOF
+    time_stop "wait_for_service"
+    # Figure out what is happening if we timed out
+    if [[ "$rval" != 0 ]]; then
+        echo "Didn't find the compute service registered by hostname after $timeout seconds"
+        openstack --os-cloud devstack-admin compute service list
+    fi
+    return $rval
+}
+
 # Checks the connectivity to an IP address
 # Usage: ping_check <ip> [boot-timeout] [from_net] [expected]
 function ping_check {
     local ip=$1
@@ -417,12 +525,9 @@ function ping_check {
     # if we don't specify a from_net we're expecting things to work
     # fine from our local box.
     if [[ -n "$from_net" ]]; then
+        # TODO(stephenfin): Is there any way neutron could be disabled now?
         if is_service_enabled neutron; then
             ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
-        elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
-            # there is no way to address the multihost / private case, bail here for compatibility.
-            # TODO: remove this cruft and redo code to handle this at the caller level.
-            return
         fi
     fi
@@ -446,13 +551,13 @@ function get_instance_ip {
     local vm_id=$1
     local network_name=$2
-    local nova_result
+    local addresses
     local ip
-    nova_result="$(nova show $vm_id)"
-    ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    addresses=$(openstack server show -c addresses -f value "$vm_id")
+    ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p")
    if [[ $ip = "" ]];then
-        echo "$nova_result"
+        echo "addresses of server $vm_id : $addresses"
         die $LINENO "[Fail] Couldn't get ipaddress of VM"
     fi
     echo $ip
@@ -568,23 +673,59 @@ function vercmp {
     esac
 }
 
+# This sets up defaults we like in devstack for logging for tracking
+# down issues, and makes sure everything is done the same between
+# projects.
+# NOTE(jh): Historically this function switched between three different
+# functions: setup_systemd_logging, setup_colorized_logging and
+# setup_standard_logging_identity. Since we always run with systemd now,
+# this could be cleaned up, but the other functions may still be in use
+# by plugins.
Since deprecations haven't worked in the past, we'll just +# leave them in place. +function setup_logging { + setup_systemd_logging $1 +} + # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. -# The last two parameters are optional and can be used to specify -# non-default value for project and user format variables. -# Defaults are respectively 'project_name' and 'user_name' -# -# setup_colorized_logging something.conf SOMESECTION function setup_colorized_logging { local conf_file=$1 - local conf_section=$2 - local project_var=${3:-"project_name"} - local user_var=${4:-"user_name"} # Add color to logging output - iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$user_var")s %("$project_var")s%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_systemd_logging { + local conf_file=$1 + # NOTE(sdague): this is a nice to have, and means we're using the + # native systemd path, which provides for things like search on + # request-id. However, there may be an eventlet interaction here, + # so going off for now. + USE_JOURNAL=$(trueorfalse False USE_JOURNAL) + local pidstr="" + if [[ "$USE_JOURNAL" == "True" ]]; then + iniset $conf_file DEFAULT use_journal "True" + # if we are using the journal directly, our process id is already correct + else + pidstr="(pid=%(process)d) " + fi + iniset $conf_file DEFAULT logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" + + iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_standard_logging_identity { + local conf_file=$1 + iniset $conf_file DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s" } # These functions are provided for basic fall-back functionality for @@ -603,23 +744,22 @@ if ! 
function_exists echo_nolog; then fi -# create_disk - Create backing disk +# create_disk - Create, configure, and mount a backing disk function create_disk { local node_number local disk_image=${1} local storage_data_dir=${2} local loopback_disk_size=${3} + local key - # Create a loopback disk and format it to XFS. - if [[ -e ${disk_image} ]]; then - if egrep -q ${storage_data_dir} /proc/mounts; then - sudo umount ${storage_data_dir}/drives/sdb1 - sudo rm -f ${disk_image} - fi - fi + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" - sudo mkdir -p ${storage_data_dir}/drives/images + destroy_disk $disk_image $storage_data_dir + # Create an empty file of the correct size (and ensure the + # directory structure up to that path exists) + sudo mkdir -p $(dirname ${disk_image}) sudo truncate -s ${loopback_disk_size} ${disk_image} # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in @@ -629,11 +769,108 @@ function create_disk { # Swift and Ceph. sudo mkfs.xfs -f -i size=1024 ${disk_image} - # Mount the disk with mount options to make it as efficient as possible - if ! egrep -q ${storage_data_dir} /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${disk_image} ${storage_data_dir} + # Install a new loopback fstab entry for this disk image, and mount it + echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab + sudo mkdir -p $storage_data_dir + sudo mount -v $storage_data_dir +} + +# Unmount, de-configure, and destroy a backing disk +function destroy_disk { + local disk_image=$1 + local storage_data_dir=$2 + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + + # Unmount the target, if mounted + if egrep -q $storage_data_dir /proc/mounts; then + sudo umount $storage_data_dir fi + + # Clear any fstab rules + sudo sed -i '/.*comment=$key.*/ d' /etc/fstab + + # Delete the file + sudo rm -f $disk_image +} + + +# set_mtu - Set MTU on a device +function set_mtu { + local dev=$1 + local mtu=$2 + sudo ip link set mtu $mtu dev $dev +} + + +# running_in_container - Returns true otherwise false +function running_in_container { + [[ $(systemd-detect-virt --container) != 'none' ]] +} + + +# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling +function enable_kernel_bridge_firewall { + # Load bridge module. This module provides access to firewall for bridged + # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to + # enable/disable bridge firewalling + sudo modprobe bridge + # For newer kernels (3.18+), those sysctl settings are split into a separate + # kernel module (br_netfilter). Load it too, if present. + sudo modprobe br_netfilter 2>> /dev/null || : + # Enable bridge firewalling in case it's disabled in kernel (upstream + # default is enabled, but some distributions may decide to change it). + # This is at least needed for RHEL 7.2 and earlier releases. + for proto in ip ip6; do + sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1 + done +} + + +# Set a systemd system override +# +# This sets a system-side override in system.conf. 
A per-service
+# override would be /etc/systemd/system/${service}.service/override.conf
+function set_systemd_override {
+    local key="$1"
+    local value="$2"
+
+    local sysconf="/etc/systemd/system.conf"
+    iniset -sudo "${sysconf}" "Manager" "$key" "$value"
+    echo "Set systemd system override for ${key}=${value}"
+
+    sudo systemctl daemon-reload
+}
+
+# Get a random port from the local port range
+#
+# This function returns an available port in the local port range. The search
+# order is not truly random, but should be considered a random value by the
+# user because it depends on the state of your local system.
+function get_random_port {
+    read lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range
+    while true; do
+        for (( port = upper_port ; port >= lower_port ; port-- )); do
+            sudo lsof -i ":$port" &> /dev/null
+            if [[ $? > 0 ]] ; then
+                break 2
+            fi
+        done
+    done
+    echo $port
+}
+
+# Save some state information
+#
+# Write out various useful state information to /etc/devstack-version
+function write_devstack_version {
+    cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
+DevStack Version: ${DEVSTACK_SERIES}
+Change: $(git log --format="%H %s %ci" -1)
+OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
+EOF
+}
 
 # Restore xtrace
diff --git a/functions-common b/functions-common
index 3fdd71bffc..c2042c4fef 100644
--- a/functions-common
+++ b/functions-common
@@ -27,7 +27,6 @@
 # - ``RECLONE``
 # - ``REQUIREMENTS_DIR``
 # - ``STACK_USER``
-# - ``TRACK_DEPENDS``
 # - ``http_proxy``, ``https_proxy``, ``no_proxy``
 #
@@ -37,20 +36,23 @@ set +o xtrace
 # ensure we don't re-source this in the same environment
 [[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0
-declare -r _DEVSTACK_FUNCTIONS_COMMON=1
+declare -r -g _DEVSTACK_FUNCTIONS_COMMON=1
 
 # Global Config Variables
-declare -A GITREPO
-declare -A GITBRANCH
-declare -A GITDIR
+declare -A -g GITREPO
+declare -A -g GITBRANCH
+declare -A -g GITDIR
 
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
+# Systemd service file environment variables per service
+declare -A -g SYSTEMD_ENV_VARS
+
+KILL_PATH="$(which kill)"
 
 # Save these variables to .stackenv
 STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
-    KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \
-    LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP \
-    HOST_IPV6 SERVICE_IP_VERSION"
+    KEYSTONE_SERVICE_URI \
+    LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
+    HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION"
 
 # Saves significant environment variables to .stackenv for later use
@@ -86,41 +88,120 @@ function write_clouds_yaml {
     if [ -f "$SSL_BUNDLE_FILE" ]; then
         CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
     fi
-    # demo -> devstack
-    $TOP_DIR/tools/update_clouds_yaml.py \
+    # devstack: user with the member role on demo project
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
         --file $CLOUDS_YAML \
         --os-cloud devstack \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
-        --os-auth-url $KEYSTONE_AUTH_URI \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username demo \
         --os-password $ADMIN_PASSWORD \
         --os-project-name demo
-    # alt_demo -> devstack-alt
-    $TOP_DIR/tools/update_clouds_yaml.py \
+    # devstack-admin: user with the admin role on the admin project
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-admin \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username admin \
+        --os-password $ADMIN_PASSWORD \
+        --os-project-name admin
+
+    # 
devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-alt: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-alt \ --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ + --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username alt_demo \ --os-password $ADMIN_PASSWORD \ --os-project-name alt_demo - # admin -> devstack-admin - $TOP_DIR/tools/update_clouds_yaml.py \ + # devstack-alt-member: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ + --os-cloud devstack-alt-member \ --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_member \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-system-admin: user with the admin role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username admin \ --os-password $ADMIN_PASSWORD \ - --os-project-name admin + --os-system-scope all + + # devstack-system-member: user with the member role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_member \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # devstack-system-reader: user with the reader role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_reader \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + cat >> $CLOUDS_YAML < +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. 
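With write_clouds_yaml now emitting one named cloud per user/role/scope combination, scripts and tests can select a persona by cloud name alone rather than juggling OS_* environment variables. For instance, with openstacksdk (assuming the generated clouds.yaml sits in a standard search path such as /etc/openstack; the server listing is just an illustrative call):

```python
import openstack

# Member role scoped to the demo project.
demo = openstack.connect(cloud='devstack')

# System-scoped admin, e.g. for role assignments and service management.
sysadmin = openstack.connect(cloud='devstack-system-admin')

for server in demo.compute.servers():
    print(server.name)
```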
+function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + function isset { [[ -v "$1" ]] } @@ -216,7 +318,7 @@ function die_if_not_set { function deprecated { local text=$1 DEPRECATED_TEXT+="\n$text" - echo "WARNING: $text" + echo "WARNING: $text" >&2 } # Prints line number and "message" in error format @@ -227,9 +329,9 @@ function err { xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; + echo "$msg" 1>&2; if [[ -n ${LOGDIR} ]]; then - echo $msg >> "${LOGDIR}/error.log" + echo "$msg" >> "${LOGDIR}/error.log" fi $xtrace return $exitcode @@ -282,7 +384,7 @@ function warn { xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg + echo "$msg" $xtrace return $exitcode } @@ -302,11 +404,11 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora) +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``trusty`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` -declare os_VENDOR os_RELEASE os_PACKAGE os_CODENAME +declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME # Make a *best effort* attempt to install lsb_release packages for the # user if not available. Note can't use generic install_package* @@ -319,15 +421,9 @@ function _ensure_lsb_release { if [[ -x $(command -v apt-get 2>/dev/null) ]]; then sudo apt-get install -y lsb-release elif [[ -x $(command -v zypper 2>/dev/null) ]]; then - # XXX: old code paths seem to have assumed SUSE platforms also - # had "yum". Keep this ordered above yum so we don't try to - # install the rh package. 
suse calls it just "lsb" - sudo zypper -n install lsb + sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core - elif [[ -x $(command -v yum 2>/dev/null) ]]; then - # all rh patforms (fedora, centos, rhel) have this pkg - sudo yum install -y redhat-lsb-core + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -340,14 +436,21 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # We only support distros that provide a sane lsb_release - _ensure_lsb_release + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release + source /etc/os-release + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then + os_RELEASE=${VERSION_ID} + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') + os_VENDOR=$(echo $NAME | tr -d '[:space:]') + else + _ensure_lsb_release - os_RELEASE=$(lsb_release -r -s) - os_CODENAME=$(lsb_release -c -s) - os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_CODENAME=$(lsb_release -c -s) + os_VENDOR=$(lsb_release -i -s) + fi - if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" else os_PACKAGE="rpm" @@ -361,35 +464,28 @@ function GetOSVersion { # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values -declare DISTRO +declare -g DISTRO function GetDistro { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \ - "$os_VENDOR" =~ (LinuxMint) ]]; then - # 'Everyone' refers to Ubuntu / Debian / Mint releases by + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then - DISTRO="opensuse-$os_RELEASE" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then - # just use major release - DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (RockyLinux) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then - # Drop the . release as we assume it's compatible - # XXX re-evaluate when we get RHEL10 - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs${os_RELEASE%.*}" - elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then - DISTRO="${os_VENDOR}${os_RELEASE::1}" + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. -f1) + DISTRO="rhel${MAJOR_VERSION}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -433,7 +529,7 @@ function is_oraclelinux { # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). +# (Fedora, RHEL, CentOS, Rocky, etc). 
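GetOSVersion now special-cases RHEL-family platforms that dropped lsb_release by reading /etc/os-release directly, and GetDistro folds the result into a short token such as rhel9, f39, or jammy. A rough Python equivalent of that mapping, just to make the tokenization concrete (the file parsing is simplified and this is not DevStack code):

```python
def distro_token(os_release_text):
    """Map /etc/os-release contents to a DevStack-style DISTRO token."""
    fields = dict(line.split('=', 1)
                  for line in os_release_text.splitlines() if '=' in line)
    ident = fields.get('ID', '').strip('"')
    version = fields.get('VERSION_ID', '').strip('"')
    if ident in ('almalinux', 'centos', 'rocky', 'rhel'):
        return 'rhel' + version.split('.')[0]   # e.g. rhel9, rhel10
    if ident == 'fedora':
        return 'f' + version                    # e.g. f39
    if ident == 'openEuler':
        return 'openEuler-' + version
    if ident in ('ubuntu', 'debian'):
        # Debian-family tokens are the release codename.
        return fields.get('VERSION_CODENAME', '').strip('"')
    return 'unknown-' + ident

print(distro_token('ID=rocky\nVERSION_ID="9.3"\n'))  # -> rhel9
```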
# is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -441,21 +537,14 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ - [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \ - [ "$os_VENDOR" = "Virtuozzo" ] || [ "$os_VENDOR" = "kvmibm" ] -} - - -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]] + [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ + [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } @@ -469,7 +558,14 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } - +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= @@ -519,8 +615,8 @@ function git_clone { if [[ ! -d $git_dest ]]; then if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" - echo "the project to the \$PROJECTS variable in the job definition." - die $LINENO "Cloning not allowed in this configuration" + echo "the project to 'required-projects' in the job definition." + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi git_timed clone $git_clone_flags $git_remote $git_dest fi @@ -532,12 +628,12 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to the \$PROJECTS variable in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - git_timed clone $git_clone_flags $git_remote $git_dest + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest cd $git_dest - # This checkout syntax works for both branches and tags - git checkout $git_ref + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest @@ -547,7 +643,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $git_dest -name '*.pyc' -delete + sudo find $git_dest -name '*.pyc' -delete # handle git_ref accordingly to type (tag, branch) if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then @@ -563,6 +659,18 @@ function git_clone { fi fi + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. + # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. 
+ # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --system --add safe.directory ${git_dest} + # print out the results so we know what change was used in the logs cd $git_dest git show --oneline | head -1 @@ -662,7 +770,7 @@ function get_default_host_ip { if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} local host_ips host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip @@ -765,14 +873,9 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - # Gets domain id domain_id=$( - # Gets domain id - openstack domain show $1 \ - -f value -c id 2>/dev/null || - # Creates new domain - openstack domain create $1 \ - --description "$2" \ + openstack --os-cloud devstack-system-admin domain create $1 \ + --description "$2" --or-show \ -f value -c id ) echo $domain_id @@ -786,7 +889,7 @@ function get_or_create_group { # Gets group id group_id=$( # Creates new group with --or-show - openstack group create $1 \ + openstack --os-cloud devstack-system-admin group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -805,7 +908,7 @@ function get_or_create_user { # Gets user id user_id=$( # Creates new user with --or-show - openstack user create \ + openstack --os-cloud devstack-system-admin user create \ $1 \ --password "$2" \ --domain=$3 \ @@ -822,7 +925,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack project create $1 \ + openstack --os-cloud devstack-system-admin project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -835,7 +938,7 @@ function get_or_create_role { local role_id role_id=$( # Creates role with --or-show - openstack role create $1 \ + openstack --os-cloud devstack-system-admin role create $1 \ --or-show -f value -c id ) echo $role_id @@ -861,31 +964,22 @@ function _get_domain_args { # Usage: get_or_add_user_project_role [ ] function get_or_add_user_project_role { local user_role_id + local domain_args domain_args=$(_get_domain_args $4 $5) - # Gets user role id - user_role_id=$(openstack role list \ + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ --user $2 \ - --column "ID" \ --project $3 \ - --column "Name" \ $domain_args \ - | grep " $1 " | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack role add $1 \ - --user $2 \ - --project $3 \ - $domain_args - user_role_id=$(openstack role list \ - --user $2 \ - --column "ID" \ - --project $3 \ - --column "Name" \ - $domain_args \ - | grep " $1 " | get_field 1) - fi + -c Role -f value) echo $user_role_id } @@ -893,56 +987,48 @@ function get_or_add_user_project_role { # Usage: get_or_add_user_domain_role function get_or_add_user_domain_role { local user_role_id - # Gets user role id - user_role_id=$(openstack role list \ + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ --user $2 \ - --column "ID" \ --domain $3 \ - --column "Name" \ - | grep " $1 " | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack role add $1 \ - --user $2 \ - --domain $3 - user_role_id=$(openstack role list \ - --user $2 \ - --column "ID" \ - --domain $3 \ - --column "Name" \ - | grep " $1 " | get_field 1) - fi + -c Role -f value) + echo $user_role_id } -# Gets or adds user role to domain -# Usage: get_or_add_user_domain_role -function get_or_add_user_domain_role { +# Gets or adds user role to system +# Usage: get_or_add_user_system_role [] +function get_or_add_user_system_role { local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4) + # Gets user role id - user_role_id=$(openstack role list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - --column "ID" \ - --domain $3 \ - --column "Name" \ - | grep " $1 " | get_field 1) + --system $3 \ + $domain_args \ + -f value -c Role) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ - --domain $3 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 - user_role_id=$(openstack role list \ + --system $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - --column "ID" \ - --domain $3 \ - --column "Name" \ - | grep " $1 " | get_field 1) + --system $3 \ + $domain_args \ + -f value -c Role) fi echo $user_role_id } @@ -951,21 +1037,18 @@ function get_or_add_user_domain_role { # Usage: get_or_add_group_project_role function get_or_add_group_project_role { local group_role_id - # Gets group role id - group_role_id=$(openstack role list \ + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack role add $1 \ + --group $2 \ + --project $3 + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ --group $2 \ --project $3 \ - -c "ID" -f value) - if [[ -z "$group_role_id" ]]; then - # Adds role to group and get it - openstack role add $1 \ - --group $2 \ - --project $3 - group_role_id=$(openstack role list \ - --group $2 \ - --project $3 \ - -c "ID" -f value) - fi + -f value -c Role) + echo $group_role_id } @@ -976,9 +1059,9 @@ function get_or_create_service { # Gets service id service_id=$( # Gets service id - openstack service show $2 -f value -c id 2>/dev/null || + openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists - openstack service create \ + openstack --os-cloud devstack-system-admin service create \ $2 \ --name $1 \ --description="$3" \ @@ -991,14 +1074,14 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id - endpoint_id=$(openstack endpoint list \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 \ --interface $2 \ --region $4 \ -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint - endpoint_id=$(openstack endpoint create \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi @@ -1006,7 +1089,7 @@ function _get_or_create_endpoint_with_interface { } # Gets or creates endpoint -# Usage: get_or_create_endpoint +# Usage: get_or_create_endpoint [adminurl] [internalurl] function get_or_create_endpoint { # NOTE(jamielennnox): when converting to v3 endpoint creation we go from # creating one endpoint with multiple urls to multiple endpoints each with @@ -1018,9 +1101,13 @@ function get_or_create_endpoint { # endpoints they need. 
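#
# A sketch of how the catalog helpers compose; the "foo" service name,
# type, and URL are hypothetical. With get_or_create_endpoint (whose body
# completes just below), only the public URL is required now that the
# admin and internal URLs are optional:
foo_service=$(get_or_create_service foo object-store "Foo object storage")
get_or_create_endpoint object-store RegionOne http://10.0.0.10/foo
#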
local public_id public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) - _get_or_create_endpoint_with_interface $1 admin $4 $2 - _get_or_create_endpoint_with_interface $1 internal $5 $2 - + # only create admin/internal urls if provided content for them + if [[ -n "$4" ]]; then + _get_or_create_endpoint_with_interface $1 admin $4 $2 + fi + if [[ -n "$5" ]]; then + _get_or_create_endpoint_with_interface $1 internal $5 $2 + fi # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } @@ -1028,7 +1115,7 @@ function get_or_create_endpoint { # Get a URL from the identity service # Usage: get_endpoint_url function get_endpoint_url { - echo $(openstack endpoint list \ + echo $(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 --interface $2 \ -c URL -f value) } @@ -1042,6 +1129,17 @@ function is_ironic_hardware { return 1 } +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= @@ -1058,8 +1156,6 @@ function _get_package_dir { pkg_dir=$base_dir/debs elif is_fedora; then pkg_dir=$base_dir/rpms - elif is_suse; then - pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1158,6 +1254,19 @@ function _parse_package_files { fi fi + # Look for # not:xxx in comment + if [[ $line =~ (.*)#.*not:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ${distros,,} =~ ${DISTRO,,} ]]; then + # If match then skip this package + inst_pkg=0 + fi + fi + if [[ $inst_pkg = 1 ]]; then echo $package fi @@ -1176,6 +1285,8 @@ function _parse_package_files { # - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. +# - ``# not:DISTRO`` or ``not:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros not listed. The distro names are case insensitive. function get_packages { local xtrace xtrace=$(set +o | grep xtrace) @@ -1208,10 +1319,16 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == c-* ]]; then if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then file_to_parse="${file_to_parse} ${package_dir}/cinder" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == s-* ]]; then if [[ ! $file_to_parse =~ $package_dir/swift ]]; then file_to_parse="${file_to_parse} ${package_dir}/swift" @@ -1220,6 +1337,9 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/nova ]]; then file_to_parse="${file_to_parse} ${package_dir}/nova" fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi elif [[ $service == g-* ]]; then if [[ ! 
$file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" @@ -1228,9 +1348,9 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then file_to_parse="${file_to_parse} ${package_dir}/keystone" fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then - file_to_parse="${file_to_parse} ${package_dir}/neutron" + elif [[ $service == q-* || $service == neutron-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then + file_to_parse="${file_to_parse} ${package_dir}/neutron-common" fi elif [[ $service == ir-* ]]; then if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then @@ -1264,6 +1384,30 @@ function get_plugin_packages { $xtrace } +# Search plugins for a bindep.txt file +# +# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS`` +# +# Note this is only valid after BINDEP_CMD is setup in stack.sh, and +# is thus not really intended to be called externally. +function _get_plugin_bindep_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local bindep_file + local packages + + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt + if [[ -f ${bindep_file} ]]; then + packages+=$($BINDEP_CMD -b --file ${bindep_file} || true) + fi + done + echo "${packages}" + $xtrace +} + # Distro-agnostic package installer # Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE`` # install_package package [package ...] @@ -1286,8 +1430,6 @@ function real_install_package { apt_get install "$@" elif is_fedora; then yum_install "$@" - elif is_suse; then - zypper_install "$@" else exit_distro_not_supported "installing packages" fi @@ -1328,16 +1470,17 @@ function uninstall_package { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then - sudo ${YUM:-yum} remove -y "$@" ||: - elif is_suse; then - sudo zypper rm "$@" ||: + sudo dnf remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi } -# Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM`` +# Wrapper for ``dnf`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# The name is kept for backwards compatability with external +# callers, despite none of our supported platforms using yum +# any more. # yum_install package [package ...] function yum_install { local result parse_yum_result @@ -1345,38 +1488,8 @@ function yum_install { [[ "$OFFLINE" = "True" ]] && return time_start "yum_install" - - # - We run with LC_ALL=C so string matching *should* be OK - # - Exit 1 if the failure might get better with a retry. - # - Exit 2 if it is fatal. - parse_yum_result=' \ - BEGIN { result=0 } \ - /^YUM_FAILED/ { exit $2 } \ - /^No package/ { result=2 } \ - /^Failed:/ { result=2 } \ - //{ print } \ - END { exit result }' - - # The manual check for missing packages is because yum -y assumes - # missing or failed packages are OK. - # See https://bugzilla.redhat.com/show_bug.cgi?id=965567 - (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \ - | awk "$parse_yum_result" && result=$? || result=$? - + sudo_with_proxies dnf install -y "$@" time_stop "yum_install" - - # if we return 1, then the wrapper functions will run an update - # and try installing the package again as a defense against bad - # mirrors. 
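#
# An illustrative files/debs-style package list using the annotations
# documented above (the package names are arbitrary examples):
#
#   qemu-kvm        # NOPRIME
#   libvirt-daemon  # dist:jammy,noble
#   dnsmasq         # not:rhel9
#
# The dist: line installs only on the listed distros, the not: line is
# skipped on the listed distros, and both match $DISTRO case-insensitively.
#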
This can hide failures, especially when we have - # packages that are in the "Failed:" section because their rpm - # install scripts failed to run correctly (in this case, the - # package looks installed, so when the retry happens we just think - # the package is OK, and incorrectly continue on). - if [ "$result" == 2 ]; then - die "Detected fatal package install failure" - fi - - return "$result" } # zypper wrapper to set arguments correctly @@ -1388,78 +1501,160 @@ function zypper_install { [[ "$(id -u)" = "0" ]] && sudo="env" $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ - zypper --non-interactive install --auto-agree-with-licenses "$@" + zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@" } +# Run bindep and install packages it outputs +# +# Usage: +# install_bindep [profile,profile] +# +# Note unlike the bindep command itself, profile(s) specified should +# be a single, comma-separated string, no spaces. +function install_bindep { + local file=$1 + local profiles=${2:-""} + local pkgs + + if [[ ! -f $file ]]; then + warn $LINENO "Can not find bindep file: $file" + return + fi -# Process Functions -# ================= + # converting here makes it much easier to work with passing + # arguments + profiles=${profiles/,/ /} -# _run_process() is designed to be backgrounded by run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name. -# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR`` -# If an optional group is provided sg will be used to set the group of -# the command. -# _run_process service "command-line" [group] -function _run_process { - # disable tracing through the exec redirects, it's just confusing in the logs. 
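#
# Hypothetical follow-on to the install_bindep usage documented above:
# resolve and install a plugin's bindep packages for its "test" profile
# (the plugin path is made up for illustration):
install_bindep $DEST/my-plugin/devstack/files/bindep.txt test
#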
- xtrace=$(set +o | grep xtrace) - set +o xtrace + # Note bindep returns 1 when packages need to be installed, so we + # have to ignore it's return for "-e" + pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true) + + if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} + fi +} +function write_user_unit_file { local service=$1 local command="$2" local group=$3 + local user=$4 + local env_vars="$5" + local extra="" + if [[ -n "$group" ]]; then + extra="Group=$group" + fi + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" - local logfile="${service}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - if [[ -n ${LOGDIR} ]]; then - exec 1>&"$real_logfile" 2>&1 - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log - fi + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 +function write_uwsgi_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "Restart" "always" + iniset -sudo $unitfile "Service" "NotifyAccess" "all" + iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" - # reenable xtrace before we do *real* work - $xtrace + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} - # Run under ``setsid`` to force the process to become a session and group leader. - # The pid saved can be used with pkill -g to get the entire process group. - if [[ -n "$group" ]]; then - setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid - else - setsid $command & echo $! 
>$SERVICE_DIR/$SCREEN_NAME/$service.pid +function _common_systemd_pitfalls { + local cmd=$1 + # do some sanity checks on $cmd to see things we don't expect to work + + if [[ "$cmd" =~ "sudo" ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +You are trying to use run_process with sudo, this is not going to work under systemd. + +If you need to run a service as a user other than \$STACK_USER call it with: + + run_process \$name \$cmd \$group \$user +EOF + die $LINENO "$msg" + fi + + if [[ ! "$cmd" =~ ^/ ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +The cmd="$cmd" does not start with an absolute path. It will fail to +start under systemd. + +Please update your run_process stanza to have an absolute path. +EOF + die $LINENO "$msg" fi - # Just silently exit this process - exit 0 } -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# init_service_check -function init_service_check { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} +# Helper function to build a basic unit file and run it under systemd. +function _run_under_systemd { + local service=$1 + local command="$2" + local cmd=$command + # sanity check the command + _common_systemd_pitfalls "$cmd" - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + local systemd_service="devstack@$service.service" + local group=$3 + local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi + if [[ "$command" =~ "uwsgi" ]] ; then + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" + else + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service } # Find out if a process exists by partial name. @@ -1476,144 +1671,29 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. 
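#
# For orientation, a unit written by write_user_unit_file above for a
# hypothetical "foo-api" service would come out roughly like this (the
# user and paths are illustrative; iniset renders "key = value" pairs):
#
#   [Unit]
#   Description = Devstack devstack@foo-api.service
#   [Service]
#   Environment = "PATH=..."
#   User = stack
#   ExecStart = /usr/local/bin/foo-api --config-file /etc/foo/foo.conf
#   KillMode = process
#   TimeoutStopSec = 300
#   [Install]
#   WantedBy = multi-user.target
#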
-# Uses globals ``USE_SCREEN`` -# run_process service "command-line" [group] +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" local group=$3 - local subservice=$4 + local user=$4 + local env_vars="$5" - local name=${subservice:-$service} + local name=$service time_start "run_process" if is_service_enabled $service; then - if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$name" "$command" "$group" - else - # Spawn directly without screen - _run_process "$name" "$command" "$group" & - fi + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi time_stop "run_process" } -# Helper to launch a process in a named screen -# Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``, -# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING`` -# screen_process name "command-line" [group] -# Run a command in a shell in a screen window, if an optional group -# is provided, use sg to set the group of the command. -function screen_process { - local name=$1 - local command="$2" - local group=$3 - - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - screen -S $SCREEN_NAME -X screen -t $name - - local logfile="${name}.log.${CURRENT_LOG_TIME}" - local real_logfile="${LOGDIR}/${logfile}" - echo "LOGDIR: $LOGDIR" - echo "SCREEN_LOGDIR: $SCREEN_LOGDIR" - echo "log: $real_logfile" - if [[ -n ${LOGDIR} ]]; then - if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then - screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile" - screen -S $SCREEN_NAME -p $name -X log on - fi - # If logging isn't active then avoid a broken symlink - touch "$real_logfile" - bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log" - if [[ -n ${SCREEN_LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log - fi - fi - - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing - # happens. This sleep was added originally to handle gate runs - # where we needed this to be at least 3 seconds to pass - # consistently on slow clouds. Now this is configurable so that we - # can determine a reasonable value for the local case which should - # be much smaller. - sleep ${SCREEN_SLEEP:-3} - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the process failure file - # - # The pid saved can be used in stop_process() as a process group - # id to kill off all child processes - if [[ -n "$group" ]]; then - command="sg $group '$command'" - fi - - # Append the process to the screen rc file - screen_rc "$name" "$command" - - screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL" -} - -# Screen rc file builder -# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING`` -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! 
-e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then - echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# screen_stop_service service -function screen_stop_service { - local service=$1 - - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if is_service_enabled $service; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $service -X kill || true - fi -} - # Stop a service process # If a PID is available use it, kill the whole process group via TERM # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind -# Uses globals ``SERVICE_DIR``, ``USE_SCREEN`` +# Uses globals ``SERVICE_DIR`` # stop_process service function stop_process { local service=$1 @@ -1621,147 +1701,28 @@ function stop_process { SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} if is_service_enabled $service; then - # Kill via pid if we have one available - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then - pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) - # oslo.service tends to stop actually shutting down - # reliably in between releases because someone believes it - # is dying too early due to some inflight work they - # have. This is a tension. It happens often enough we're - # going to just account for it in devstack and assume it - # doesn't work. - # - # Set OSLO_SERVICE_WORKS=True to skip this block - if [[ -z "$OSLO_SERVICE_WORKS" ]]; then - # TODO(danms): Remove this double-kill when we have - # this fixed in all services: - # https://bugs.launchpad.net/oslo-incubator/+bug/1446583 - sleep 1 - # /bin/true because pkill on a non existent process returns an error - pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true - fi - rm $SERVICE_DIR/$SCREEN_NAME/$service.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen_stop_service $service + # Only do this for units which appear enabled, this also + # catches units that don't really exist for cases like + # keystone without a failure. + if $SYSTEMCTL is-enabled devstack@$service.service; then + $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service fi fi } -# Helper to get the status of each running service -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` -# service_check +# use systemctl to check service status function service_check { local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME - # make this -o errexit safe - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" - done - - if [ -n "$failures" ]; then - die $LINENO "More details about the above errors can be found with screen" - fi -} - -# Tail a log file in a screen if USE_SCREEN is true. -# Uses globals ``USE_SCREEN`` -function tail_log { - local name=$1 - local logfile=$2 - - if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$name" "sudo tail -f $logfile" - fi -} - - -# Deprecated Functions -# -------------------- - -# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR`` -# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR`` -# _old_run_process service "command-line" -function _old_run_process { - local service=$1 - local command="$2" - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - exec /bin/bash -c "$command" - die "$service exec failure: $command" -} - -# old_run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process. -# old_run_process service "command-line" -function old_run_process { - local service=$1 - local command="$2" - - # Spawn the child process - _old_run_process "$service" "$command" & - echo $! -} - -# Compatibility for existing start_XXXX() functions -# Uses global ``USE_SCREEN`` -# screen_it service "command-line" -function screen_it { - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen_process "$1" "$2" - else - # Spawn directly without screen - old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + for service in ${ENABLED_SERVICES//,/ }; do + # because some things got renamed like key => keystone + if $SYSTEMCTL is-enabled devstack@$service.service; then + # no-pager is needed because otherwise status dumps to a + # pager when in interactive mode, which will stop a manual + # devstack run. 
+ $SYSTEMCTL status devstack@$service.service --no-pager fi - fi -} - -# Compatibility for existing stop_XXXX() functions -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# screen_stop service -function screen_stop { - # Clean up the screen window - stop_process $1 + done } @@ -1779,12 +1740,28 @@ function enable_plugin { local name=$1 local url=$2 local branch=${3:-master} + if is_plugin_enabled $name; then + die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}" + fi DEVSTACK_PLUGINS+=",$name" GITREPO[$name]=$url GITDIR[$name]=$DEST/$name GITBRANCH[$name]=$branch } +# is_plugin_enabled +# +# Check if the plugin was enabled, e.g. using enable_plugin +# +# ``name`` The name with which the plugin was enabled +function is_plugin_enabled { + local name=$1 + if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then + return 0 + fi + return 1 +} + # fetch_plugins # # clones all plugins @@ -1880,7 +1857,7 @@ function run_phase { # white listed elements in tree. We want these to move out # over time as well, but they are in tree, so we need to # manage that. - local exceptions="60-ceph.sh 80-tempest.sh" + local exceptions="80-tempest.sh" local extra extra=$(basename $extra_plugin_file_name) if [[ ! ( $exceptions =~ "$extra" ) ]]; then @@ -1902,6 +1879,35 @@ function run_phase { fi } +# define_plugin +# +# This function is a no-op. It allows a plugin to define its name So +# that other plugins may reference it by name. It should generally be +# the last component of the canonical git repo name. E.g., +# openstack/devstack-foo should use "devstack-foo" as the name here. +# +# This function is currently a noop, but the value may still be used +# by external tools (as in plugin_requires) and may be used by +# devstack in the future. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +function define_plugin { + : +} + +# plugin_requires +# +# This function is a no-op. It is currently used by external tools +# (such as the devstack module for Ansible) to automatically generate +# local.conf files. It is not currently used by devstack itself to +# resolve dependencies. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``other`` is the name of another plugin +function plugin_requires { + : +} + # Service Functions # ================= @@ -2038,10 +2044,6 @@ function enable_service { # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. # -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
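#
# Typical use of the plugin helpers above, with a hypothetical plugin
# named in the canonical openstack/devstack-foo scheme:
enable_plugin devstack-foo https://opendev.org/openstack/devstack-foo master
if is_plugin_enabled devstack-foo; then
    echo "devstack-foo is enabled"
fi
#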
function is_service_enabled { @@ -2064,7 +2066,6 @@ function is_service_enabled { # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && enabled=0 [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0 [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0 [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0 @@ -2165,11 +2166,7 @@ function _safe_permission_operation { return 0 fi - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi + sudo_cmd="sudo" $xtrace $sudo_cmd $@ @@ -2207,6 +2204,23 @@ function cidr2netmask { echo ${1-0}.${2-0}.${3-0}.${4-0} } +# Check if this is a valid ipv4 address string +function is_ipv4_address { + local address=$1 + local regex='([0-9]{1,3}\.){3}[0-9]{1,3}' + # TODO(clarkb) make this more robust + if [[ "$address" =~ $regex ]] ; then + return 0 + else + return 1 + fi +} + +# Remove "[]" around urlquoted IPv6 addresses +function ipv6_unquote { + echo $1 | tr -d [] +} + # Gracefully cp only if source file/dir exists # cp_it source destination function cp_it { @@ -2254,13 +2268,39 @@ function maskip { echo $subnet } +function is_provider_network { + if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then + return 0 + fi + return 1 +} + + +# Return just the . for the given python interpreter +function _get_python_version { + local interp=$1 + local version + # disable erroring out here, otherwise if python 3 doesn't exist we fail hard. + if [[ -x $(which $interp 2> /dev/null) ]]; then + version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + fi + echo ${version} +} + # Return the current python as "python." function python_version { local python_version - python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + python_version=$(_get_python_version python2) echo "python${python_version}" } +function python3_version { + local python3_version + python3_version=$(_get_python_version python3) + echo "python${python3_version}" +} + + # Service wrapper to restart services # restart_service service-name function restart_service { @@ -2304,6 +2344,17 @@ function stop_service { fi } +# Service wrapper to reload services +# If the service was not in running state it will start it +# reload_service service-name +function reload_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl reload-or-restart $1 + else + sudo service $1 reload + fi +} + # Test with a finite retry loop. # function test_with_retry { @@ -2346,9 +2397,9 @@ function sudo_with_proxies { # Resolution is only in whole seconds, so should be used for long # running activities. 
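#
# Quick examples for the small address helpers above (a sketch; the
# trailing comments show the expected behaviour):
is_ipv4_address 192.168.1.1 && echo "v4"      # prints "v4"
is_ipv4_address 2001:db8::1 || echo "not v4"  # prints "not v4"
ipv6_unquote "[2001:db8::1]"                  # prints 2001:db8::1
#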
-declare -A _TIME_TOTAL -declare -A _TIME_START -declare -r _TIME_BEGIN=$(date +%s) +declare -A -g _TIME_TOTAL +declare -A -g _TIME_START +declare -r -g _TIME_BEGIN=$(date +%s) # time_start $name # @@ -2360,7 +2411,7 @@ function time_start { if [[ -n "$start_time" ]]; then die $LINENO "Trying to start the clock on $name, but it's already been started" fi - _TIME_START[$name]=$(date +%s) + _TIME_START[$name]=$(date +%s%3N) } # time_stop $name @@ -2381,7 +2432,7 @@ function time_stop { if [[ -z "$start_time" ]]; then die $LINENO "Trying to stop the clock on $name, but it was never started" fi - end_time=$(date +%s) + end_time=$(date +%s%3N) elapsed_time=$(($end_time - $start_time)) total=${_TIME_TOTAL[$name]:-0} # reset the clock so we can start it in the future @@ -2389,16 +2440,66 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + +function oscwrap { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local out + local rc + local start + local end + # Cannot use timer_start and timer_stop as we run in subshells + # and those rely on modifying vars in the same process (which cannot + # happen from a subshell. + start=$(date +%s%3N) + out=$(command openstack "$@") + rc=$? + end=$(date +%s%3N) + echo $((end - start)) >> $OSCWRAP_TIMER_FILE + + echo "$out" + $xtrace + return $rc +} + +function install_oscwrap { + # File to accumulate our timing data + OSCWRAP_TIMER_FILE=$(mktemp) + # Bash by default doesn't expand aliases, allow it for the aliases + # we want to whitelist. + shopt -s expand_aliases + # Remove all aliases that might be expanded to preserve old unexpanded + # behavior + unalias -a + # Add only the alias we want for openstack + alias openstack=oscwrap +} + +function cleanup_oscwrap { + local total=0 + total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))") + _TIME_TOTAL["osc"]=$total + rm $OSCWRAP_TIMER_FILE +} + # time_totals # Print out total time summary function time_totals { local elapsed_time local end_time - local len=15 + local len=20 local xtrace + local unaccounted_time end_time=$(date +%s) elapsed_time=$(($end_time - $_TIME_BEGIN)) + unaccounted_time=$elapsed_time # pad 1st column this far for t in ${!_TIME_TOTAL[*]}; do @@ -2407,24 +2508,43 @@ function time_totals { fi done + cleanup_oscwrap + xtrace=$(set +o | grep xtrace) set +o xtrace echo echo "=========================" echo "DevStack Component Timing" + echo " (times are in seconds) " echo "=========================" - printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" - echo for t in ${!_TIME_TOTAL[*]}; do local v=${_TIME_TOTAL[$t]} + # because we're recording in milliseconds + v=$(($v / 1000)) printf "%-${len}s %3d\n" "$t" "$v" + unaccounted_time=$(($unaccounted_time - $v)) done + echo "-------------------------" + printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time" echo "=========================" + printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" $xtrace } +function clean_pyc_files { + # Clean up all *.pyc files + if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then + sudo find $DEST -name "*.pyc" -delete + fi +} + +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/inc/async b/inc/async new file mode 100644 
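#
# How the oscwrap timing shim above is wired in during a run (a sketch):
install_oscwrap        # from here on, "openstack" expands to oscwrap
openstack flavor list  # runs the real client, appending its cost in ms to OSCWRAP_TIMER_FILE
cleanup_oscwrap        # folds the accumulated total into _TIME_TOTAL["osc"]
#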
index 0000000000..56338f5343 --- /dev/null +++ b/inc/async @@ -0,0 +1,256 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... +# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. If the message contains "%command" then the +# full command line of the job will be substituted in the output +function async_log { + local name="$1" + shift + local message="$*" + local inifile=${DEST}/async/${name}.ini + local pid + local command + + pid=$(iniget $inifile job pid) + command=$(iniget $inifile job command | tr '#' '-') + message=$(echo "$message" | sed "s#%command#$command#g") + + echo "[$BASHPID Async ${name}:${pid}]: $message" +} + +# Inner function that actually runs the requested task. We wrap it like this +# just so we can emit a finish message as soon as the work is done, to make +# it easier to find the tracking just before an error. +function async_inner { + local name="$1" + local rc + local fifo="${DEST}/async/${name}.fifo" + shift + set -o xtrace + if $* >${DEST}/async/${name}.log 2>&1; then + rc=0 + set +o xtrace + async_log "$name" "finished successfully" + else + rc=$? + set +o xtrace + async_log "$name" "FAILED with rc $rc" + fi + iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + # Block on the fifo until we are signaled to exit by the main process + cat $fifo + return $rc +} + +# Run something async. Takes a symbolic name and a list of arguments of +# what to run. Ideally this would be rarely used and async_runfunc() would +# be used everywhere for readability. +# +# This spawns the work in a background worker, records a "future" to be +# collected by a later call to async_wait() +function async_run { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local name="$1" + shift + local inifile=${DEST}/async/${name}.ini + local fifo=${DEST}/async/${name}.fifo + + touch $inifile + iniset $inifile job command "$*" + iniset $inifile job start_time $(date +%s%3N) + + if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + mkfifo $fifo + async_inner $name $* & + iniset $inifile job pid $! + async_log "$name" "running: %command" + $xtrace + else + iniset $inifile job pid "self" + async_log "$name" "Running synchronously: %command" + $xtrace + $* + return $? + fi +} + +# Shortcut for running a shell function async. Uses the function name as the +# async name. +function async_runfunc { + async_run $1 $* +} + +# Dump some information to help debug a failed wait +function async_wait_dump { + local failpid=$1 + + echo "=== Wait failure dump from $BASHPID ===" + echo "Processes:" + ps -f + echo "Waiting jobs:" + for name in $(ls ${DEST}/async/*.ini); do + echo "Job $name :" + cat "$name" + done + echo "Failed PID status:" + sudo cat /proc/$failpid/status + sudo cat /proc/$failpid/cmdline + echo "=== End wait failure dump ===" +} + +# Wait for an async future to complete. 
May return immediately if already +# complete, or if the future has already been waited on (avoid this). May +# block until the future completes. +function async_wait { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local pid rc running inifile runtime fifo + rc=0 + for name in $*; do + running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) + inifile="${DEST}/async/${name}.ini" + fifo="${DEST}/async/${name}.fifo" + + if pid=$(async_pidof "$name"); then + async_log "$name" "Waiting for completion of %command" \ + "running on PID $pid ($running other jobs running)" + time_start async_wait + if [[ "$pid" != "self" ]]; then + # Signal the child to go ahead and exit since we are about to + # wait for it to collect its status. + async_log "$name" "Signaling child to exit" + echo WAKEUP > $fifo + async_log "$name" "Signaled" + # Do not actually call wait if we ran synchronously + if wait $pid; then + rc=0 + else + rc=$? + fi + cat ${DEST}/async/${name}.log + rm -f $fifo + fi + time_stop async_wait + local start_time + local end_time + start_time=$(iniget $inifile job start_time) + end_time=$(iniget $inifile job end_time) + _async_incr_bg_time $(($end_time - $start_time)) + runtime=$((($end_time - $start_time) / 1000)) + async_log "$name" "finished %command with result" \ + "$rc in $runtime seconds" + rm -f $inifile + if [ $rc -ne 0 ]; then + async_wait_dump $pid + echo Stopping async wait due to error: $* + break + fi + else + # This could probably be removed - it is really just here + # to help notice if you wait for something by the wrong + # name, but it also shows up for things we didn't start + # because they were not enabled. + echo Not waiting for async task $name that we never started or \ + has already been waited for + fi + done + + $xtrace + return $rc +} + +# Check for uncollected futures and wait on them +function async_cleanup { + local name + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + for inifile in $(find ${DEST}/async -name '*.ini'); do + name=$(basename $inifile .ini) + echo "WARNING: uncollected async future $name" + async_wait $name || true + done +} + +# Make sure our async dir is created and clean +function async_init { + local async_dir=${DEST}/async + + # Clean any residue if present from previous runs + rm -Rf $async_dir + + # Make sure we have a state directory + mkdir -p $async_dir +} + +function async_print_timing { + local bg_time_minus_wait + local elapsed_time + local serial_time + local speedup + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + # The logic here is: All the background task time would be + # serialized if we did not do them in the background. So we can + # add that to the elapsed time for the whole run. However, time we + # spend waiting for async things to finish adds to the elapsed + # time, but is time where we're not doing anything useful. Thus, + # we subtract that from the would-be-serialized time.
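#
# The intended calling pattern for the async helpers, per the usage note
# at the top of this file (install_foo is a hypothetical function):
async_init
async_runfunc install_foo arg1
# ...unrelated setup work proceeds here in the meantime...
async_wait install_foo
#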
+ + bg_time_minus_wait=$((\ + ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000)) + elapsed_time=$(($(date "+%s") - $_TIME_BEGIN)) + serial_time=$(($elapsed_time + $bg_time_minus_wait)) + + echo + echo "=================" + echo " Async summary" + echo "=================" + echo " Time spent in the background minus waits: $bg_time_minus_wait sec" + echo " Elapsed time: $elapsed_time sec" + echo " Time if we did everything serially: $serial_time sec" + echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}") +} diff --git a/inc/ini-config b/inc/ini-config index 1f12343ae0..920d4775fa 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -88,17 +88,22 @@ function iniget_multiline { } # Determinate is the given option present in the INI file -# ini_has_option config-file section option +# ini_has_option [-sudo] config-file section option function ini_has_option { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") $xtrace [ -n "$line" ] } @@ -173,8 +178,10 @@ function iniset { xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" + local sudo_option="" if [ $1 == "-sudo" ]; then sudo="sudo " + sudo_option="-sudo " shift fi local file=$1 @@ -182,16 +189,21 @@ function iniset { local option=$3 local value=$4 + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') + if [[ -z $section || -z $option ]]; then $xtrace return fi - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null fi - if ! ini_has_option "$file" "$section" "$option"; then + if ! ini_has_option $sudo_option "$file" "$section" "$option"; then # Add it $sudo sed -i -e "/^\[$section\]/ a\\ $option = $value @@ -200,7 +212,7 @@ $option = $value local sep sep=$(echo -ne "\x01") # Replace it - $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('"${option}"'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi $xtrace } @@ -228,7 +240,7 @@ function iniset_multiline { # the reverse order. Do a reverse here to keep the original order. values="$v ${values}" done - if ! grep -q "^\[$section\]" "$file"; then + if ! $sudo grep -q "^\[$section\]" "$file"; then # Add section at the end echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null else @@ -274,6 +286,170 @@ function iniget_sections { $xtrace } +# Set a localrc var +function localrc_set { + local file=$1 + local group="local" + local conf="localrc" + local section="" + local option=$2 + local value=$3 + localconf_set "$file" "$group" "$conf" "$section" "$option" "$value" +} + +# Check if local.conf has section. 
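#
# Example of the escaping fix to iniset above: a value containing "&"
# (and any backslashes) now survives the sed replacement intact. The file
# path and connection string here are hypothetical:
iniset /tmp/example.conf database connection "mysql+pymysql://root:pw@127.0.0.1/nova?charset=utf8&plugin=dbcounter"
#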
+function localconf_has_section { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local sep + sep=$(echo -ne "\x01") + local line + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/p + }" "$file") + [ -n "$line" ] +} + +# Check if local.conf has option. +function localconf_has_option { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local sep + sep=$(echo -ne "\x01") + local line + if [[ -z "$section" ]]; then + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /${option}[ \t]*=.*$/p + }" "$file") + else + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/{ + /${option}[ \t]*=.*$/p} + }" "$file") + fi + [ -n "$line" ] +} + +# Update option in local.conf. +function localconf_update_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + fi +} + +# Add option in local.conf. +function localconf_add_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} a $option=$value" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/ a $option=$value + }" "$file" + fi +} + +# Add section and option in local.conf. +function localconf_add_section_and_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} { + a [$section] + a $option=$value + }" "$file" +} + +# Set an option in a local.conf file. +# localconf_set [-sudo] config-file group conf-name section option value +# - if the file does not exist, it is created +function localconf_set { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sep + sep=$(echo -ne "\x01") + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local value=$6 + + if [[ -z $group || -z $conf || -z $option || -z $value ]]; then + $xtrace + return + fi + + if ! grep -q "^\[\[${group}|${conf}\]\]" "$file" 2>/dev/null; then + # Add meta section at the end if it does not exist + echo -e "\n[[${group}|${conf}]]" | $sudo tee --append "$file" > /dev/null + # Add section at the end + if [[ -n "$section" ]]; then + echo -e "[$section]" | $sudo tee --append "$file" > /dev/null + fi + # Add option at the end + echo -e "$option=$value" | $sudo tee --append "$file" > /dev/null + elif [[ -z "$section" ]]; then + if ! 
localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + elif ! localconf_has_section "$file" "$group" "$conf" "$section"; then + # Add section and option in specified meta section + localconf_add_section_and_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + elif ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + $xtrace +} + # Restore xtrace $INC_CONF_TRACE diff --git a/inc/meta-config b/inc/meta-config index 6eb7a00f69..1215bb8307 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -40,12 +40,10 @@ function get_meta_section { $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' BEGIN { group = "" } /^\[\[.+\|.*\]\]/ { - if (group == "") { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup && a[2] == configfile) { - group=a[1] - } + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup && a[2] == configfile) { + group=a[1] } else { group="" } @@ -183,14 +181,19 @@ function merge_config_group { realconfigfile=$(eval "echo $configfile") if [[ -z $realconfigfile ]]; then - die $LINENO "bogus config file specification: $configfile is undefined" + warn $LINENO "unknown config file specification: $configfile is undefined" + break fi dir=$(dirname $realconfigfile) - if [[ -d $dir ]]; then - merge_config_file $localfile $group $configfile - else - die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir fi + + merge_config_file $localfile $group $configfile done done } diff --git a/inc/python b/inc/python index e013dfab36..3969c1fa82 100644 --- a/inc/python +++ b/inc/python @@ -7,7 +7,6 @@ # External functions used: # - GetOSVersion # - is_fedora -# - is_suse # - safe_chown # Save trace setting @@ -19,16 +18,45 @@ set +o xtrace # PROJECT_VENV contains the name of the virtual environment for each # project. A null value installs to the system Python directories. -declare -A PROJECT_VENV +declare -A -g PROJECT_VENV +# Utility Functions +# ================= + +# Joins bash array of extras with commas as expected by other functions +function join_extras { + local IFS="," + echo "$*" +} # Python Functions # ================ +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. + # This package is currently installed via the distro and not + # available on pypi. 
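#
# Hypothetical usage of the local.conf helpers defined earlier above: set
# a plain localrc variable, then an option inside a
# [[post-config|$NOVA_CONF]] meta-section, creating any missing sections
# along the way (the local.conf path is illustrative):
localrc_set /opt/stack/devstack/local.conf ADMIN_PASSWORD secretadmin
localconf_set /opt/stack/devstack/local.conf post-config '$NOVA_CONF' DEFAULT debug True
#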
+ $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + # Get the path to the pip command. # get_pip_command function get_pip_command { local version="$1" + if [ -z "$version" ]; then + die $LINENO "pip python version is not set." + fi + # NOTE(dhellmann): I don't know if we actually get a pip3.4-python # under any circumstances. which pip${version} || which pip${version}-python @@ -49,8 +77,8 @@ function get_python_exec_prefix { fi $xtrace - if is_fedora || is_suse; then - echo "/usr/bin" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" else echo "/usr/local/bin" fi @@ -69,17 +97,51 @@ function pip_install_gr { pip_install $clean_name } -# Determine the python versions supported by a package -function get_python_versions_for_package { +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification with extras. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr_extras packagename extra1,extra2,... +function pip_install_gr_extras { local name=$1 - cd $name && python setup.py --classifiers \ - | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' ' + local extras=$2 + local version_constraints + version_constraints=$(get_version_constraints_from_global_requirements $name) + pip_install $name[$extras]$version_constraints +} + +# enable_python3_package() -- no-op for backwards compatibility +# +# enable_python3_package dir [dir ...] +function enable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer necessary to call enable_python3_package()." + + $xtrace +} + +# disable_python3_package() -- no-op for backwards compatibility +# +# disable_python3_package dir [dir ...] +function disable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer possible to call disable_python3_package()." + + $xtrace } # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, -# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, -# pip_install package [package ...] +# ``PIP_UPGRADE``, ``*_proxy``, +# Usage: +# pip_install pip_arguments function pip_install { local xtrace result xtrace=$(set +o | grep xtrace) @@ -101,53 +163,49 @@ function pip_install { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi - if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then - # TRACK_DEPENDS=True installation creates a circular dependency when - # we attempt to install virtualenv into a virtualenv, so we must global - # that installation. - source $DEST/.venv/bin/activate - local cmd_pip=$DEST/.venv/bin/pip + + # Try to extract the path of the package we are installing into + # package_dir. We need this to check for test-requirements.txt, + # at least. + # + # ${!#} expands to the last positional argument to this function. + # With "extras" syntax included, our arguments might be something + # like: + # -e /path/to/fooproject[extra] + # Thus this magic line grabs just the path without extras + # + # Note that this makes no sense if this is a pypi (rather than + # local path) install; ergo you must check this path exists before + # use. Also, if we had multiple or mixed installs, we would also + # likely break. 
But for historical reasons, it's basically only + # the other wrapper functions in here calling this to install + # local packages, and they do so with a single call per install. So + # this works (for now...) + local package_dir=${!#%\[*\]} + + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip + local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack bootstrapping needs to operate in a system context + # to bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" + else - if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then - local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip - local sudo_pip="env" - else - local cmd_pip - cmd_pip=$(get_pip_command $PYTHON2_VERSION) - local sudo_pip="sudo -H" - if python3_enabled; then - # Look at the package classifiers to find the python - # versions supported, and if we find the version of - # python3 we've been told to use, use that instead of the - # default pip - local package_dir=${!#} - local python_versions - if [[ -d "$package_dir" ]]; then - python_versions=$(get_python_versions_for_package $package_dir) - if [[ $python_versions =~ $PYTHON3_VERSION ]]; then - cmd_pip=$(get_pip_command $PYTHON3_VERSION) - fi - fi - fi - fi + local cmd_pip="python$PYTHON3_VERSION -m pip" + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" + echo "Using python $PYTHON3_VERSION to install $package_dir" fi cmd_pip="$cmd_pip install" # Always apply constraints cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" - # FIXME(dhellmann): Need to force multiple versions of pip for - # packages like setuptools? - local pip_version - pip_version=$(python -c "import pip; \ - print(pip.__version__.strip('.')[0])") - if (( pip_version<6 )); then - die $LINENO "Currently installed pip version ${pip_version} does not" \ - "meet minimum requirements (>=6)." - fi - $xtrace + $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ @@ -157,24 +215,26 @@ function pip_install { $@ result=$? - # Also install test requirements - local test_req="${!#}/test-requirements.txt" - if [[ $result == 0 ]] && [[ -e "$test_req" ]]; then - echo "Installing test-requirements for $test_req" - $sudo_pip \ - http_proxy=${http_proxy:-} \ - https_proxy=${https_proxy:-} \ - no_proxy=${no_proxy:-} \ - PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip $upgrade \ - -r $test_req - result=$?
-    fi
-
    time_stop "pip_install"
    return $result
}

+function pip_uninstall {
+    # Skip uninstall if offline
+    [[ "${OFFLINE}" = "True" ]] && return
+
+    local name=$1
+    if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+        local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
+        local sudo_pip="env"
+    else
+        local cmd_pip="python$PYTHON3_VERSION -m pip"
+        local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
+    fi
+    # don't error if we can't uninstall, it might not be there
+    $sudo_pip $cmd_pip uninstall -y $name || /bin/true
+}
+
# get version of a package from global requirements file
# get_from_global_requirements
function get_from_global_requirements {
@@ -187,6 +247,19 @@ function get_from_global_requirements {
    echo $required_pkg
}

+# get only version constraints of a package from global requirements file
+# get_version_constraints_from_global_requirements
+function get_version_constraints_from_global_requirements {
+    local package=$1
+    local required_pkg_version_constraint
+    # drop the package name from output (\K)
+    required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    if [[ $required_pkg_version_constraint == "" ]]; then
+        die $LINENO "Can't find package $package in requirements"
+    fi
+    echo $required_pkg_version_constraint
+}
+
# should we use this library from their git repo, or should we let it
# get pulled in via pip dependencies.
function use_library_from_git {
@@ -199,23 +272,22 @@ function use_library_from_git {
# determine if a package was installed from git
function lib_installed_from_git {
    local name=$1
-    pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git'
-}
-
-# check that everything that's in LIBS_FROM_GIT was actually installed
-# correctly, this helps double check issues with library fat fingering.
-function check_libs_from_git {
-    local lib=""
-    local not_installed=""
-    for lib in $(echo ${LIBS_FROM_GIT} | tr "," " "); do
-        if ! lib_installed_from_git "$lib"; then
-            not_installed+=" $lib"
-        fi
-    done
-    # if anything is not installed, say what it is.
-    if [[ -n "$not_installed" ]]; then
-        die $LINENO "The following LIBS_FROM_GIT were not installed correct: $not_installed"
-    fi
+    local safe_name
+    safe_name=$(python -c "from packaging.utils import canonicalize_name; print(canonicalize_name('${name}'))")
+    # Note "pip freeze" doesn't always work here, because it tries to
+    # be smart about finding the remote of the git repo the package
+    # was installed from. This doesn't work with zuul which clones
+    # repos with no remote.
+    #
+    # The best option seems to be to use "pip list" which will tell
+    # you the path an editable install was installed from; for example
+    # in response to something like
+    # pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate'
+    # pip list --format columns shows
+    # bashate 0.5.2.dev19 /tmp/env/src/bashate
+    # Thus we check the third column to see if we're installed from
+    # some local place.
+    [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]]
}

# setup a library by name. If we are trying to use the library from
@@ -234,10 +306,18 @@ function setup_lib {
# another project.
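# --- illustration only; not part of the patch ---
# The \K extraction used by get_version_constraints_from_global_requirements
# above can be exercised on its own; it needs GNU grep (-P enables PCRE),
# and the requirements line below is made up:
reqs=$(mktemp)
echo 'oslo.config>=5.2.0  # Apache-2.0' > $reqs
# \K discards everything matched before it, so -o prints only what follows
# the package name; cut then drops the trailing license comment.
grep -i -o -P "^oslo.config\K.*" $reqs | cut -d\# -f1
# prints the bare constraint: >=5.2.0
rm -f $reqs
# --- end illustration ---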
# # use this for non namespaced libraries +# +# setup_dev_lib [-bindep] [] function setup_dev_lib { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local name=$1 local dir=${GITDIR[$name]} - setup_develop $dir + local extras=$2 + setup_develop $bindep $dir $extras } # this should be used if you want to install globally, all libraries should @@ -247,12 +327,18 @@ function setup_dev_lib { # project_dir: directory of project repo (e.g., /opt/stack/keystone) # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +# bindep: Set "-bindep" as first argument to install bindep.txt packages # The command is like "pip install []" function setup_install { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local extras=$2 - _setup_package_with_constraints_edit $project_dir "" $extras + _setup_package_with_constraints_edit $bindep $project_dir "" $extras } # this should be used for projects which run services, like all services @@ -261,23 +347,17 @@ function setup_install { # project_dir: directory of project repo (e.g., /opt/stack/keystone) # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install -e []" function setup_develop { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local extras=$2 - _setup_package_with_constraints_edit $project_dir -e $extras -} - -# determine if a project as specified by directory is in -# projects.txt. This will not be an exact match because we throw away -# the namespacing when we clone, but it should be good enough in all -# practical ways. -function is_in_projects_txt { - local project_dir=$1 - local project_name - project_name=$(basename $project_dir) - grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt + _setup_package_with_constraints_edit $bindep $project_dir -e $extras } # ``pip install -e`` the package, which processes the dependencies @@ -293,9 +373,14 @@ function is_in_projects_txt { # flags: pip CLI options/flags # extras: comma-separated list of optional dependencies to install # (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements # The command is like "pip install []" function _setup_package_with_constraints_edit { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi local project_dir=$1 local flags=$2 local extras=$3 @@ -308,30 +393,54 @@ function _setup_package_with_constraints_edit { project_dir=$(cd $project_dir && pwd) if [ -n "$REQUIREMENTS_DIR" ]; then - # Constrain this package to this project directory from here on out. + # Remove this package from constraints before we install it. + # That way, later installs won't "downgrade" the install from + # source we are about to do. 
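# --- illustration only; not part of the patch ---
# Roughly what "remove this package from constraints" amounts to: a
# sketch with sed standing in for the real edit-constraints helper, on a
# throwaway constraints file with made-up pins.
constraints=$(mktemp)
printf '%s\n' 'foo===1.0.0' 'bar===2.0.0' > $constraints
# Drop the pin for "foo" so a later "pip install -c $constraints -e ./foo"
# is not dragged back to 1.0.0.
sed -i '/^foo===/d' $constraints
cat $constraints   # only bar===2.0.0 remains
rm -f $constraints
# --- end illustration ---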
local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + if [ -z $name ]; then + name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml) + fi $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ - $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ - "$flags file://$project_dir#egg=$name" + $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi - setup_package $project_dir "$flags" $extras + setup_package $bindep $project_dir "$flags" $extras + # If this project is in LIBS_FROM_GIT, verify it was actually installed + # correctly. This helps catch errors caused by constraints mismatches. + if use_library_from_git "$project_dir"; then + if ! lib_installed_from_git "$project_dir"; then + die $LINENO "The following LIBS_FROM_GIT was not installed correctly: $project_dir" + fi + fi } # ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` +# using pip before running `setup.py develop`. The command is like +# "pip install []" # # Uses globals ``STACK_USER`` -# setup_package project_dir [flags] [extras] -# project_dir: directory of project repo (e.g., /opt/stack/keystone) -# flags: pip CLI options/flags -# extras: comma-separated list of optional dependencies to install -# (e.g., ldap,memcache). -# See http://docs.openstack.org/developer/pbr/#extra-requirements -# The command is like "pip install []" +# +# Usage: +# setup_package [-bindep[=profile,profile]] [extras] +# +# -bindep : Use bindep to install dependencies; select extra profiles +# as comma separated arguments after "=" +# project_dir : directory of project repo (e.g., /opt/stack/keystone) +# flags : pip CLI options/flags +# extras : comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). 
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
function setup_package {
+    local bindep=0
+    local bindep_flag=""
+    local bindep_profiles=""
+    if [[ $1 == -bindep* ]]; then
+        bindep=1
+        IFS="=" read bindep_flag bindep_profiles <<< ${1}
+        shift
+    fi
    local project_dir=$1
    local flags=$2
    local extras=$3
@@ -347,29 +456,49 @@ function setup_package {
        extras="[$extras]"
    fi

+    # install any bindep packages
+    if [[ $bindep == 1 ]]; then
+        install_bindep $project_dir/bindep.txt $bindep_profiles
+    fi
+
    pip_install $flags "$project_dir$extras"
    # ensure that further actions can do things like setup.py sdist
-    if [[ "$flags" == "-e" ]]; then
-        safe_chown -R $STACK_USER $1/*.egg-info
+    if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then
+        # egg-info is not created when a project has a pyproject.toml
+        if [ -d $1/*.egg-info ]; then
+            safe_chown -R $STACK_USER $1/*.egg-info
+        fi
    fi
}

# Report whether python 3 should be used
+# TODO(frickler): drop this once all legacy uses are removed
function python3_enabled {
-    if [[ $USE_PYTHON3 == "True" ]]; then
-        return 0
-    else
-        return 1
-    fi
+    return 0
+}
+
+# Provides the requested python version and sets the PYTHON variable
+function install_python {
+    install_python3
+    export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null)
}

# Install python3 packages
function install_python3 {
    if is_ubuntu; then
-        apt_get install python3.4 python3.4-dev
+        apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
+    elif is_fedora; then
+        install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip
    fi
}

+function install_devstack_tools {
+    # intentionally old to ensure devstack-gate has control
+    local dstools_version=${DSTOOLS_VERSION:-0.1.2}
+    install_python3
+    sudo pip3 install -U devstack-tools==${dstools_version}
+}
+
# Restore xtrace
$INC_PY_TRACE
diff --git a/inc/rootwrap b/inc/rootwrap
index 2a6e4b648f..4c65440a4e 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -60,6 +60,11 @@ function configure_rootwrap {
    sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf
    sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf

+    # Rely on $PATH set by devstack to determine what is safe to execute
+    # by rootwrap rather than using an explicit whitelist of paths in
+    # rootwrap.conf
+    sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf
+
    # Set up the rootwrap sudoers
    local tempfile
    tempfile=$(mktemp)
diff --git a/lib/apache b/lib/apache
index 2c84c7a481..b3379a7cde 100644
--- a/lib/apache
+++ b/lib/apache
@@ -27,83 +27,139 @@ set +o xtrace
APACHE_USER=${APACHE_USER:-$STACK_USER}
APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}

+APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+    APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST]
+fi
+
# Set up apache name and configuration directory
+# Note that APACHE_CONF_DIR is really more accurately apache's vhost
+# configuration dir, but we can't just change it because it is a public
+# interface.
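# --- illustration only; not part of the patch ---
# In the rootwrap change above, sed comments the exec_dirs line out by
# replaying the entire match with "&"; the same expression standalone:
echo 'exec_dirs=/sbin,/usr/sbin' | sed -e 's/^exec_dirs=.*/#&/'
# prints: #exec_dirs=/sbin,/usr/sbin
# --- end illustration ---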
if is_ubuntu; then APACHE_NAME=apache2 APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled} elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi +APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Functions # --------- -# install_apache_wsgi() - Install Apache server and wsgi module -function install_apache_wsgi { + +# Enable apache mod and restart apache if it isn't already enabled. +function enable_apache_mod { + local mod=$1 + local should_restart=$2 # Apache installation, because we mark it NOPRIME if is_ubuntu; then - # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi + # Skip mod_version as it is not a valid mod to enable + # on debuntu, instead it is built in. + if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then + sudo a2enmod $mod + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi + fi elif is_fedora; then - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi - elif is_suse; then - install_package apache2 apache2-mod_wsgi - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi + # pass + true else - exit_distro_not_supported "apache installation" + exit_distro_not_supported "apache enable mod" fi - - # ensure mod_version enabled for . This is - # built-in statically on anything recent, but precise (2.2) - # doesn't have it enabled - sudo a2enmod version || true } -# get_apache_version() - return the version of Apache installed -# This function is used to determine the Apache version installed. There are -# various differences between Apache 2.2 and 2.4 that warrant special handling. -function get_apache_version { +# NOTE(sdague): Install uwsgi including apache module, we need to get +# to 2.0.6+ to get a working mod_proxy_uwsgi. We can probably build a +# check for that and do it differently for different platforms. +function install_apache_uwsgi { + local apxs="apxs2" + if is_fedora; then + apxs="apxs" + fi + if is_ubuntu; then - local version_str - version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/) - elif is_fedora; then - local version_str - version_str=$(rpm -qa --queryformat '%{VERSION}' httpd) - elif is_suse; then - local version_str - version_str=$(rpm -qa --queryformat '%{VERSION}' apache2) + local pkg_list="uwsgi uwsgi-plugin-python3" + install_package ${pkg_list} + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then + # Note httpd comes with mod_proxy_uwsgi and it is loaded by + # default; the mod_proxy_uwsgi package actually conflicts now. + # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1574335 + # + # Thus there is nothing else to do after this install + install_package uwsgi \ + uwsgi-plugin-python3 else - exit_distro_not_supported "cannot determine apache version" + # Compile uwsgi from source. 
+        local dir
+        dir=$(mktemp -d)
+        pushd $dir
+        pip_install uwsgi
+        pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
+        local uwsgi
+        uwsgi=$(ls uwsgi*)
+        tar xvf $uwsgi
+        cd uwsgi*/apache2
+        sudo $apxs -i -c mod_proxy_uwsgi.c
+        popd
+        # delete the temp directory
+        sudo rm -rf $dir
+    fi
+
+    if is_ubuntu; then
+        if ! a2query -m proxy || ! a2query -m proxy_uwsgi ; then
+            # we've got to enable proxy and proxy_uwsgi for this to work
+            sudo a2enmod proxy
+            sudo a2enmod proxy_uwsgi
+            restart_apache_server
+        fi
    fi
-    if [[ "$version_str" =~ ^2\.2\. ]]; then
-        echo "2.2"
-    elif [[ "$version_str" =~ ^2\.4\. ]]; then
-        echo "2.4"
+}
+
+# install_apache_wsgi() - Install Apache server and wsgi module
+function install_apache_wsgi {
+    # Apache installation, because we mark it NOPRIME
+    if is_ubuntu; then
+        # Install apache2, which is NOPRIME'd
+        install_package apache2
+        if is_package_installed libapache2-mod-wsgi; then
+            uninstall_package libapache2-mod-wsgi
+        fi
+        install_package libapache2-mod-wsgi-py3
+    elif is_fedora; then
+        sudo rm -f /etc/httpd/conf.d/000-*
+        install_package httpd python${PYTHON3_VERSION}-mod_wsgi
+        # rpm distros don't enable httpd by default, so enable it to support reboots.
+        sudo systemctl enable httpd
+        # For consistency with Ubuntu, switch to the worker mpm, as
+        # the default is event
+        sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+        sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+        sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf
    else
-        exit_distro_not_supported "apache version not supported"
+        exit_distro_not_supported "apache wsgi installation"
    fi
+    # WSGI isn't enabled by default, enable it
+    enable_apache_mod wsgi
}

# apache_site_config_for() - The filename of the site's configuration file.
# This function uses the global variables APACHE_NAME and APACHE_CONF_DIR.
#
-# On Ubuntu 14.04, the site configuration file must have a .conf suffix for a2ensite and a2dissite to
+# On Ubuntu 14.04+, the site configuration file must have a .conf suffix for a2ensite and a2dissite to
# recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites'
# files are 000-default.conf and default-ssl.conf.
#
-# On Ubuntu 12.04, the site configuration file may have any format, as long as it is in
-# /etc/apache2/sites-available/. a2ensite and a2dissite need the entire file name to work. The default
-# sites' files are default and default-ssl.
-#
-# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled.
+# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled.
#
# On RHEL and CentOS, things should hopefully work as in Fedora.
#
@@ -111,23 +167,15 @@ function get_apache_version {
# +----------------------+--------------------+--------------------------+--------------------------+
# | Distribution         | File name          | Site enabling command    | Site disabling command   |
# +----------------------+--------------------+--------------------------+--------------------------+
-# | Ubuntu 12.04         | site               | a2ensite site            | a2dissite site           |
# | Ubuntu 14.04         | site.conf          | a2ensite site            | a2dissite site           |
# | Fedora, RHEL, CentOS | site.conf.disabled | mv site.conf{.disabled,} | mv site.conf{,.disabled} |
# +----------------------+--------------------+--------------------------+--------------------------+
function apache_site_config_for {
    local site=$@
    if is_ubuntu; then
-        local apache_version
-        apache_version=$(get_apache_version)
-        if [[ "$apache_version" == "2.2" ]]; then
-            # Ubuntu 12.04 - Apache 2.2
-            echo $APACHE_CONF_DIR/${site}
-        else
-            # Ubuntu 14.04 - Apache 2.4
-            echo $APACHE_CONF_DIR/${site}.conf
-        fi
-    elif is_fedora || is_suse; then
+        # Ubuntu 14.04 - Apache 2.4
+        echo $APACHE_CONF_DIR/${site}.conf
+    elif is_fedora; then
        # fedora conf.d is only imported if it ends with .conf so this is approx the same
        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
        if [ -f $enabled_site_file ]; then
@@ -141,9 +189,11 @@ function apache_site_config_for {
# enable_apache_site() - Enable a particular apache site
function enable_apache_site {
    local site=$@
+    # Many of our sites use mod version. Just enable it.
+    enable_apache_mod version
    if is_ubuntu; then
        sudo a2ensite ${site}
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
        # Do nothing if site already enabled or no site config exists
        if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then
@@ -156,8 +206,8 @@ function enable_apache_site {
function disable_apache_site {
    local site=$@
    if is_ubuntu; then
-        sudo a2dissite ${site}
-    elif is_fedora || is_suse; then
+        sudo a2dissite ${site} || true
+    elif is_fedora; then
        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
        # Do nothing if no site config exists
        if [[ -f ${enabled_site_file} ]]; then
@@ -185,11 +235,186 @@ function restart_apache_server {
    # Apache can be slow to stop, doing an explicit stop, sleep, start helps
    # to mitigate issues where apache will claim a port it's listening on is
    # still in use and fail to start.
-    time_start "restart_apache_server"
-    stop_service $APACHE_NAME
-    sleep 3
-    start_service $APACHE_NAME
-    time_stop "restart_apache_server"
+    restart_service $APACHE_NAME
+}
+
+# write_uwsgi_config() - Create a new uWSGI config file
+function write_uwsgi_config {
+    local conf=$1
+    local wsgi=$2
+    local url=$3
+    local http=$4
+    local name=$5
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
+
+    # create a home for the sockets; note don't use /tmp -- apache has
+    # a private view of it on some platforms.
+    local socket_dir='/var/run/uwsgi'
+
+    # /var/run will be empty on ubuntu after reboot, so we can use systemd-tmpfiles
+    # to automatically create $socket_dir.
+    sudo mkdir -p /etc/tmpfiles.d/
+    echo "d $socket_dir 0755 $STACK_USER root" | sudo tee /etc/tmpfiles.d/uwsgi.conf
+    sudo systemd-tmpfiles --create /etc/tmpfiles.d/uwsgi.conf
+
+    local socket="$socket_dir/${name}.socket"
+
+    # always cleanup given that we are using iniset here
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given.
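# --- illustration only; not part of the patch ---
# The module-vs-file dispatch implemented just below can be sanity-checked
# on its own; the first input is the module path cinder uses later in this
# patch, the second a hypothetical legacy script path.
for wsgi in "cinder.wsgi.api:application" "/usr/local/bin/cinder-wsgi"; do
    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
        echo "$wsgi -> module path"
    else
        echo "$wsgi -> wsgi file (deprecated)"
    fi
done
# --- end illustration ---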
Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + iniset "$conf" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$conf" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Override the default size for headers from the 4k default. + iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + # If we said bind directly to http, then do that and don't start the apache proxy + if [[ -n "$http" ]]; then + iniset "$conf" uwsgi http $http + else + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server + fi +} + +# For services using chunked encoding, the only services known to use this +# currently are Glance and Swift, we need to use an http proxy instead of +# mod_proxy_uwsgi because the chunked encoding gets dropped. See: +# https://github.com/unbit/uwsgi/issues/1540. +function write_local_uwsgi_http_config { + local conf=$1 + local wsgi=$2 + local url=$3 + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. 
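# --- illustration only; not part of the patch ---
# The "private view" of /tmp mentioned above typically comes from
# systemd's PrivateTmp setting on the distro apache unit; it can be
# inspected with (unit name is apache2 on Debian/Ubuntu, httpd on Fedora):
systemctl show apache2 --property=PrivateTmp
# PrivateTmp=yes means sockets created in /tmp are invisible to apache.
# --- end illustration ---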
+ + # always cleanup given that we are using iniset here + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + port=$(get_random_port) + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$conf" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Override the default size for headers from the 4k default. + iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false + # Increase socket timeout for slow chunked uploads + iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + enable_apache_mod proxy + enable_apache_mod proxy_http + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +# Write a straight-through proxy for a service that runs locally and just needs +# to be reachable via the main http proxy at $loc +function write_local_proxy_http_config { + local name=$1 + local url=$2 + local loc=$3 + local apache_conf + apache_conf=$(apache_site_config_for $name) + + enable_apache_mod proxy + enable_apache_mod proxy_http + + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +function remove_uwsgi_config { + local conf=$1 + local wsgi=$2 + local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration + name=$(basename $wsgi) + + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + + rm -rf $conf + disable_apache_site $name } # Restore xtrace diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..25c8e9a83f --- /dev/null +++ b/lib/atop @@ -0,0 +1,49 @@ +#!/bin/bash +# 
+# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + mkdir -p $LOGDIR/atop + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/lib/ceph b/lib/ceph deleted file mode 100644 index e999647ed8..0000000000 --- a/lib/ceph +++ /dev/null @@ -1,382 +0,0 @@ -#!/bin/bash -# -# lib/ceph -# Functions to control the configuration and operation of the **Ceph** storage service - -# Dependencies: -# -# - ``functions`` file -# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined - -# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``): -# -# - install_ceph -# - configure_ceph -# - init_ceph -# - start_ceph -# - stop_ceph -# - cleanup_ceph - -# Save trace setting -_XTRACE_LIB_CEPH=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects. -# Default is the common DevStack data directory. -CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph} -CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img - -# Set ``CEPH_CONF_DIR`` to the location of the configuration files. -# Default is ``/etc/ceph``. -CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph} - -# DevStack will create a loop-back disk formatted as XFS to store the -# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in -# kilobytes. -# Default is 1 gigabyte. -CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G -CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT} - -# Common -CEPH_FSID=$(uuidgen) -CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf - -# Glance -GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance} -GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images} -GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8} -GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8} - -# Nova -NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms} -NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8} -NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8} - -# Cinder -CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes} -CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8} -CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8} -CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder} -CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)} - -# Set ``CEPH_REPLICAS`` to configure how many replicas are to be -# configured for your Ceph cluster. By default we are configuring -# only one replica since this is way less CPU and memory intensive. If -# you are planning to test Ceph replication feel free to increase this value -CEPH_REPLICAS=${CEPH_REPLICAS:-1} -CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) - -# Connect to an existing Ceph cluster -REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) -REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} - -# Cinder encrypted volume tests are not supported with a Ceph backend due to -# bug 1463525. 
-ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False - - -# Functions -# ------------ - -function get_ceph_version { - local ceph_version_str - ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.') - echo $ceph_version_str -} - -# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt -# so it can connect to the Ceph cluster while attaching a Cinder block device -function import_libvirt_secret_ceph { - cat > secret.xml < - ${CINDER_CEPH_UUID} - - client.${CINDER_CEPH_USER} secret - - -EOF - sudo virsh secret-define --file secret.xml - sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER}) - sudo rm -f secret.xml -} - -# undefine_virsh_secret() - Undefine Cinder key secret from libvirt -function undefine_virsh_secret { - if is_service_enabled cinder || is_service_enabled nova; then - local virsh_uuid - virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') - sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1 - fi -} - - -# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph -function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f22|f23|f24) ]]; then - echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)" - if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then - die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" - fi - NO_UPDATE_REPOS=False - fi -} - -# cleanup_ceph() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_ceph_remote { - # do a proper cleanup from here to avoid leftover on the remote Ceph cluster - if is_service_enabled glance; then - sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled cinder; then - sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled c-bak; then - sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1 - fi - if is_service_enabled nova; then - iniset $NOVA_CONF libvirt rbd_secret_uuid "" - sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1 - fi -} - -function cleanup_ceph_embedded { - sudo killall -w -9 ceph-mon - sudo killall -w -9 ceph-osd - sudo rm -rf ${CEPH_DATA_DIR}/*/* - if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then - sudo umount ${CEPH_DATA_DIR} - fi - if [[ -e ${CEPH_DISK_IMAGE} ]]; then - sudo rm -f ${CEPH_DISK_IMAGE} - fi - - # purge ceph config file and keys - sudo rm -rf ${CEPH_CONF_DIR}/* -} - -function cleanup_ceph_general { - undefine_virsh_secret -} - - -# configure_ceph() - Set config files, create data dirs, etc -function configure_ceph { - local count=0 - - # create a backing file disk - create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE} - - # populate ceph directory - sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp} - - # create ceph monitor initial key and directory - sudo ceph-authtool 
/var/lib/ceph/tmp/keyring.mon.$(hostname) \ - --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \ - --cap mon 'allow *' - sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) - - # create a default ceph configuration file - sudo tee ${CEPH_CONF_FILE} > /dev/null < /dev/null - sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - fi -} - -function configure_ceph_embedded_cinder { - # Configure Cinder service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID} - fi -} - -# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up -function configure_ceph_cinder { - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \ - mon "allow r" \ - osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \ - sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring - sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring -} - -# init_ceph() - Initialize databases, etc. -function init_ceph { - # clean up from previous (possibly aborted) runs - # make sure to kill all ceph processes first - sudo pkill -f ceph-mon || true - sudo pkill -f ceph-osd || true -} - -# install_ceph() - Collect source and prepare -function install_ceph_remote { - install_package ceph-common -} - -function install_ceph { - install_package ceph -} - -# start_ceph() - Start running processes, including screen -function start_ceph { - if is_ubuntu; then - sudo initctl emit ceph-mon id=$(hostname) - for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do - sudo start ceph-osd id=${id} - done - else - sudo service ceph start - fi -} - -# stop_ceph() - Stop running processes (non-screen) -function stop_ceph { - if is_ubuntu; then - sudo service ceph-mon-all stop > /dev/null 2>&1 - sudo service ceph-osd-all stop > /dev/null 2>&1 - else - sudo service ceph stop > /dev/null 2>&1 - fi -} - - -# Restore xtrace -$_XTRACE_LIB_CEPH - -## Local variables: -## mode: shell-script -## End: diff --git a/lib/cinder b/lib/cinder index 0ebf195422..02056c20f4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,6 +31,7 @@ set +o xtrace CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups # grab plugin config if specified via cinder_driver if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -39,10 +40,16 @@ fi # set up default directories GITDIR["python-cinderclient"]=$DEST/python-cinderclient -GITDIR["os-brick"]=$DEST/os-brick GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + # Cinder virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["cinder"]=${CINDER_DIR}.venv @@ -52,26 +59,39 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} 
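# --- illustration only; not part of the patch ---
# Note the ":=" in the two assignments above: unlike ":-", it writes the
# default back into the variable as a side effect, so later readers of
# CINDER_STATE_PATH and OS_BRICK_LOCK_PATH see the default too.
unset DEMO_DIR
echo ${DEMO_DIR:-/tmp/demo}   # substitutes only; DEMO_DIR stays unset
: ${DEMO_DIR:=/tmp/demo}      # substitutes and assigns
echo $DEMO_DIR                # /tmp/demo
# --- end illustration ---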
CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf +CINDER_UWSGI=cinder.wsgi.api:application +CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Public facing bits -if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then CINDER_SERVICE_PROTOCOL="https" fi CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} + +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} # What type of LVM device should Cinder use for LVM backend -# Defaults to default, which is thick, the other valid choice -# is thin, which as the name implies utilizes lvm thin provisioning. -CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} +# Defaults to auto, which will do thin provisioning if it's a fresh +# volume group, otherwise it will do thick. The other valid choices are +# default, which is thick, or thin, which as the name implies utilizes lvm +# thin provisioning. +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} + +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. +CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) # Default backends # The backend format is type:name where type is one of the supported backend @@ -82,35 +102,64 @@ CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} # CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2} CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} +CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} +CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') + +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} -# Should cinder perform secure deletion of volumes? -# Defaults to zero. Can also be set to none or shred. -# This was previously CINDER_SECURE_DELETE (True or False). -# Equivalents using CINDER_VOLUME_CLEAR are zero and none, respectively. -# Set to none to avoid this bug when testing: -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 -if [[ -n $CINDER_SECURE_DELETE ]]; then - CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE) - if [[ $CINDER_SECURE_DELETE == "False" ]]; then - CINDER_VOLUME_CLEAR_DEFAULT="none" +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' fi - deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE." 
fi -CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} -CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} +else + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + -# Cinder reports allocations back to the scheduler on periodic intervals -# it turns out we can get an "out of space" issue when we run tests too -# quickly just because cinder didn't realize we'd freed up resources. -# Make this configurable so that devstack-gate/tempest can set it to -# less than the 60 second default -# https://bugs.launchpad.net/cinder/+bug/1180976 -CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} +# EL should only use lioadm +if is_fedora; then + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" + fi +fi + +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi -CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured +# along with ceph backend driver. +if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi -# Toggle for deploying Cinder under HTTPD + mod_wsgi -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False} +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -123,12 +172,43 @@ if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then done fi -# Change the default nova_catalog_info and nova_catalog_admin_info values in -# cinder so that the service name cinder is searching for matches that set for -# nova in keystone. 
-CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL} -CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL} +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi + +# Environment variables to configure the image-volume cache +CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} + +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} +# For limits, if left unset, it will use cinder defaults of 0 for unlimited +CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} +CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} + +# Configure which cinder backends will have the image-volume cache, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} + +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Volume API policies to start checking the scope of token. by default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) # Functions # --------- @@ -136,6 +216,7 @@ CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:ad # Test if any Cinder services are enabled # is_cinder_enabled function is_cinder_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"cinder" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 return 1 } @@ -150,7 +231,7 @@ function _cinder_cleanup_apache_wsgi { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then local targets targets=$(sudo tgtadm --op show --mode target) if [ $? 
-ne 0 ]; then @@ -178,8 +259,14 @@ function cleanup_cinder { else stop_service tgtd fi - else + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -193,55 +280,28 @@ function cleanup_cinder { done fi - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - _cinder_cleanup_apache_wsgi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi fi -} -# _cinder_config_apache_wsgi() - Set WSGI config files -function _cinder_config_apache_wsgi { - local cinder_apache_conf - cinder_apache_conf=$(apache_site_config_for osapi-volume) - local cinder_ssl="" - local cinder_certfile="" - local cinder_keyfile="" - local cinder_api_port=$CINDER_SERVICE_PORT - local venv_path="" - - if is_ssl_enabled_service c-api; then - cinder_ssl="SSLEngine On" - cinder_certfile="SSLCertificateFile $CINDER_SSL_CERT" - cinder_keyfile="SSLCertificateKeyFile $CINDER_SSL_KEY" - fi - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages" - fi - - # copy proxy vhost file - sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$cinder_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%APIWORKERS%|$API_WORKERS|g - s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g; - s|%SSLENGINE%|$cinder_ssl|g; - s|%SSLCERTFILE%|$cinder_certfile|g; - s|%SSLKEYFILE%|$cinder_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $cinder_apache_conf + stop_process "c-api" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc function configure_cinder { sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR - rm -f $CINDER_CONF configure_rootwrap cinder + if [[ -f "$CINDER_DIR/etc/cinder/resource_filters.json" ]]; then + cp -p "$CINDER_DIR/etc/cinder/resource_filters.json" "$CINDER_CONF_DIR/resource_filters.json" + fi + cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host @@ -253,15 +313,11 @@ function configure_cinder { inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR + configure_keystone_authtoken_middleware $CINDER_CONF cinder - iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO - iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO - - iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" @@ -269,12 +325,25 @@ function 
configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" + iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager + iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi - iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} - iniset $CINDER_CONF privsep_osbrick helper_command "sudo cinder-rootwrap \$rootwrap_config privsep-helper --config-file $CINDER_CONF" + # Avoid RPC timeouts in slow CI and test environments by doubling the + # default response timeout set by RPC clients. See bug #1873234 for more + # details and example failures. + iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" @@ -295,21 +364,35 @@ function configure_cinder { if [[ -n "$default_name" ]]; then iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi + configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. 
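# --- illustration only; not part of the patch ---
# The backend and backup-driver hooks in this file (for example the
# configure_cinder_backup_* calls a few lines below) are dispatched by
# building a function name and probing it with "type"; the pattern in
# isolation, with a made-up driver name:
function configure_cinder_backup_demo {
    echo "configuring demo backup driver"
}
driver=demo
if type configure_cinder_backup_$driver >/dev/null 2>&1; then
    configure_cinder_backup_$driver
fi
# --- end illustration ---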
+ if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi fi - if is_service_enabled swift; then - iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi fi if is_service_enabled ceilometer; then - iniset $CINDER_CONF oslo_messaging_notifications driver "messaging" + iniset $CINDER_CONF oslo_messaging_notifications driver "messagingv2" fi if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True + fi fi if [ "$SYSLOG" != "False" ]; then @@ -318,18 +401,11 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF - iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" - else - # Set req-id, project-name and resource in log format - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s" - fi + setup_logging $CINDER_CONF - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - _cinder_config_apache_wsgi + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -338,90 +414,85 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" - iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" - if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" + if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $CINDER_CONF DEFAULT glance_api_version 2 - fi + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + + # Set nova credentials (used for os-assisted-snapshots) + configure_keystone_authtoken_middleware $CINDER_CONF nova nova + iniset $CINDER_CONF nova region_name "$REGION_NAME" + iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - # Register SSL certificates if provided - if is_ssl_enabled_service cinder; then - ensure_certificates CINDER + if [[ ! 
-z "$CINDER_COORDINATION_URL" ]]; then + iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" + elif is_service_enabled etcd3; then + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" + fi - iniset $CINDER_CONF DEFAULT ssl_cert_file "$CINDER_SSL_CERT" - iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY" + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false fi - # Set os_privileged_user credentials (used for os-assisted-snapshots) - iniset $CINDER_CONF DEFAULT os_privileged_user_name nova - iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" - iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME" - iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf + fi } # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { - # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - create_service_user "cinder" + local extra_role="" - get_or_create_service "cinder" "volume" "Cinder Volume Service" - get_or_create_endpoint \ - "volume" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s" + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") + fi - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" + create_service_user "cinder" $extra_role - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" + local cinder_api_url + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" + + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" get_or_create_endpoint \ - "volumev3" \ + "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + "$cinder_api_url/v3" + configure_cinder_internal_tenant fi } -# create_cinder_cache_dir() - Part of 
the init_cinder() process -function create_cinder_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* -} - # init_cinder() - Initialize database and volume group -# Uses global ``NOVA_ENABLED_APIS`` function init_cinder { - # Force nova volumes off - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") - if is_service_enabled $DATABASE_BACKENDS; then # (Re)create cinder database recreate_database cinder + time_start "dbsync" # Migrate cinder database $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync + time_stop "dbsync" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -430,42 +501,86 @@ function init_cinder { be_type=${be%%:*} be_name=${be##*:} if type init_cinder_backend_${be_type} >/dev/null 2>&1; then - # Always init the default volume group for lvm. - if [[ "$be_type" == "lvm" ]]; then - init_default_lvm_volume_group - fi init_cinder_backend_${be_type} ${be_name} fi done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + init_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + mkdir -p $CINDER_STATE_PATH/volumes - create_cinder_cache_dir } -# install_cinder() - Collect source and prepare -function install_cinder { - # Install os-brick from git so we make sure we're testing - # the latest code. - if use_library_from_git "os-brick"; then - git_clone_by_name "os-brick" - setup_dev_lib "os-brick" + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH fi +} +# install_cinder() - Collect source and prepare +function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then - if is_fedora; then - install_package scsi-target-utils + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then + install_package tgt + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then + if is_ubuntu; then + # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 + sudo mkdir -p /etc/target + + install_package targetcli-fb else - install_package tgt + install_package targetcli fi - fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - if is_ssl_enabled_service "c-api"; then - enable_mod_ssl + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a 
| grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc fi fi } @@ -494,26 +609,18 @@ function _configure_tgt_for_config_d { fi } -# start_cinder() - Start running processes, including screen +# start_cinder() - Start running processes function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$CINDER_SERVICE_PORT_INT - service_protocol="http" - fi - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + local cinder_url + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_suse; then - # NOTE(dmllr): workaround restart bug - # https://bugzilla.suse.com/show_bug.cgi?id=934642 - stop_service tgtd - start_service tgtd else restart_service tgtd fi @@ -522,46 +629,53 @@ function start_cinder { fi fi - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - enable_apache_site osapi-volume - restart_apache_server - tail_log c-api /var/log/$APACHE_NAME/c-api.log - else - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - echo "Waiting for Cinder API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then - die $LINENO "c-api did not start" - fi + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 + fi + + echo "Waiting for Cinder API to start..." + if ! wait_for_service $SERVICE_TIMEOUT $cinder_url; then + die $LINENO "c-api did not start" fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + # Tune glibc for Python services using a single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. To ensure that report_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler # has started. 
This is a quick fix for lp bug/1189595 - - # Start proxies if enabled - if is_service_enabled c-api && is_service_enabled tls-proxy; then - start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & - fi } # stop_cinder() - Stop running processes function stop_cinder { - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - disable_apache_site osapi-volume - restart_apache_server + stop_process c-api + stop_process c-bak + stop_process c-sch + stop_process c-vol +} + +function create_one_type { + local type_name=$1 + local property_key=$2 + local property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name else - stop_process c-api + # TODO (e0ne): use openstack client once it supports cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" fi - - # Kill the cinder screen windows - local serv - for serv in c-bak c-sch c-vol; do - stop_process $serv - done } # create_volume_types() - Create Cinder's configured volume types @@ -571,8 +685,20 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} + create_one_type $be_name "volume_backend_name" $be_name done + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH "<is> True" + fi + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" + fi fi } @@ -584,6 +710,48 @@ function create_cinder_volume_group { : } +function configure_cinder_internal_tenant { + # Re-use the Cinder service account for simplicity. + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME) + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder") +} + +function configure_cinder_image_volume_cache { + # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. 
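+ # Illustrative example (assumed values, not part of the change itself): with + # CINDER_CACHE_ENABLED_FOR_BACKENDS=lvm:lvmdriver-1, CINDER_IMG_CACHE_ENABLED=True + # and CINDER_IMG_CACHE_SIZE_GB=5, the loop below would produce in cinder.conf: + # [lvmdriver-1] + # image_volume_cache_enabled = True + # image_volume_cache_max_size_gb = 5 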
+ for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do + local be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED + + if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB + fi + + if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT + fi + done +} + +function configure_cinder_volume_upload { + # Expect CINDER_UPLOAD_OPTIMIZED_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. + local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} + +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True +} # Restore xtrace $_XTRACE_CINDER diff --git a/lib/cinder_backends/ceph index 9bff5bef4f..0b465730c0 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -6,12 +6,6 @@ # Enable with: # # CINDER_ENABLED_BACKENDS+=,ceph:ceph -# -# Optional parameters: -# CINDER_BAK_CEPH_POOL= -# CINDER_BAK_CEPH_USER= -# CINDER_BAK_CEPH_POOL_PG= -# CINDER_BAK_CEPH_POOL_PGP= # Dependencies: # @@ -29,11 +23,6 @@ set +o xtrace # Defaults # -------- -CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} -CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} -CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} -CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} - # Entry Points # ------------ @@ -45,34 +34,13 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver" - iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" - iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID" + iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID" iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 iniset $CINDER_CONF DEFAULT glance_api_version 2 - - if is_service_enabled c-bak; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi - fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) 
${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - - iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph" - iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF" - iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" - iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 - iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True - fi } # Restore xtrace diff --git a/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional parameters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name +function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake new file mode 100644 index 0000000000..4749aced69 --- /dev/null +++ b/lib/cinder_backends/fake @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/fake +# Configure the Fake backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake:fake + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF + +# cleanup_cinder_backend_fake - called from cleanup_cinder() +# configure_cinder_backend_fake - called from configure_cinder() +# init_cinder_backend_fake - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_FAKE=$(set +o | grep xtrace) +set +o xtrace + + +function cleanup_cinder_backend_fake { + local be_name=$1 +} + +function configure_cinder_backend_fake { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeLoggingVolumeDriver" + +} + +function init_cinder_backend_fake { + local be_name=$1 +} + +# Restore xtrace +$_XTRACE_CINDER_FAKE + +# Local variables: +# mode: 
shell-script +# End: diff --git a/lib/cinder_backends/fake_gate new file mode 100644 index 0000000000..3b9f1d1164 --- /dev/null +++ b/lib/cinder_backends/fake_gate @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/fake_gate +# Configure the fake_gate backend (an LVM-backed fake driver) + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake_gate:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# cleanup_cinder_backend_fake_gate - called from cleanup_cinder() +# configure_cinder_backend_fake_gate - called from configure_cinder() +# init_cinder_backend_fake_gate - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_FAKE_GATE=$(set +o | grep xtrace) +set +o xtrace + + +# TODO: resurrect backing device...need to know how to set values +#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_fake_gate - Delete volume group and remove backing file +# cleanup_cinder_backend_fake_gate $be_name +function cleanup_cinder_backend_fake_gate { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_fake_gate - Set config files, create data dirs, etc +# configure_cinder_backend_fake_gate $be_name +function configure_cinder_backend_fake_gate { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + + if [[ "$CINDER_VOLUME_CLEAR" == "none" ]]; then + iniset $CINDER_CONF $be_name volume_clear none + fi +} + +# init_cinder_backend_fake_gate - Initialize volume group +# init_cinder_backend_fake_gate $be_name +function init_cinder_backend_fake_gate { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_FAKE_GATE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/lvm index d927f9cd6b..42865119da 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,12 +50,12 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" - - if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then - iniset $CINDER_CONF $be_name volume_clear none - fi + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } # init_cinder_backend_lvm - Initialize volume group diff --git a/lib/cinder_backends/nfs index 89a37a1f02..f3fcbeff19 100644 --- a/lib/cinder_backends/nfs +++ b/lib/cinder_backends/nfs @@ -32,6 +32,15 @@ function configure_cinder_backend_nfs { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset 
$CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. + iniset $CINDER_CONF $be_name nfs_snapshot_support True echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" } diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv deleted file mode 100644 index e8b5da05d5..0000000000 --- a/lib/cinder_backends/xiv +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# -# Copyright 2014 IBM Corp. -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Alon Marx -# - -# lib/cinder_plugins/xiv -# Configure the xiv_ds8k driver for xiv testing - -# Enable xiv_ds8k driver for xiv with: -# -# CINDER_ENABLED_BACKENDS+=,xiv: -# XIV_DRIVER_VERSION= -# SAN_IP= -# SAN_LOGIN= -# SAN_PASSWORD= -# SAN_CLUSTERNAME= -# CONNECTION_TYPE= iscsi|fc -# XIV_CHAP= disabled|enabled - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_backend_xiv - Configure Cinder for xiv backends - -# Save trace setting -_XTRACE_CINDER_XIV=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_backend_xiv - Set config files, create data dirs, etc -function configure_cinder_backend_xiv { - - local be_name=$1 - - python -c 'from xiv_ds8k_openstack.xiv_nova_proxy import XIVNovaProxy' - if [ $? -ne 0 ]; then - die $LINENO "XIV_DS8K driver is missing. 
Please install first" - fi - - # For reference: - # ``XIV_DS8K_BACKEND='IBM-XIV_'${SAN_IP}'_'${SAN_CLUSTERNAME}'_'${CONNECTION_TYPE}`` - iniset $CINDER_CONF DEFAULT xiv_ds8k_driver_version $XIV_DRIVER_VERSION - - iniset $CINDER_CONF $be_name san_ip $SAN_IP - iniset $CINDER_CONF $be_name san_login $SAN_LOGIN - iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD - iniset $CINDER_CONF $be_name san_clustername $SAN_CLUSTERNAME - iniset $CINDER_CONF $be_name xiv_ds8k_connection_type $CONNECTION_TYPE - iniset $CINDER_CONF $be_name volume_backend_name $be_name - iniset $CINDER_CONF $be_name volume_driver 'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver' - iniset $CINDER_CONF $be_name xiv_ds8k_proxy 'xiv_ds8k_openstack.xiv_nova_proxy.XIVNovaProxy' - iniset $CINDER_CONF $be_name xiv_chap $XIV_CHAP -} - -# Restore xtrace -$_XTRACE_CINDER_XIV - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..e4d6b96407 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + fi + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for 
external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..c7ec306246 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,41 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. 
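+ # For illustration (assumed values): with SERVICE_HOST=10.0.0.10 and the + # default SWIFT_DEFAULT_BIND_PORT of 8080, this yields + # backup_swift_url = http://10.0.0.10:8080/v1/AUTH_ + # and the caller's project ID is appended to the AUTH_ prefix at request time. 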
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS deleted file mode 100644 index 92135e7c4f..0000000000 --- a/lib/cinder_plugins/XenAPINFS +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/XenAPINFS -# Configure the XenAPINFS driver - -# Enable with: -# -# CINDER_DRIVER=XenAPINFS - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" -} - -# Restore xtrace -$_XTRACE_CINDER_XENAPINFS - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog deleted file mode 100644 index 558de46c6d..0000000000 --- a/lib/cinder_plugins/sheepdog +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/sheepdog -# Configure the sheepdog driver - -# Enable with: -# -# CINDER_DRIVER=sheepdog - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" -} - -# Restore xtrace -$_XTRACE_CINDER_SHEEPDOG - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/database b/lib/database index 0d720527df..78563f6f6d 100644 --- a/lib/database +++ b/lib/database @@ -87,10 +87,12 @@ function initialize_database_backends { if [ -n "$MYSQL_PASSWORD" ]; then DATABASE_PASSWORD=$MYSQL_PASSWORD - else - read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." fi + return 0 +} + +function define_database_baseurl { # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own # database and tables. 
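# Illustrative example (assumed values): with DATABASE_TYPE=mysql under the # default PyMySQL driver, DATABASE_USER=root, DATABASE_PASSWORD=secret and # DATABASE_HOST=127.0.0.1, the BASE_SQL_CONN defined in the next hunk expands to # mysql+pymysql://root:secret@127.0.0.1 # and database_connection_url_mysql() then appends "/<db>?charset=utf8" per service database. 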
@@ -102,8 +104,6 @@ function initialize_database_backends { # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} - - return 0 } # Recreate a given database diff --git a/lib/databases/mysql b/lib/databases/mysql index f6cc9224af..a47580ca3d 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -12,13 +12,17 @@ _XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database mysql -# Linux distros, thank you for being incredibly consistent -MYSQL=mysql -if is_fedora && ! is_oraclelinux; then - MYSQL=mariadb +if [[ -z "$MYSQL_SERVICE_NAME" ]]; then + MYSQL_SERVICE_NAME=mysql + if is_fedora && ! is_oraclelinux; then + MYSQL_SERVICE_NAME=mariadb + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then + MYSQL_SERVICE_NAME=mariadb + fi fi # Functions @@ -34,14 +38,14 @@ function get_database_type_mysql { # Get rid of everything enough to cleanly change database backends function cleanup_database_mysql { - stop_service $MYSQL + stop_service $MYSQL_SERVICE_NAME if is_ubuntu; then # Get ruthless with mysql apt_get purge -y mysql* mariadb* sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql return - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then uninstall_package mysql-community-server sudo rm -rf /var/lib/mysql elif is_fedora; then @@ -59,47 +63,94 @@ function recreate_database_mysql { } function configure_database_mysql { - local my_conf mysql slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then my_conf=/etc/mysql/my.cnf - mysql=mysql - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then my_conf=/etc/my.cnf - mysql=mysql elif is_fedora; then - mysql=mariadb my_conf=/etc/my.cnf + local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf + if [ -f "$cracklib_conf" ]; then + inicomment -sudo "$cracklib_conf" "mariadb" "plugin-load-add" + fi else exit_distro_not_supported "mysql configuration" fi - # Start mysql-server - if is_fedora || is_suse; then + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server + if is_fedora; then # service is not started by default - start_service $mysql + start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already - # did that with debconf before installing the package. - if ! is_ubuntu; then + # did that with debconf before installing the package, but we still try, + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04+ because the authorization model change in + # version 10.4 of mariadb. See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi - # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # In case of Mariadb, giving hostname in arguments causes permission + # problems as it expects connection through socket + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + local cmd_args="-uroot -p$DATABASE_PASSWORD " + else + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " + fi + + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + + # In mariadb e.g. on Ubuntu socket plugin is used for authentication + # as root so it works only as sudo. To restore old "mysql like" behaviour, + # we need to change auth plugin for root user + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + fi + if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" + fi # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change bind-address from localhost (127.0.0.1) to any (::) and - # set default db type to InnoDB - iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" - iniset -sudo $my_conf mysqld sql_mode STRICT_ALL_TABLES + # Set default db type to InnoDB + iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 - iniset -sudo $my_conf mysqld query_cache_type OFF - iniset -sudo $my_conf mysqld query_cache_size 0 if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" @@ -121,7 +172,30 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi - restart_service $mysql + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats + fi + + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 
64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + + restart_service $MYSQL_SERVICE_NAME } function install_database_mysql { @@ -143,22 +217,25 @@ MYSQL_PRESEED [client] user=$DATABASE_USER password=$DATABASE_PASSWORD -host=$MYSQL_HOST EOF + + if ! is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then + echo "host=$MYSQL_HOST" >> $HOME/.my.cnf + fi chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_suse || is_oraclelinux; then - if ! is_package_installed mariadb; then + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" fi - elif is_fedora; then - install_package mariadb-server - sudo systemctl enable mariadb - elif is_ubuntu; then - install_package mysql-server - else - exit_distro_not_supported "mysql installation" fi } @@ -174,7 +251,17 @@ function install_database_python_mysql { function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } diff --git a/lib/databases/postgresql index 14425a53b7..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -13,7 +13,7 @@ set +o xtrace MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} - +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database postgresql @@ -32,7 +32,7 @@ function cleanup_database_postgresql { # Get ruthless with postgresql apt_get purge -y postgresql* return - elif is_fedora || is_suse; then + elif is_fedora; then uninstall_package postgresql-server else return @@ -46,14 +46,20 @@ function recreate_database_postgresql { createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db } +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + function configure_database_postgresql { - local pg_conf pg_dir pg_hba root_roles version + local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and starting PostgreSQL" if is_fedora; then pg_hba=/var/lib/pgsql/data/pg_hba.conf pg_conf=/var/lib/pgsql/data/postgresql.conf if ! sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then version=`psql --version | cut -d ' ' -f3 | cut -d. 
-f1-2` @@ -66,11 +72,6 @@ function configure_database_postgresql { pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` pg_hba=$pg_dir/pg_hba.conf pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql else exit_distro_not_supported "postgresql configuration" fi @@ -85,8 +86,8 @@ function configure_database_postgresql { restart_service postgresql # Create the role if it's not here or else alter it. - root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'") - if [[ ${root_roles} == *HERE ]];then + check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'") + if [[ ${check_role} == *HERE ]];then sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" else sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" @@ -104,15 +105,17 @@ EOF else sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - if is_fedora; then - sudo systemctl enable postgresql + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" fi - else - exit_distro_not_supported "postgresql installation" fi } diff --git a/lib/dlm b/lib/dlm deleted file mode 100644 index e391535910..0000000000 --- a/lib/dlm +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash -# -# lib/dlm -# -# Functions to control the installation and configuration of software -# that provides a dlm (and possibly other functions). The default is -# **zookeeper**, and is going to be the only backend supported in the -# devstack tree. - -# Dependencies: -# -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - is_dlm_enabled -# - install_dlm -# - configure_dlm -# - cleanup_dlm - -# Save trace setting -_XTRACE_DLM=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# - -# Set up default directories -ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper -ZOOKEEPER_CONF_DIR=/etc/zookeeper - - -# Entry Points -# ------------ -# -# NOTE(sdague): it is expected that when someone wants to implement -# another one of these out of tree, they'll implement the following -# functions: -# -# - dlm_backend -# - install_dlm -# - configure_dlm -# - cleanup_dlm - -# This should be declared in the settings file of any plugin or -# service that needs to have a dlm in their environment. -function use_dlm { - enable_service $(dlm_backend) -} - -# A function to return the name of the backend in question, some users -# are going to need to know this. 
-function dlm_backend { - echo "zookeeper" -} - -# Test if a dlm is enabled (defaults to a zookeeper specific check) -function is_dlm_enabled { - [[ ,${ENABLED_SERVICES}, =~ ,"$(dlm_backend)", ]] && return 0 - return 1 -} - -# cleanup_dlm() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_dlm { - # NOTE(sdague): we don't check for is_enabled here because we - # should just delete this regardless. Some times users updated - # their service list before they run cleanup. - sudo rm -rf $ZOOKEEPER_DATA_DIR -} - -# configure_dlm() - Set config files, create data dirs, etc -function configure_dlm { - if is_dlm_enabled; then - sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR - sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg - # clean up from previous (possibly aborted) runs - # create required data files - sudo rm -rf $ZOOKEEPER_DATA_DIR - sudo mkdir -p $ZOOKEEPER_DATA_DIR - # restart after configuration, there is no reason to make this - # another step, because having data files that don't match the - # zookeeper running is just going to cause tears. - restart_service zookeeper - fi -} - -# install_dlm() - Collect source and prepare -function install_dlm { - if is_dlm_enabled; then - if is_ubuntu; then - install_package zookeeperd - elif is_fedora; then - install_package zookeeper - else - die $LINENO "Don't know how to install zookeeper on this platform" - fi - fi -} - -# Restore xtrace -$_XTRACE_DLM - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/dstat index b705948094..9bd0370847 100644 --- a/lib/dstat +++ b/lib/dstat @@ -9,6 +9,7 @@ # ``stack.sh`` calls the entry points in this order: # +# - install_dstat # - start_dstat # - stop_dstat @@ -16,21 +17,41 @@ _XTRACE_DSTAT=$(set +o | grep xtrace) set +o xtrace -# start_dstat() - Start running processes, including screen +# install_dstat() - Install prerequisites for dstat services +function install_dstat { + if is_service_enabled memory_tracker; then + # Install python libraries required by tools/mlock_report.py + pip_install_gr psutil + fi +} + +# start_dstat() - Start running processes +function start_dstat { # A better kind of sysstat, with the top process per time slice run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" - # To enable peakmem_tracker add: - # enable_service peakmem_tracker + # To enable memory_tracker add: + # enable_service memory_tracker + # to your localrc + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" + + # TODO(jh): Fail when using the old service name otherwise consumers might + # never notice that it has been removed. 
+ if is_service_enabled peakmem_tracker; then + die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" + fi + + # To enable file_tracker add: + # enable_service file_tracker # to your localrc - run_process peakmem_tracker "$TOP_DIR/tools/peakmem_tracker.sh" + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat - stop_process peakmem_tracker + stop_process memory_tracker + stop_process file_tracker } # Restore xtrace diff --git a/lib/etcd3 b/lib/etcd3 new file mode 100644 index 0000000000..0d22de8c73 --- /dev/null +++ b/lib/etcd3 @@ -0,0 +1,136 @@ +#!/bin/bash +# +# lib/etcd3 +# +# Functions to control the installation and configuration of etcd 3.x +# that provides a key-value store (and possibly other functions). + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_etcd3 +# - stop_etcd3 +# - cleanup_etcd3 + +# Save trace setting +_XTRACE_ETCD3=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default values for etcd +ETCD_DATA_DIR="$DATA_DIR/etcd" +ETCD_SYSTEMD_SERVICE="devstack@etcd.service" +ETCD_BIN_DIR="$DEST/bin" +# Option below will mount ETCD_DATA_DIR as ramdisk, which is useful to run +# etcd-heavy services in the gate VM's, e.g. Kubernetes. +ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK) +ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512} + +if is_ubuntu ; then + UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` +fi + +# start_etcd3() - Starts to run the etcd process +function start_etcd3 { + local cmd="$ETCD_BIN_DIR/etcd" + cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" + cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" + cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then + cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT " + else + cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT " + fi + cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then + cmd+=" --log-level=debug" + fi + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" + + iniset -sudo $unitfile "Unit" "After" "network.target" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "Restart" "on-failure" + iniset -sudo $unitfile "Service" "LimitNOFILE" "65536" + if is_arch "aarch64"; then + iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64" + fi + + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE + $SYSTEMCTL start $ETCD_SYSTEMD_SERVICE +} + +# stop_etcd3() stops the etcd3 process +function stop_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE +} + +function cleanup_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL disable $ETCD_SYSTEMD_SERVICE + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + sudo rm -f $unitfile + + $SYSTEMCTL daemon-reload + + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo umount $ETCD_DATA_DIR + fi + sudo rm -rf 
$ETCD_DATA_DIR +} + +function install_etcd3 { + echo "Installing etcd" + + # Create the necessary directories + sudo mkdir -p $ETCD_BIN_DIR + sudo mkdir -p $ETCD_DATA_DIR + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR + fi + + # Download and cache the etcd tgz for subsequent use + local etcd_file + etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)" + if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum + # NOTE(yuanke wei): rm the damaged file when checksum fails + sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1) + + tar xzvf $etcd_file -C $FILES + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi + if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi +} + +# Restore xtrace +$_XTRACE_ETCD3 + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/glance b/lib/glance index 8d95aad73f..9422c22141 100644 --- a/lib/glance +++ b/lib/glance @@ -41,39 +41,98 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + +# Cinder for Glance +USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) +# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values +# from CINDER_ENABLED_BACKENDS +GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} +GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance +fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + +# Glance multi-store configuration +# Boolean flag to enable multiple store configuration for glance +GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) + +# Comma separated list for configuring multiple file stores of glance, +# for example; GLANCE_MULTIPLE_FILE_STORES = fast,cheap,slow +GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} + +# Default store/backend for glance, must be one of the store specified +# in GLANCE_MULTIPLE_FILE_STORES option. 
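+# Illustrative local.conf snippet (assumed values) combining these options: +# GLANCE_ENABLE_MULTIPLE_STORES=True +# GLANCE_MULTIPLE_FILE_STORES=fast,cheap +# GLANCE_DEFAULT_BACKEND=cheap 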
+GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} + GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} + +# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store +# identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES +# has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast +# and $DATA_DIR/glance/cheap. +GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} -GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} +GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt +GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} +GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} +GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} + +GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) + +# Flag to set the oslo_policy.enforce_scope. This is used to enable or disable +# the Image API policies scope checks and new defaults. +# By default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) + +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs -GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf -GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf -GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf -GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf -GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini -GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-True} +GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf -if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" fi # Glance connection info. Note the port must be specified. 
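# For example (assumed address), a local.conf on a multi-host setup might set # GLANCE_HOSTPORT=192.168.42.11:9292 so clients reach the Glance API on the controller. 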
GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST} -GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} -GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} -GLANCE_GLARE_PORT=${GLANCE_GLARE_PORT:-9494} -GLANCE_GLARE_HOSTPORT=${GLANCE_GLARE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_GLARE_PORT} +GLANCE_UWSGI=glance.wsgi.api:application +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini + +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} + +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -81,6 +140,7 @@ GLANCE_GLARE_HOSTPORT=${GLANCE_GLARE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_GLAR # Test if any Glance services are enabled # is_glance_enabled function is_glance_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"glance" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 return 1 } @@ -88,67 +148,227 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # kill instances (nova) - # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" + + # Cleanup multiple stores directories + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store file_dir + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + sudo rm -rf $file_dir + done + + # Cleanup reserved stores directories + sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR + fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" +} + +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi +} + +# Set multiple cinder 
store related config options for each of the cinder stores +# +function configure_multiple_cinder_stores { + + local be be_name be_type enabled_backends + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + enabled_backends+="${be_name}:cinder," + + set_common_cinder_store_params $be_name + iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name} + if [[ "$be_type" == "nfs" ]]; then + mkdir -p "$GLANCE_NFS_MOUNTPOINT" + iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT" + fi + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND +} + +# Set common cinder store options to given config section +# +# Arguments: +# config_section +# +function set_common_cinder_store_params { + local config_section="$1" + iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3 + iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance + iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME +} + +# Configure multiple file store options for each file store +# +# Arguments: +# +function configure_multiple_file_stores { + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + + # Glance multiple store specific configs + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND + local store + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + done +} + +# Set reserved stores for glance +function configure_reserved_stores { + iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" + iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" +} + +# Copy rootwrap files from glance_store/etc/glance to /etc/glance +# +# Arguments: +# source_path Source path to copy rootwrap files from +# +function copy_rootwrap { + local source_path="$1" + # Make the glance configuration directory if it does not exist + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR + cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/ +} + +# Set glance_store related config options +# +# Arguments: +# USE_CINDER_FOR_GLANCE +# GLANCE_ENABLE_MULTIPLE_STORES +# +function configure_glance_store { + local use_cinder_for_glance="$1" + local glance_enable_multiple_stores="$2" + local be + + if [[ "$glance_enable_multiple_stores" == "False" ]]; then + if [[ "$use_cinder_for_glance" == "True" ]]; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" + iniset $GLANCE_API_CONF glance_store default_store cinder + + # set cinder related store parameters + set_common_cinder_store_params glance_store + # set nfs mount_point dir + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + local be_name=${be##*:} + if [[ "$be_name" == "nfs" ]]; then + mkdir -p $GLANCE_NFS_MOUNTPOINT + iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT + fi + done + fi + # Store specific configs
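+ # (Illustrative result in glance-api.conf, assuming the default DATA_DIR: + #   [glance_store] + #   filesystem_store_datadir = /opt/stack/data/glance/images/ + # the actual path follows GLANCE_IMAGE_DIR.)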
+ iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + else + if [[ "$use_cinder_for_glance" == "True" ]]; then + # Configure multiple cinder stores for glance + configure_multiple_cinder_stores + elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then + # Configure multiple file stores for glance + configure_multiple_file_stores + fi + # Configure reserved stores + configure_reserved_stores + fi +} + +function configure_glance_quotas { + + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope all + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR - # Copy over our glance configurations and update them - if is_service_enabled g-glare; then - cp $GLANCE_DIR/etc/glance-glare.conf $GLANCE_GLARE_CONF - fi - cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file + # Set non-default configuration options for the API server local dburl dburl=`database_connection_url glance` - iniset $GLANCE_REGISTRY_CONF database connection $dburl - iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" - iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry - iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messaging - iniset_rpc_backend glance $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_API_CONF DEFAULT 
bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER + iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api - iniset $GLANCE_API_CONF oslo_messaging_notifications driver messaging + configure_keystone_authtoken_middleware $GLANCE_API_CONF glance + iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" - iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" - fi if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi + # Only use these if you know what you are doing! See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT - # NOTE(flaper87): To uncomment as soon as all services consuming Glance are - # able to consume V2 entirely. - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $GLANCE_API_CONF DEFAULT enable_v1_api False - fi - - # Store specific configs - iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - if is_service_enabled g-glare; then - iniset $GLANCE_GLARE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - fi - iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST - - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + # Configure glance_store + configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES # CORS feature support - to allow calls from Horizon by default if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then @@ -157,132 +377,99 @@ function configure_glance { iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" fi - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then - iniset $GLANCE_API_CONF glance_store default_store swift - iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True - - iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF - iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 - iniset $GLANCE_API_CONF glance_store stores "file, http, swift" - iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift - - # Store the glare in swift if enabled. 
- if is_service_enabled g-glare; then - iniset $GLANCE_GLARE_CONF glance_store default_store swift - iniset $GLANCE_GLARE_CONF glance_store swift_store_create_container_on_put True - - iniset $GLANCE_GLARE_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF - iniset $GLANCE_GLARE_CONF glance_store default_swift_reference ref1 - iniset $GLANCE_GLARE_CONF glance_store stores "file, http, swift" - iniset $GLANCE_GLARE_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" - - # commenting is not strictly necessary but it's confusing to have bad values in conf - inicomment $GLANCE_GLARE_CONF glance_store swift_store_user - inicomment $GLANCE_GLARE_CONF glance_store swift_store_key - inicomment $GLANCE_GLARE_CONF glance_store swift_store_auth_address + # No multiple stores for swift yet + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + # Use S3 as the default store if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. + iniset $GLANCE_API_CONF glance_store default_store swift + iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + + iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF + iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 + iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + fi + iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift + + iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 + fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi fi - - iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 - iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME - iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 - - # commenting is not strictly necessary but it's confusing to have bad values in conf - inicomment $GLANCE_API_CONF glance_store swift_store_user - inicomment $GLANCE_API_CONF glance_store swift_store_key - inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi + # We need to tell glance what its public endpoint is so that the version + # discovery document will be correct + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL + if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT - iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT - - iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - fi - - # Register SSL certificates if provided - if
is_ssl_enabled_service glance; then - ensure_certificates GLANCE - - iniset $GLANCE_API_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_API_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - - iniset $GLANCE_REGISTRY_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_REGISTRY_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - fi - - if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then - iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https + iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $GLANCE_API_CONF DEFAULT tenant user - setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT tenant user - fi - - cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + setup_logging $GLANCE_API_CONF cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI - cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + # Set non-default configuration options for the glance-cache iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_CACHE_CONF DEFAULT log_file iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v2.0 - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD - iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ - cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON - cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON + # Set default configuration options for the glance-image-import + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject + cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then + if is_service_enabled tls-proxy; then CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} - iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" - iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" + iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - # Configure GLANCE_GLARE (Glance Glare) - if is_service_enabled g-glare; then - local dburl - dburl=`database_connection_url glance` - setup_colorized_logging $GLANCE_GLARE_CONF DEFAULT tenant user - iniset 
$GLANCE_GLARE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $GLANCE_GLARE_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_GLARE_CONF DEFAULT bind_port $GLANCE_GLARE_PORT - inicomment $GLANCE_GLARE_CONF DEFAULT log_file - iniset $GLANCE_GLARE_CONF DEFAULT workers "$API_WORKERS" - - iniset $GLANCE_GLARE_CONF database connection $dburl - iniset $GLANCE_GLARE_CONF paste_deploy flavor keystone - configure_auth_token_middleware $GLANCE_GLARE_CONF glare $GLANCE_AUTH_CACHE_DIR/artifact - # Register SSL certificates if provided - if is_ssl_enabled_service glance; then - ensure_certificates GLANCE - iniset $GLANCE_GLARE_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" - iniset $GLANCE_GLARE_CONF DEFAULT key_file "$GLANCE_SSL_KEY" - fi - cp $GLANCE_DIR/etc/glance-glare-paste.ini $GLANCE_GLARE_PASTE_INI + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt to use the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false fi } @@ -293,12 +480,13 @@ function configure_glance { # SERVICE_PROJECT_NAME glance service # SERVICE_PROJECT_NAME glance-swift ResellerAdmin (if Swift is enabled) # SERVICE_PROJECT_NAME glance-search search (if Search is enabled) -# SERVICE_PROJECT_NAME glare service (if enabled) function create_glance_accounts { if is_service_enabled g-api; then - create_service_user "glance" + # When cinder talks to the glance service APIs, the user needs the service + # role for RBAC checks and the admin role for cinder to access images.
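+ # (Roughly what the helper below amounts to with default names -- a sketch, + # not the exact implementation: + #   openstack user create glance --password "$SERVICE_PASSWORD" + #   openstack role add --project "$SERVICE_PROJECT_NAME" --user glance service + #   openstack role add --project "$SERVICE_PROJECT_NAME" --user glance admin + # the helper also handles domains and idempotency.)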
+ create_service_user "glance" "admin" # required for swift access if is_service_enabled s-proxy; then @@ -309,29 +497,25 @@ function create_glance_accounts { get_or_create_endpoint \ "image" \ "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" - fi + "$GLANCE_URL" - # Add glance-glare service and endpoints - if is_service_enabled g-glare; then - create_service_user "glare" - get_or_create_service "glare" "artifact" "Glance Artifact Service" + # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 + service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) + iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id + iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id - get_or_create_endpoint "artifact" \ - "$REGION_NAME" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" \ - "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT" - fi -} + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas + fi -# create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact - rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/* + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi + fi } # init_glance() - Initialize databases, etc. @@ -340,20 +524,16 @@ function init_glance { rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - # Delete existing cache - rm -rf $GLANCE_CACHE_DIR - mkdir -p $GLANCE_CACHE_DIR - # (Re)create glance database recreate_database glance + time_start "dbsync" # Migrate glance database $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync # Load metadata definitions $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs - - create_glance_cache_dir + time_stop "dbsync" } # install_glanceclient() - Collect source and prepare @@ -367,11 +547,26 @@ function install_glanceclient { # install_glance() - Collect source and prepare function install_glance { + local glance_store_extras=() + + if is_service_enabled cinder; then + glance_store_extras=("cinder" "${glance_store_extras[@]}") + fi + + if is_service_enabled swift; then + glance_store_extras=("swift" "${glance_store_extras[@]}") + fi + # Install glance_store from git so we make sure we're testing # the latest code. 
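+    # (With both cinder and swift enabled above, either branch below effectively +    # installs something like "glance-store[cinder,swift]"; the exact extras +    # spelling is an assumption, not taken from this patch.)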
if use_library_from_git "glance_store"; then git_clone_by_name "glance_store" - setup_dev_lib "glance_store" + setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}") + copy_rootwrap ${DEST}/glance_store/etc/glance + else + # we still need to pass extras + pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}") + copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR fi git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH @@ -379,41 +574,97 @@ function install_glance { setup_develop $GLANCE_DIR } -# start_glance() - Start running processes, including screen +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi venv + + glance_remote_conf_dir="$(glance_remote_conf "")" + glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + + # Point this worker to use different cache dir + mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + "$glance_remote_uwsgi" + + # Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + "$glance_remote_uwsgi") + + # We need to create the systemd service for the clone, but then + # change it to include an Environment line to point the WSGI app + # at the alternate config directory. 
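+    # (The net effect is a unit along these lines -- illustrative values only: +    #   [Service] +    #   Environment=OS_GLANCE_CONFIG_DIR=/etc/glance-remote +    #   ExecStart=/usr/local/bin/uwsgi --procname-prefix glance-api-remote --ini /etc/glance-remote/glance-uwsgi.ini +    # with the real paths coming from glance_remote_conf above.)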
+ if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $glance_remote_uwsgi \ + $venv" \ + "" "$STACK_USER" + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $glance_remote_uwsgi) +} + +# start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT & - start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT & - fi - run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then - die $LINENO "g-api did not start" + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone fi - #Start g-glare after g-reg/g-api - if is_service_enabled g-glare; then - run_process g-glare "$GLANCE_BIN_DIR/glance-glare --config-file=$GLANCE_CONF_DIR/glance-glare.conf" - echo "Waiting for Glare [g-glare] ($GLANCE_GLARE_HOSTPORT) to start..." - if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT; then - die $LINENO " Glare [g-glare] did not start" - fi + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then + die $LINENO "g-api did not start" fi } # stop_glance() - Stop running processes function stop_glance { - # Kill the Glance screen windows stop_process g-api - stop_process g-reg - - if is_service_enabled g-glare; then - stop_process g-glare - fi + stop_process g-api-r } # Restore xtrace diff --git a/lib/heat b/lib/heat deleted file mode 100644 index c841e0a499..0000000000 --- a/lib/heat +++ /dev/null @@ -1,469 +0,0 @@ -#!/bin/bash -# -# lib/heat -# Install and start **Heat** service - -# To enable, add the following to localrc -# -# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng - -# Dependencies: -# (none) - -# stack.sh -# --------- -# - install_heatclient -# - install_heat -# - configure_heatclient -# - configure_heat -# - _config_heat_apache_wsgi -# - init_heat -# - start_heat -# - stop_heat -# - cleanup_heat - -# Save trace setting -_XTRACE_HEAT=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# set up default directories -GITDIR["python-heatclient"]=$DEST/python-heatclient - -# Toggle for deploying Heat-API under HTTPD + mod_wsgi -HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False} - -HEAT_DIR=$DEST/heat -HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools -HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates -OCC_DIR=$DEST/os-collect-config -DIB_UTILS_DIR=$DEST/dib-utils -ORC_DIR=$DEST/os-refresh-config -OAC_DIR=$DEST/os-apply-config - -HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo -HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899} - -HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} -HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE) -HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON) -HEAT_CONF_DIR=/etc/heat -HEAT_CONF=$HEAT_CONF_DIR/heat.conf -HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d -HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates -HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} -HEAT_API_PORT=${HEAT_API_PORT:-8004} -HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat} -HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER} -HEAT_TRUSTEE_PASSWORD=${HEAT_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD} -HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default} - -# Support entry points installation of console scripts -HEAT_BIN_DIR=$(get_python_exec_prefix) - -# other default options -if [[ "$HEAT_STANDALONE" = "True" ]]; then - # for standalone, use defaults which require no service user - HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN) - HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password} - if [[ ${HEAT_DEFERRED_AUTH} != "password" ]]; then - # Heat does not support keystone trusts when deployed in - # standalone mode - die $LINENO \ - 'HEAT_DEFERRED_AUTH can only be set to "password" when HEAT_STANDALONE is True.' 
- fi -else - HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) - HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-} -fi -HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins} -ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-} - -# Functions -# --------- - -# Test if any Heat services are enabled -# is_heat_enabled -function is_heat_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 - return 1 -} - -# cleanup_heat() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_heat { - sudo rm -rf $HEAT_AUTH_CACHE_DIR - sudo rm -rf $HEAT_ENV_DIR - sudo rm -rf $HEAT_TEMPLATES_DIR - sudo rm -rf $HEAT_CONF_DIR -} - -# configure_heat() - Set config files, create data dirs, etc -function configure_heat { - - sudo install -d -o $STACK_USER $HEAT_CONF_DIR - # remove old config files - rm -f $HEAT_CONF_DIR/heat-*.conf - - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} - HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} - HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} - HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} - HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini - HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json - - cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE - cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE - - # common options - iniset_rpc_backend heat $HEAT_CONF - iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition - iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_CONF database connection `database_connection_url heat` - iniset $HEAT_CONF DEFAULT auth_encryption_key $(generate_hex_string 16) - - iniset $HEAT_CONF DEFAULT region_name_for_services "$REGION_NAME" - - # logging - iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ] ; then - # Add color to logging output - setup_colorized_logging $HEAT_CONF DEFAULT tenant user - fi - - if [ ! 
-z "$HEAT_DEFERRED_AUTH" ]; then - iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH - fi - - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - _config_heat_apache_wsgi - fi - - if [[ "$HEAT_STANDALONE" = "True" ]]; then - iniset $HEAT_CONF paste_deploy flavor standalone - iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s" - else - configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR - fi - - # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure - # the section for the client plugin associated with the trustee - if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then - iniset $HEAT_CONF trustee auth_type password - iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI - iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER - iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD - iniset $HEAT_CONF trustee user_domain_id $HEAT_TRUSTEE_DOMAIN - fi - - # clients_keystone - iniset $HEAT_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI - - # OpenStack API - iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT - iniset $HEAT_CONF heat_api workers "$API_WORKERS" - - # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT - - # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT - - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_nova ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then - iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE - fi - - if [[ "$HEAT_ENABLE_ADOPT_ABANDON" = "True" ]]; then - iniset $HEAT_CONF DEFAULT enable_stack_adopt true - iniset $HEAT_CONF DEFAULT enable_stack_abandon true - fi - - iniset $HEAT_CONF cache enabled "True" - iniset $HEAT_CONF cache backend "dogpile.cache.memory" - - sudo install -d -o $STACK_USER $HEAT_ENV_DIR $HEAT_TEMPLATES_DIR - - # copy the default environment - cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ - - # copy the default templates - cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ - - # Enable heat plugins. - # NOTE(nic): The symlink nonsense is necessary because when - # plugins are installed in "developer mode", the final component - # of their target directory is always "resources", which confuses - # Heat's plugin loader into believing that all plugins are named - # "resources", and therefore are all the same plugin; so it - # will only load one of them. Linking them all to a common - # location with unique names avoids that type of collision, - # while still allowing the plugins to be edited in-tree. - local err_count=0 - - if [ -n "$ENABLE_HEAT_PLUGINS" ]; then - mkdir -p $HEAT_PLUGIN_DIR - # Clean up cruft from any previous runs - rm -f $HEAT_PLUGIN_DIR/* - iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR - fi - - for heat_plugin in $ENABLE_HEAT_PLUGINS; do - if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then - setup_package $HEAT_DIR/contrib/$heat_plugin -e - ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin - else - : # clear retval on the test so that we can roll up errors - err $LINENO "Requested Heat plugin(${heat_plugin}) not found." 
- err_count=$(($err_count + 1)) - fi - done - [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed." -} - -# init_heat() - Initialize database -function init_heat { - - # (re)create heat database - recreate_database heat - - $HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync - create_heat_cache_dir -} - -# create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir { - # Create cache dirs - sudo install -d -o $STACK_USER $HEAT_AUTH_CACHE_DIR -} - -# install_heatclient() - Collect source and prepare -function install_heatclient { - if use_library_from_git "python-heatclient"; then - git_clone_by_name "python-heatclient" - setup_dev_lib "python-heatclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-heatclient"]}/tools/,/etc/bash_completion.d/}heat.bash_completion - fi -} - -# install_heat() - Collect source and prepare -function install_heat { - git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH - setup_develop $HEAT_DIR - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - fi -} - -# install_heat_other() - Collect source and prepare -function install_heat_other { - git_clone $HEAT_CFNTOOLS_REPO $HEAT_CFNTOOLS_DIR $HEAT_CFNTOOLS_BRANCH - git_clone $HEAT_TEMPLATES_REPO $HEAT_TEMPLATES_REPO_DIR $HEAT_TEMPLATES_BRANCH - git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH - git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH - git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH - git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH -} - -# start_heat() - Start running processes, including screen -function start_heat { - run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF" - - # If the site is not enabled then we are in a grenade scenario - local enabled_site_file - enabled_site_file=$(apache_site_config_for heat-api) - if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - enable_apache_site heat-api - enable_apache_site heat-api-cfn - enable_apache_site heat-api-cloudwatch - restart_apache_server - tail_log heat-api /var/log/$APACHE_NAME/heat-api.log - tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log - tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log - else - run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF" - run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF" - run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF" - fi -} - -# stop_heat() - Stop running processes -function stop_heat { - # Kill the screen windows - stop_process h-eng - - if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then - disable_apache_site heat-api - disable_apache_site heat-api-cfn - disable_apache_site heat-api-cloudwatch - restart_apache_server - else - local serv - for serv in h-api h-api-cfn h-api-cw; do - stop_process $serv - done - fi - -} - -# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_heat_apache_wsgi { - sudo rm -f $(apache_site_config_for heat-api) - sudo rm -f $(apache_site_config_for heat-api-cfn) - sudo rm -f $(apache_site_config_for heat-api-cloudwatch) -} - -# _config_heat_apache_wsgi() - Set WSGI config files of Heat -function _config_heat_apache_wsgi { - - local heat_apache_conf - heat_apache_conf=$(apache_site_config_for heat-api) - local heat_cfn_apache_conf - heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn) - local heat_cloudwatch_apache_conf - heat_cloudwatch_apache_conf=$(apache_site_config_for 
heat-api-cloudwatch) - local heat_ssl="" - local heat_certfile="" - local heat_keyfile="" - local heat_api_port=$HEAT_API_PORT - local heat_cfn_api_port=$HEAT_API_CFN_PORT - local heat_cw_api_port=$HEAT_API_CW_PORT - local venv_path="" - - sudo cp $FILES/apache-heat-api.template $heat_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_apache_conf - - sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_cfn_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_cfn_apache_conf - - sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$heat_cw_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; - s|%SSLENGINE%|$heat_ssl|g; - s|%SSLCERTFILE%|$heat_certfile|g; - s|%SSLKEYFILE%|$heat_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $heat_cloudwatch_apache_conf -} - - -# create_heat_accounts() - Set up common required heat accounts -function create_heat_accounts { - if [[ "$HEAT_STANDALONE" != "True" ]]; then - - create_service_user "heat" "admin" - get_or_create_service "heat" "orchestration" "Heat Orchestration Service" - get_or_create_endpoint \ - "orchestration" \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" - - get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service" - get_or_create_endpoint \ - "cloudformation" \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" - - # heat_stack_user role is for users created by Heat - get_or_create_role "heat_stack_user" - fi - - if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then - # domain -> heat and user -> heat_domain_admin - domain_id=$(get_or_create_domain heat 'Owns users and projects created by heat') - iniset $HEAT_CONF DEFAULT stack_user_domain_id ${domain_id} - get_or_create_user heat_domain_admin $SERVICE_PASSWORD heat - get_or_add_user_domain_role admin heat_domain_admin heat - iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin - iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD - fi -} - -# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects -function build_heat_pip_mirror { - local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR" - local projpath proj package - - rm -rf $HEAT_PIP_REPO - mkdir -p $HEAT_PIP_REPO - - echo "" > $HEAT_PIP_REPO/index.html - for projpath in $project_dirs; do - proj=$(basename $projpath) - mkdir -p $HEAT_PIP_REPO/$proj - pushd $projpath - rm -rf dist - python setup.py sdist - pushd dist - package=$(ls *) - mv $package $HEAT_PIP_REPO/$proj/$package - popd - - echo "$package" > $HEAT_PIP_REPO/$proj/index.html - echo "$proj
" >> $HEAT_PIP_REPO/index.html - - popd - done - - echo "" >> $HEAT_PIP_REPO/index.html - - local heat_pip_repo_apache_conf - heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo) - - sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf - sudo sed -e " - s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g; - s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - " -i $heat_pip_repo_apache_conf - enable_apache_site heat_pip_repo - restart_apache_server - sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $HEAT_PIP_REPO_PORT -j ACCEPT || true -} - -# Restore xtrace -$_XTRACE_HEAT - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/horizon b/lib/horizon index 0517e32197..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -26,9 +26,6 @@ set +o xtrace # Defaults # -------- -# Set up default directories -GITDIR["django_openstack_auth"]=$DEST/django_openstack_auth - HORIZON_DIR=$DEST/horizon # local_settings.py is used to customize Dashboard settings. @@ -46,8 +43,8 @@ function _horizon_config_set { local value=$4 if [ -z "$section" ]; then - sed -e "/^$option/d" -i $local_settings - echo -e "\n$option=$value" >> $file + sed -e "/^$option/d" -i $file + echo "$option = $value" >> $file elif grep -q "^$section" $file; then local line line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file) @@ -69,9 +66,8 @@ function _horizon_config_set { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon { - local horizon_conf - horizon_conf=$(apache_site_config_for horizon) - sudo rm -f $horizon_conf + disable_apache_site horizon + sudo rm -f $(apache_site_config_for horizon) } # configure_horizon() - Set config files, create data dirs, etc @@ -82,32 +78,52 @@ function configure_horizon { # Horizon is installed as develop mode, so we can compile here. # Message catalog compilation is handled by Django admin script, # so compiling them after the installation avoids Django installation twice. - (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages) + (cd $HORIZON_DIR; $PYTHON manage.py compilemessages) # ``local_settings.py`` is used to override horizon default settings. local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings + # Ensure local_setting.py file ends with EOL (newline) + echo >> $local_settings + _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\" _horizon_config_set $local_settings "" COMPRESS_OFFLINE True - _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\" + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"member\" _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" - _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" + # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed + # from outside the virtual machine. 
This fix is meant primarily for local development + # purposes + _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"] + if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" fi + if is_service_enabled ldap; then + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" + fi + + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole local horizon_conf horizon_conf=$(apache_site_config_for horizon) + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + # Configure apache to run horizon # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" @@ -117,14 +133,13 @@ function configure_horizon { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf elif is_fedora; then - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - elif is_suse; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" @@ -153,46 +168,28 @@ function init_horizon { } -# install_django_openstack_auth() - Collect source and prepare -function install_django_openstack_auth { - if use_library_from_git "django_openstack_auth"; then - local dir=${GITDIR["django_openstack_auth"]} - git_clone_by_name "django_openstack_auth" - # Compile message catalogs before installation - _prepare_message_catalog_compilation - (cd $dir; python setup.py compile_catalog) - setup_dev_lib "django_openstack_auth" - fi - # if we aren't using this library from git, then we just let it - # get dragged in by the horizon setup. -} - # install_horizon() - Collect source and prepare function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } -# start_horizon() - Start running processes, including screen +# start_horizon() - Start running processes function start_horizon { restart_apache_server - tail_log horizon /var/log/$APACHE_NAME/horizon_error.log } -# stop_horizon() - Stop running processes (non-screen) +# stop_horizon() - Stop running processes function stop_horizon { stop_apache_server } -# NOTE: It can be moved to common functions, but it is only used by compilation -# of django_openstack_auth catalogs at the moment. -function _prepare_message_catalog_compilation { - pip_install_gr Babel -} - - # Restore xtrace $_XTRACE_HORIZON diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..58062eff6b --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage.
The drawback is higher cpu load; however, we tend to + # be memory bound, not cpu bound, so we enable KSM by default but allow people + # to opt out if CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# As noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. + +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} +function configure_zswap { + if [[ $ENABLE_ZSWAP == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning, so don't try to choose a better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print current zswap kernel config + sudo grep -R . /sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # Assume swap is compressed, so on new kernels + # give it equal priority with page cache, which is + # uncompressed. On kernels < 5.8 the max is 100, + # not 200, so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reduce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R .
/proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} diff --git a/lib/infra b/lib/infra index cf003cce01..f4760c352c 100644 --- a/lib/infra +++ b/lib/infra @@ -29,9 +29,9 @@ GITDIR["pbr"]=$DEST/pbr # install_infra() - Collect source and prepare function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" - [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV + [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped diff --git a/lib/keystone b/lib/keystone index 6198e43b58..840103b9f4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -9,7 +9,6 @@ # - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``FILES`` -# - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # - ``S3_SERVICE_PORT`` (template backend only) @@ -25,7 +24,6 @@ # - create_keystone_accounts # - stop_keystone # - cleanup_keystone -# - _cleanup_keystone_apache_wsgi # Save trace setting _XTRACE_KEYSTONE=$(set +o | grep xtrace) @@ -50,30 +48,8 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf -KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} - -# NOTE(sdague): remove in Newton -KEYSTONE_CATALOG_BACKEND="sql" - -# Toggle for deploying Keystone under HTTPD + mod_wsgi -# Deprecated in Mitaka, use KEYSTONE_DEPLOY instead. 
-KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} - -# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: -# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi -# - uwsgi : Run keystone under uwsgi -if [ -z "$KEYSTONE_DEPLOY" ]; then - if [ -z "$KEYSTONE_USE_MOD_WSGI" ]; then - KEYSTONE_DEPLOY=mod_wsgi - elif [ "$KEYSTONE_USE_MOD_WSGI" == True ]; then - KEYSTONE_DEPLOY=mod_wsgi - else - KEYSTONE_DEPLOY=uwsgi - fi -fi - -# Select the token persistence backend driver -KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} +KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -88,25 +64,17 @@ KEYSTONE_ROLE_BACKEND=${KEYSTONE_ROLE_BACKEND:-sql} KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} # Select Keystone's token provider (and format) -# Choose from 'uuid', 'pki', 'pkiz', or 'fernet' -KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-} +# Refer to the keystone docs for supported token providers: +# https://docs.openstack.org/keystone/latest/admin/token-provider.html +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') -# Set Keystone interface configuration -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} - # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Bind hosts -KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} - # Set the project for service accounts in Keystone SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} @@ -116,25 +84,42 @@ SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} # if we are running with SSL, use https protocols -if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - KEYSTONE_AUTH_PROTOCOL="https" +if is_service_enabled tls-proxy; then KEYSTONE_SERVICE_PROTOCOL="https" fi -# complete URIs -if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # If running in Apache, use path access rather than port. - KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_v2_admin - KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity -else - KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT} - KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT} -fi +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity +# for compat +KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI
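With the uwsgi deployment keystone always lives under the /identity path on the service host, so the derived URIs carry no port. A quick sanity check against the version document, as a sketch (203.0.113.10 stands in for SERVICE_HOST; the v3 URIs are defined just below):

# KEYSTONE_SERVICE_URI resolves to http://203.0.113.10/identity; keystone
# serves a JSON version document at the /v3 suffix.
curl -s http://203.0.113.10/identity/v3 | python3 -m json.tool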
# V3 URIs -KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3 +KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3 KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 +# Security compliance +KEYSTONE_SECURITY_COMPLIANCE_ENABLED=${KEYSTONE_SECURITY_COMPLIANCE_ENABLED:-True} +KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2} +KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-10} +KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} + +# Number of bcrypt hashing rounds; increasing the number exponentially increases the +# resources required to generate a password hash. This is a very effective way to protect +# against brute-force attacks. 4 is the minimal value that can be specified for bcrypt and +# is much faster than the default of 12. The minimal value is great for CI and development +# but may not be suitable for real production. +KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} + +# Cache settings +KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} + +# Whether to create a keystone admin endpoint for legacy applications +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) + +# Flag to set oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of tokens. By default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)
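These defaults deliberately trade hardening for speed in CI; a deployment that wants production-like behavior can override them in local.conf. A minimal sketch using only the variables defined above (values are illustrative):

[[local|localrc]]
KEYSTONE_PASSWORD_HASH_ROUNDS=12
KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=5
KEYSTONE_LOCKOUT_DURATION=600
KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=5
KEYSTONE_ENFORCE_SCOPE=True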
# Functions # --------- @@ -142,6 +127,7 @@ KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 # Test if Keystone is enabled # is_keystone_enabled function is_keystone_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"keystone" ]] && return 1 [[ ,${ENABLED_SERVICES}, =~ ,"key", ]] && return 0 return 1 } @@ -149,33 +135,24 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - _cleanup_keystone_apache_wsgi -} - -# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_keystone_apache_wsgi { - sudo rm -f $(apache_site_config_for keystone) + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone function _config_keystone_apache_wsgi { local keystone_apache_conf keystone_apache_conf=$(apache_site_config_for keystone) + keystone_ssl_listen="#" local keystone_ssl="" local keystone_certfile="" local keystone_keyfile="" local keystone_service_port=$KEYSTONE_SERVICE_PORT - local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" - if is_ssl_enabled_service key; then - keystone_ssl="SSLEngine On" - keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT" - keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY" - fi if is_service_enabled tls-proxy; then keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" fi @@ -184,8 +161,8 @@ function _config_keystone_apache_wsgi { sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " s|%PUBLICPORT%|$keystone_service_port|g; - s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; + s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; s|%SSLCERTFILE%|$keystone_certfile|g; s|%SSLKEYFILE%|$keystone_keyfile|g; @@ -200,83 +177,42 @@ function configure_keystone { sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then - install -m 600 $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF - cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR - if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then - cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" - fi - fi - if [[ -f "$KEYSTONE_PASTE_INI" ]]; then - iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI" - else - # compatibility with mixed cfg and paste.deploy configuration - KEYSTONE_PASTE_INI="$KEYSTONE_CONF" + install -m 600 /dev/null $KEYSTONE_CONF fi - - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - # Only Identity v3 API should be available; then disable v2 pipelines - inidelete $KEYSTONE_PASTE_INI composite:main \\/v2.0 - inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0 - fi - - # Rewrite stock ``keystone.conf`` - + # Populate ``keystone.conf`` if is_service_enabled ldap; then - #Set all needed ldap values - iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD - iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN - iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
- iniset $KEYSTONE_CONF ldap use_dumb_member "True" - iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" - iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" - iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" - iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" - iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN" - iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory" - iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" - iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab" - iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" + iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains" + iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True" fi - iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND" + iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND" iniset $KEYSTONE_CONF role driver "$KEYSTONE_ROLE_BACKEND" iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND" # Enable caching - iniset $KEYSTONE_CONF cache enabled "True" - iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool" - iniset $KEYSTONE_CONF cache memcache_servers localhost:11211 + iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE + iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND + iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS - # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617 - iniset $KEYSTONE_CONF catalog caching "False" + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlight bugs in our response schemas. + iniset $KEYSTONE_CONF api response_validation error - iniset_rpc_backend keystone $KEYSTONE_CONF - - # Register SSL certificates if provided - if is_ssl_enabled_service key; then - ensure_certificates KEYSTONE - fi + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT - local auth_port=$KEYSTONE_AUTH_PORT if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals service_port=$KEYSTONE_SERVICE_PORT_INT - auth_port=$KEYSTONE_AUTH_PORT_INT fi - # Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. By default, the - # keystone server uses the public_port and admin_port which isn't going to - # work when you want to use a different port (in the case of proxy), or you - # don't want the port (in the case of putting keystone on a path in - # apache). - if is_service_enabled tls-proxy || [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI - fi + # Override the endpoints advertised by keystone so that clients use the correct + # endpoint. By default, the keystone server uses the public_port which isn't + # going to work when you want to use a different port (in the case of proxy), + # or you don't want the port (in the case of putting keystone on a path in Apache).
+ iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT @@ -284,72 +220,45 @@ function configure_keystone { iniset $KEYSTONE_CONF database connection `database_connection_url keystone` - iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND" - # Set up logging if [ "$SYSLOG" != "False" ]; then iniset $KEYSTONE_CONF DEFAULT use_syslog "True" fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$KEYSTONE_DEPLOY" != "mod_wsgi" ] ; then - setup_colorized_logging $KEYSTONE_CONF DEFAULT - fi + setup_logging $KEYSTONE_CONF iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" - _config_keystone_apache_wsgi - else # uwsgi - # iniset creates these files when it's called if they don't exist. - KEYSTONE_PUBLIC_UWSGI_FILE=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini - KEYSTONE_ADMIN_UWSGI_FILE=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini - - rm -f "$KEYSTONE_PUBLIC_UWSGI_FILE" - rm -f "$KEYSTONE_ADMIN_UWSGI_FILE" - - if is_ssl_enabled_service key; then - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi https $KEYSTONE_SERVICE_HOST:$service_port,$KEYSTONE_SSL_CERT,$KEYSTONE_SSL_KEY - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi https $KEYSTONE_ADMIN_BIND_HOST:$auth_port,$KEYSTONE_SSL_CERT,$KEYSTONE_SSL_KEY - else - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi http $KEYSTONE_SERVICE_HOST:$service_port - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi http $KEYSTONE_ADMIN_BIND_HOST:$auth_port - fi - - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-public" - iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi processes $(nproc) - - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-admin" - iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi processes $API_WORKERS - - # Common settings - for file in "$KEYSTONE_PUBLIC_UWSGI_FILE" "$KEYSTONE_ADMIN_UWSGI_FILE"; do - # This is running standalone - iniset "$file" uwsgi master true - # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload true - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins python - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true - # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 - # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" - done - fi + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" + iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/" + # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. # The users from this project are globally admin as before, but it also # allows policy changes in order to clarify the adminess scope. 
- iniset $KEYSTONE_CONF resource admin_project_domain_name Default - iniset $KEYSTONE_CONF resource admin_project_name admin + #iniset $KEYSTONE_CONF resource admin_project_domain_name Default + #iniset $KEYSTONE_CONF resource admin_project_name admin + + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT + fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false + fi } # create_keystone_accounts() - Sets up common required keystone accounts @@ -360,55 +269,60 @@ function configure_keystone { # service -- -- # -- -- service # -- -- ResellerAdmin -# -- -- Member +# -- -- member # demo admin admin -# demo demo Member, anotherrole +# demo demo member, anotherrole # alt_demo admin admin -# alt_demo alt_demo Member, anotherrole -# invisible_to_admin demo Member +# alt_demo alt_demo member, anotherrole +# invisible_to_admin demo member # Group Users Roles Project # ------------------------------------------------------------------ # admins admin admin admin -# nonadmins demo, alt_demo Member, anotherrole demo, alt_demo +# nonadmins demo, alt_demo member, anotherrole demo, alt_demo + +# System User Roles +# ------------------------------------------------------------------ +# all admin admin +# all system_reader reader +# all system_member member # Migrated from keystone_data.sh function create_keystone_accounts { - # The keystone bootstrapping process (performed via keystone-manage bootstrap) - # creates an admin user, admin role and admin project. As a sanity check - # we exercise the CLI to retrieve the IDs for these values. + # The keystone bootstrapping process (performed via keystone-manage + # bootstrap) creates an admin user and an admin + # project. As a sanity check we exercise the CLI to retrieve the IDs for + # these values. local admin_project admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) - local admin_role - admin_role=$(openstack role show "admin" -f value -c id) + # These roles are also created during bootstrap but we don't need their IDs + local admin_role="admin" + local member_role="member" + local reader_role="reader" - get_or_add_user_domain_role $admin_role $admin_user default + async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default # Create service project/role get_or_create_domain "$SERVICE_DOMAIN_NAME" - get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" + async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" # Service role, so service users do not have to be admins - get_or_create_role service + async_run ks-service get_or_create_role service # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their project, # but ResellerAdmin is needed for a user to act as any project. 
The name of this + # role is also configurable in swift-proxy.conf - get_or_create_role ResellerAdmin - - # The Member role is used by Horizon and Swift so we need to keep it: - local member_role - member_role=$(get_or_create_role "Member") + async_run ks-reseller get_or_create_role ResellerAdmin # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! - local another_role - another_role=$(get_or_create_role "anotherrole") + local another_role="anotherrole" + async_run ks-anotherrole get_or_create_role $another_role # invisible project - admin can't see this one local invis_project @@ -421,21 +335,55 @@ function create_keystone_accounts { demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "default" "demo@example.com") - get_or_add_user_project_role $member_role $demo_user $demo_project - get_or_add_user_project_role $admin_role $admin_user $demo_project - get_or_add_user_project_role $another_role $demo_user $demo_project - get_or_add_user_project_role $member_role $demo_user $invis_project + async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} + + async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project - # alt_demo + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project + async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project + async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project + + # Create a user to act as a reader on project demo + local demo_reader + demo_reader=$(get_or_create_user "demo_reader" \ + "$ADMIN_PASSWORD" "default" "demo_reader@example.com") + + async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project + + # Create a different project called alt_demo local alt_demo_project alt_demo_project=$(get_or_create_project "alt_demo" default) + # Create a user to act as admin and anotherrole on project alt_demo local alt_demo_user alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - get_or_add_user_project_role $admin_role $admin_user $alt_demo_project - get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project + async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + + # Create another user to act as a member on project alt_demo + local alt_demo_member + alt_demo_member=$(get_or_create_user "alt_demo_member" \ + "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com") + async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project + + # Create another user to act as a reader on project alt_demo + local alt_demo_reader + alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project
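The ks-* names given to async_run above are job handles for DevStack's async helpers: async_run forks the command and tracks it under that handle, and async_wait (used throughout this function, with brace expansion over handle names) blocks until each named job completes, letting independent keystone calls run concurrently. A minimal sketch of the pattern, with hypothetical handle names:

# Launch two independent API calls in the background ...
async_run my-role get_or_create_role myrole
async_run my-project get_or_create_project myproject default
# ... then block until both finish; my-{role,project} expands to "my-role my-project"
async_wait my-{role,project}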
+ + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. + system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" # groups local admin_group @@ -445,52 +393,72 @@ function create_keystone_accounts { non_admin_group=$(get_or_create_group "nonadmins" \ "default" "non-admin group") - get_or_add_group_project_role $member_role $non_admin_group $demo_project - get_or_add_group_project_role $another_role $non_admin_group $demo_project - get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $admin_role $admin_group $admin_project + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{admin,another,member-user,reader-user} + async_wait ks-system-{member,reader} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} + + if is_service_enabled ldap; then + create_ldap_domain + fi } # Create a user that is capable of verifying keystone tokens for use with auth_token middleware. # # create_service_user [role] # -# The role defaults to the service role. It is allowed to be provided as optional as historically +# We always add the service role; other roles may also be added, since historically # a lot of projects have configured themselves with the admin or other role here if they are # using this user for other purposes beyond simply auth_token middleware. function create_service_user { - local role=${2:-service} - get_or_create_user "$1" "$SERVICE_PASSWORD" "$SERVICE_DOMAIN_NAME" - get_or_add_user_project_role "$role" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + get_or_add_user_project_role service "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + + if [[ -n "$2" ]]; then + get_or_add_user_project_role "$2" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + fi } -# Configure the service to use the auth token middleware. +# Configure a service to use the auth token middleware. # -# configure_auth_token_middleware conf_file admin_user signing_dir [section] +# configure_keystone_authtoken_middleware conf_file admin_user IGNORED [section] # # section defaults to keystone_authtoken, which is where auth_token looks in # the .conf file. If the paste config file is used (api-paste.ini) then # provide the section name for the auth_token filter. -function configure_auth_token_middleware { +function configure_keystone_authtoken_middleware { local conf_file=$1 local admin_user=$2 - local signing_dir=$3 - local section=${4:-keystone_authtoken} + local section=${3:-keystone_authtoken} + local service_type=$4 iniset $conf_file $section auth_type password - iniset $conf_file $section auth_url $KEYSTONE_AUTH_URI + iniset $conf_file $section interface public + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI iniset $conf_file $section username $admin_user iniset $conf_file $section password $SERVICE_PASSWORD iniset $conf_file $section user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section project_name $SERVICE_PROJECT_NAME iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_URI iniset $conf_file $section cafile $SSL_BUNDLE_FILE - iniset $conf_file $section signing_dir $signing_dir - iniset $conf_file $section memcached_servers $SERVICE_HOST:11211 + iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi +} + +# configure_auth_token_middleware conf_file admin_user IGNORED [section] +# TODO(frickler): old function for backwards compatibility, remove in U cycle +function configure_auth_token_middleware { + echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead" + configure_keystone_authtoken_middleware $1 $2 $4 }
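A typical caller therefore only needs the config file and the service user; section and service_type are optional positional arguments. A hypothetical sketch (NOVA_CONF and the nova user are stand-ins; any service script would look similar):

# Create the service user (service role is implicit, "admin" is an extra role),
# then point the service's [keystone_authtoken] section at it.
create_service_user "nova" "admin"
configure_keystone_authtoken_middleware $NOVA_CONF nova
# Passing a non-default section and a service_type:
configure_keystone_authtoken_middleware $NOVA_CONF nova keystone_authtoken compute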
# init_keystone() - Initialize databases, etc. @@ -499,21 +467,23 @@ function init_keystone { init_ldap fi - # (Re)create keystone database - recreate_database keystone + if [[ "$RECREATE_KEYSTONE_DB" == True ]]; then + # (Re)create keystone database + recreate_database keystone + fi + time_start "dbsync" # Initialize keystone database $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync + time_stop "dbsync" - if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then - # Set up certificates - rm -rf $KEYSTONE_CONF_DIR/ssl - $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF pki_setup - fi if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup fi + rm -rf "$KEYSTONE_CONF_DIR/credential-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup + } # install_keystoneauth() - Collect source and prepare @@ -561,36 +531,19 @@ function install_keystone { if is_service_enabled ldap; then setup_develop $KEYSTONE_DIR ldap fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - if is_ssl_enabled_service "key"; then - enable_mod_ssl - fi - elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then - pip_install uwsgi - fi } -# start_keystone() - Start running processes, including screen +# start_keystone() - Start running processes function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_AUTH_PROTOCOL + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$KEYSTONE_SERVICE_PORT_INT auth_protocol="http" fi - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site keystone - restart_apache_server - tail_log key /var/log/$APACHE_NAME/keystone.log - tail_log key-access /var/log/$APACHE_NAME/keystone_access.log - else # uwsgi - run_process key
"$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_PUBLIC_UWSGI_FILE" "" "key-p" - run_process key "$KEYSTONE_BIN_DIR/uwsgi $KEYSTONE_ADMIN_UWSGI_FILE" "" "key-a" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." # Check that the keystone service is running. Even if the tls tunnel @@ -598,10 +551,7 @@ function start_keystone { # unencryted traffic at this point. # If running in Apache, use the path rather than port. - local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ - fi + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -609,8 +559,7 @@ function start_keystone { # Start proxies if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & - start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT fi # (re)start memcached to make sure we have a clean memcache. @@ -619,24 +568,15 @@ function start_keystone { # stop_keystone() - Stop running processes function stop_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site keystone - restart_apache_server - fi - # Kill the Keystone screen window - stop_process key + stop_process keystone } # bootstrap_keystone() - Initialize user, role and project # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``IDENTITY_API_VERSION`` -# - ``KEYSTONE_AUTH_URI`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_PROTOCOL`` -# - ``KEYSTONE_SERVICE_HOST`` -# - ``KEYSTONE_SERVICE_PORT`` +# - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ @@ -645,9 +585,68 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \ - --bootstrap-public-url "$KEYSTONE_SERVICE_URI" \ - --bootstrap-internal-url "$KEYSTONE_SERVICE_URI" + --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi +} + +# create_ldap_domain() - Create domain file and initialize domain with a user +function create_ldap_domain { + # Creates domain Users + openstack domain create --description "LDAP domain" Users + + # Create domain file inside etc/keystone/domains + KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf + mkdir -p "$KEYSTONE_CONF_DIR/domains" + touch "$KEYSTONE_LDAP_DOMAIN_FILE" + + # Set identity driver 'ldap' + iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap" + + # LDAP settings for Users domain + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap 
user_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap password $LDAP_PASSWORD + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_tree_dn "ou=Groups,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_objectclass "groupOfNames" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_id_attribute "cn" + + # Restart apache and identity services to associate domain and conf file + sudo service apache2 reload + sudo systemctl restart devstack@keystone + + # Create LDAP user.ldif and add user to LDAP backend + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + _ldap_varsubst $FILES/ldap/user.ldif.in $slappass >$tmp_ldap_dir/user.ldif + sudo ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/user.ldif + rm -rf $tmp_ldap_dir + + local admin_project + admin_project=$(get_or_create_project "admin" default) + local ldap_user + ldap_user=$(openstack user show --domain=Users demo -f value -c id) + local admin_role="admin" + get_or_create_role $admin_role + + # Grant demo LDAP user access to project and role + get_or_add_user_project_role $admin_role $ldap_user $admin_project } # Restore xtrace diff --git a/lib/ldap b/lib/ldap index 4cea812d3c..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -33,16 +33,12 @@ LDAP_SERVICE_NAME=slapd if is_ubuntu; then LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb LDAP_ROOTPW_COMMAND=replace elif is_fedora; then LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -56,6 +52,7 @@ function _ldap_varsubst { local slappass=$2 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| s|\${SLAPPASS}|$slappass| s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| s|\${BASE_DC}|$LDAP_BASE_DC| @@ -72,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -87,6 +82,14 @@ function init_ldap { # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. 
+ sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then @@ -119,15 +122,9 @@ function install_ldap { printf "installing OpenLDAP" if is_ubuntu; then - # Ubuntu automatically starts LDAP so no need to call start_ldap() - : + configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" @@ -148,6 +145,27 @@ rm -rf $tmp_ldap_dir } +# configure_ldap() - Configure LDAP - reconfigure slapd +function configure_ldap { + sudo debconf-set-selections </dev/null)" ]]; then - _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably set up by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi fi } - # _create_lvm_volume_group creates default volume group # # Usage: _create_lvm_volume_group() $vg $size @@ -90,8 +92,27 @@ function _create_lvm_volume_group { if ! sudo vgs $vg; then # Only create if the file doesn't already exist [[ -f $backing_file ]] || truncate -s $size $backing_file + + local directio="" + # Check to see if we can do direct-io + if losetup -h | grep -q direct-io; then + directio="--direct-io=on" + fi + + # Only create systemd service if it doesn't already exist + if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + local vg_dev - vg_dev=`sudo losetup -f --show $backing_file` + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) # Only create volume group if it doesn't already exist if ! sudo vgs $vg; then
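The template-generated systemd unit re-attaches the backing file as a loop device on every boot, and the losetup --associated query then recovers whichever device was assigned rather than guessing its name. A manual-equivalent sketch (paths assume DATA_DIR=/opt/stack/data and a volume group named stack-volumes-default; the real unit is generated from files/lvm-backing-file.template, not shown here):

# Create a sparse backing file and attach it, using direct I/O when supported
truncate -s 10G /opt/stack/data/stack-volumes-default-backing-file
sudo losetup -f --show --direct-io=on /opt/stack/data/stack-volumes-default-backing-file
# Later, recover the loop device attached to that file
sudo losetup --associated /opt/stack/data/stack-volumes-default-backing-file -O NAME -n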
@@ -103,28 +124,30 @@ function _create_lvm_volume_group { # init_lvm_volume_group() initializes the volume group, creating the backing # file if necessary # -# Usage: init_lvm_volume_group() $vg +# Usage: init_lvm_volume_group() $vg $size function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the lvmetad and tgtd services - if is_fedora || is_suse; then - # services is not started by default - start_service lvm2-lvmetad - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then - start_service tgtd - fi + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then + start_service tgtd fi # Start with a clean volume group _create_lvm_volume_group $vg $size - # Remove iscsi targets - if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then - sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - else - sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi fi _clean_lvm_volume_group $vg } @@ -177,7 +200,7 @@ function set_lvm_filter { filter_string=$filter_string$filter_suffix clean_lvm_filter - sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } diff --git a/lib/neutron b/lib/neutron index ad68d8e62f..dec15fb782 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,77 +1,305 @@ #!/bin/bash # # lib/neutron -# Install and start **Neutron** network services +# functions - functions specific to neutron # Dependencies: -# # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # -# - is_XXXX_enabled -# - install_XXXX -# - configure_XXXX -# - init_XXXX -# - start_XXXX -# - stop_XXXX -# - cleanup_XXXX +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron + +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace -# Defaults # Neutron Networking # ------------------ # Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``.
+# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. + +# Settings # -------- + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient -NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_BIN_DIR=$(get_python_exec_prefix) -NEUTRON_DHCP_BINARY="neutron-dhcp-agent" +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + +NEUTRON_UWSGI=neutron.wsgi.api:application +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini + +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in Neutron's config to enforce usage +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) + +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Metadata IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow overlapping IPs among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow skipping the stop of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi -NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini -NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini -NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ -NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi -# By default, use the ML2 plugin -NEUTRON_PLUGIN=${NEUTRON_PLUGIN:-ml2} -NEUTRON_PLUGIN_CONF_FILENAME=${NEUTRON_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_PLUGIN -NEUTRON_PLUGIN_CONF=$NEUTRON_PLUGIN_CONF_PATH/$NEUTRON_PLUGIN_CONF_FILENAME +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch agent. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} -NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} -NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} -NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
-# Public facing bits -if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then - NEUTRON_SERVICE_PROTOCOL="https" +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use the DHCP agent to provide the metadata service when running +# without an L3 agent; set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as a DHCP option, so requests to 169.254.169.254 +# can be routed via the DHCP agent. +# This option requires ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} # Neutron plugin specific functions # --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN fi -NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} -NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696} -NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696} -NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} -NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) -NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# Agent metering service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering + +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 + +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments + +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False +fi + +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initializes PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} + +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace -# Add all enabled config files to a single config arg -NEUTRON_CONFIG_ARG=${NEUTRON_CONFIG_ARG:-""} # Functions # --------- @@ -79,271 +307,216 @@ NEUTRON_CONFIG_ARG=${NEUTRON_CONFIG_ARG:-""} # Test if any Neutron services are enabled # is_neutron_enabled function is_neutron_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"neutron-" || ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 return 1 }
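The networking toggles defined above combine in local.conf; a minimal sketch for a VLAN provider setup with DHCP-served metadata (values taken from the inline examples above):

[[local|localrc]]
ENABLE_TENANT_TUNNELS=False
ENABLE_TENANT_VLANS=True
TENANT_VLAN_RANGE=1000:1999
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-eth1
ENABLE_ISOLATED_METADATA=True
ENABLE_METADATA_NETWORK=True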
# Test if any Neutron services are enabled -# is_neutron_enabled +# TODO(slaweq): this is not really needed now; remove it as soon as it is +# no longer called from any other DevStack plugins, e.g. the Neutron plugin function is_neutron_legacy_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 + return 0 } -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron_new { - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi fi - - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." fi - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) done -} - -# configure_neutron() - Set config files, create data dirs, etc -function configure_neutron_new { - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" +} - configure_neutron_rootwrap +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} - mkdir -p $NEUTRON_PLUGIN_CONF_PATH +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_PLUGIN/$NEUTRON_PLUGIN_CONF_FILENAME.sample $NEUTRON_PLUGIN_CONF +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" + fi +} - iniset $NEUTRON_CONF database connection `database_connection_url neutron` - iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} - iniset $NEUTRON_CONF DEFAULT debug True +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +}
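determine_config_files is how the launchers assemble the --config-file argument list; a usage sketch (assuming the ml2 plugin script has set Q_PLUGIN_CONF_FILE, and noting that this is the cfg_file_options variable consumed by _run_ovn_maintenance above; real service scripts may run the API under uwsgi instead):

# For neutron-server this expands to roughly:
#   --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
cfg_file_options=$(determine_config_files neutron-server)
run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"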
# configure_neutron() +# Set common config for the neutron server and all agents. +function configure_neutron { + _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # Neutron API server & Neutron plugin - if is_service_enabled neutron-api; then - local policy_file=$NEUTRON_CONF_DIR/policy.json - cp $NEUTRON_DIR/etc/policy.json $policy_file - # Allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file - - cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - - iniset $NEUTRON_CONF DEFAULT core_plugin ml2 - - iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True - - iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken - - # Configuration for neutron notifations to nova. - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - iniset $NEUTRON_CONF nova auth_type password - iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" - iniset $NEUTRON_CONF nova username nova - iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD - iniset $NEUTRON_CONF nova user_domain_id default - iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME - iniset $NEUTRON_CONF nova project_domain_id default - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - # Configure VXLAN - # TODO(sc68cal) not hardcode? - iniset $NEUTRON_PLUGIN_CONF ml2 tenant_network_types vxlan - iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan - iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge - iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - fi - - # Neutron OVS or LB agent - if is_service_enabled neutron-agent; then - iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_PLUGIN_CONF DEFAULT debug True - - # Configure the neutron agent - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_PLUGIN_CONF securitygroup iptables - iniset $NEUTRON_PLUGIN_CONF vxlan local_ip $HOST_IP - else - iniset $NEUTRON_PLUGIN_CONF securitygroup iptables_hybrid - iniset $NEUTRON_PLUGIN_CONF ovs local_ip $HOST_IP - fi + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering fi - - # DHCP Agent - if is_service_enabled neutron-dhcp; then - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - - iniset $NEUTRON_DHCP_CONF DEFAULT debug True - iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" - iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent fi - - if is_service_enabled neutron-l3; then - cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - iniset $NEUTRON_CONF DEFAULT service_plugins router - iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" - iniset $NEUTRON_L3_CONF DEFAULT debug True - neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent fi - - # Metadata - if is_service_enabled neutron-metadata-agent; then - cp
$NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - - iniset $NEUTRON_META_CONF DEFAULT debug True - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST - iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD" - - # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI/v2.0 - configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id - else - # Show user_name and project_name by default - iniset $NEUTRON_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent fi - if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr fi - - if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications fi - if is_ssl_enabled_service "neutron"; then - ensure_certificates NEUTRON - - iniset $NEUTRON_CONF DEFAULT use_ssl True - iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" - iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin fi - # Metering - if is_service_enabled neutron-metering; then - source $TOP_DIR/lib/neutron_plugins/services/metering - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension fi - -} - -# configure_neutron_rootwrap() - configure Neutron's rootwrap -function configure_neutron_rootwrap { - # Set the paths of certain binaries - neutron_rootwrap=$(get_rootwrap_location neutron) - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - local rootwrap_sudoer_cmd="${neutron_rootwrap} $NEUTRON_CONF_DIR/rootwrap.conf" - - # Deploy new rootwrap filters files (owned by root). 
- # Wipe any existing rootwrap.d files first - if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos + fi + fi + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension fi - # Deploy filters to /etc/neutron/rootwrap.d - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service + fi - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - # Set up the rootwrap sudoers for Neutron - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd *" >$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } -# Make Neutron-required changes to nova.conf -function configure_neutron_nova_new { - iniset $NOVA_CONF DEFAULT use_neutron True - iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" - iniset $NOVA_CONF neutron username neutron - iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron user_domain_name "Default" - iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF neutron project_domain_name "Default" - iniset $NOVA_CONF neutron auth_strategy $NEUTRON_AUTH_STRATEGY - iniset $NOVA_CONF neutron region_name "$REGION_NAME" - iniset $NOVA_CONF neutron url $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT - - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done + fi +} - if is_service_enabled neutron-metadata-agent; then - iniset $NOVA_CONF neutron service_metadata_proxy "True" +function create_nova_conf_neutron { + local conf=${1:-$NOVA_CONF} + iniset $conf neutron auth_type "password" + iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf neutron username nova + iniset $conf neutron password "$SERVICE_PASSWORD" + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy 
"$Q_AUTH_STRATEGY" + iniset $conf neutron region_name "$REGION_NAME" + + # optionally set options in nova_conf + neutron_plugin_create_nova_conf $conf + + if is_service_enabled q-meta neutron-metadata-agent; then + iniset $conf neutron service_metadata_proxy "True" fi + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } +# create_neutron_accounts() - Set up common required neutron accounts + # Tenant User Roles # ------------------------------------------------------------------ # service neutron admin # if enabled -# create_neutron_accounts() - Create required service accounts -function create_neutron_accounts_new { - if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then +# Migrated from keystone_data.sh +function create_neutron_accounts { + local neutron_url + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi + + if is_service_enabled q-svc neutron-api; then create_service_user "neutron" - neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ - "$REGION_NAME" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" \ - "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/" + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ + "$REGION_NAME" "$neutron_url" fi } -# create_neutron_cache_dir() - Part of the init_neutron() process -function create_neutron_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* -} - # init_neutron() - Initialize databases, etc. -function init_neutron_new { - - recreate_database neutron - +function init_neutron { + recreate_database $Q_DB_NAME + time_start "dbsync" # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage $NEUTRON_CONFIG_ARG upgrade heads - - create_neutron_cache_dir + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + time_stop "dbsync" } # install_neutron() - Collect source and prepare -function install_neutron_new { - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - +function install_neutron { # Install neutron-lib from git so we make sure we're testing # the latest code. 
if use_library_from_git "neutron-lib"; then @@ -351,17 +524,23 @@ function install_neutron_new { setup_dev_lib "neutron-lib" fi - # L3 service requires radvd - if is_service_enabled neutron-l3; then - install_package radvd + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR fi - - if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then - #TODO(sc68cal) - kind of ugly - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - neutron_plugin_install_agent_packages + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi } # install_neutronclient() - Collect source and prepare @@ -369,195 +548,611 @@ function install_neutronclient { if use_library_from_git "python-neutronclient"; then git_clone_by_name "python-neutronclient" setup_dev_lib "python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion fi } -# start_neutron_api() - Start the API process ahead of other things -function start_neutron_api { - local service_port=$NEUTRON_SERVICE_PORT - local service_protocol=$NEUTRON_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$NEUTRON_SERVICE_PORT_INT - service_protocol="http" +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. + if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages fi +} - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_PLUGIN_CONF" +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} - if is_ssl_enabled_service "neutron"; then - ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$NEUTRON_SERVICE_HOST:$service_port" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True else - if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then - die $LINENO "neutron-api did not start" + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to set up a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. + init_ovn + start_ovn + fi + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi fi fi +} +# Start running processes +function start_neutron_service_and_check { + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options + local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" - # Start proxy if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT & + service_port=$Q_PORT_INT + service_protocol="http" + fi + + # Start the Neutron service + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server + fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi + echo "Waiting for Neutron to start..."
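The rpc_workers handling above can be exercised from a deployment's local.conf via the standard post-config mechanism; a minimal sketch:

    [[post-config|$NEUTRON_CONF]]
    [DEFAULT]
    rpc_workers = 0

With that setting in place, neither the enable_service nor the run_process call for neutron-rpc-server takes effect.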
+ + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT +} + +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} + +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + fi +} + +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" +} + +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" } -# start_neutron() - Start running processes, including screen -function start_neutron_new { - _set_config_files +function stop_l2_agent { + stop_process q-agt +} - # Start up the neutron agents if enabled - # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins - # can resolve the $NEUTRON_AGENT_BINARY - if is_service_enabled neutron-agent; then - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY $NEUTRON_CONFIG_ARG" +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! 
-z "$pid" ] && sudo kill -9 $pid fi - if is_service_enabled neutron-dhcp; then - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY $NEUTRON_CONFIG_ARG" + + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance + + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 fi - if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG" - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network - fi + + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta fi - if is_service_enabled neutron-metadata-agent; then - run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG" + + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop fi - if is_service_enabled neutron-metering; then - run_process neutron-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : fi } # stop_neutron() - Stop running processes (non-screen) -function stop_neutron_new { - for serv in neutron-api neutron-agent neutron-l3; do - stop_process $serv - done +function stop_neutron { + stop_other + stop_l2_agent - if is_service_enabled neutron-dhcp; then - stop_process neutron-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi +} + +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. 
+ + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi - if is_service_enabled neutron-metadata-agent; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process neutron-metadata-agent + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD fi } -# Compile the lost of enabled config files -function _set_config_files { +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and pivot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just set up NAT so that fixed guests can get out.
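One note on the NAT fallback below: $default_v4_route_devs is assumed to be provided by the surrounding DevStack environment rather than defined in this file. A hedged sketch of deriving an equivalent device list:

    # Assumption, not from this diff: devices carrying a default IPv4 route.
    default_v4_route_devs=$(ip -4 route list 0/0 | awk '{print $5}' | sort -u)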
+ if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi +} - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF" +# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_neutron { + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi - #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not - if is_service_enabled neutron-agent; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_PLUGIN_CONF" + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi - if is_service_enabled neutron-dhcp; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_DHCP_CONF" + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup fi - if is_service_enabled neutron-l3; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_L3_CONF" - fi + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done - if is_service_enabled neutron-metadata-agent; then - NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_META_CONF" + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn fi +} + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR } -# Dispatch functions -# These are needed for compatibility between the old and new implementations -# where there are function name overlaps. These will be removed when -# neutron-legacy is removed. -# TODO(sc68cal) Remove when neutron-legacy is no more. -function cleanup_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - cleanup_mutnauq "$@" +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. 
+function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova to work correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE else - cleanup_neutron_new "$@" + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. exiting" fi + + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository; + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + setup_logging $NEUTRON_CONF + + _neutron_setup_rootwrap } -function configure_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - configure_mutnauq "$@" - else - configure_neutron_new "$@" +function _configure_neutron_dhcp_agent { + + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if !
is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi + fi fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function configure_neutron_nova { - if is_neutron_legacy_enabled; then - # Call back to old function - create_nova_conf_neutron "$@" + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE +} + +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 +} + +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. +function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. +function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE else - configure_neutron_nova_new "$@" + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE fi + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. 
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service } -function create_neutron_accounts { - if is_neutron_legacy_enabled; then - # Call back to old function - create_mutnauq_accounts "$@" - else - create_neutron_accounts_new "$@" +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class +function neutron_service_plugin_class_add { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" fi } -function init_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - init_mutnauq "$@" - else - init_neutron_new "$@" +# neutron_ml2_extension_driver_add() - add ML2 extension driver +function neutron_ml2_extension_driver_add { + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" fi } -function install_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - install_mutnauq "$@" - else - install_neutron_new "$@" +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) +} + +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
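A hedged sketch of how a DevStack plugin typically invokes the three accumulator helpers above; "router" matches the service plugin named elsewhere in this diff, while the extension driver and extra config path are purely illustrative:

    neutron_service_plugin_class_add router
    neutron_ml2_extension_driver_add port_security     # illustrative driver
    neutron_server_config_add /etc/neutron/extra.conf  # hypothetical file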
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - start_neutron_new "$@" + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF } -function stop_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - stop_mutnauq "$@" - else - stop_neutron_new "$@" +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi +} + +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} +# Functions for Neutron Exercises +#-------------------------------- + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec +} + +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ !
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index dca2e98a0c..e90400fec1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1,1086 +1,6 @@ #!/bin/bash -# -# lib/neutron -# functions - functions specific to neutron -# Dependencies: -# ``functions`` file -# ``DEST`` must be defined -# ``STACK_USER`` must be defined +# TODO(slaweq): remove this file once other projects, e.g. Grenade, are +# using lib/neutron -# ``stack.sh`` calls the entry points in this order: -# -# - install_neutron_agent_packages -# - install_neutronclient -# - install_neutron -# - install_neutron_third_party -# - configure_neutron -# - init_neutron -# - configure_neutron_third_party -# - init_neutron_third_party -# - start_neutron_third_party -# - create_nova_conf_neutron -# - start_neutron_service_and_check -# - check_neutron_third_party_integration -# - start_neutron_agents -# - create_neutron_initial_network -# - setup_neutron_debug -# -# ``unstack.sh`` calls the entry points in this order: -# -# - teardown_neutron_debug -# - stop_neutron -# - stop_neutron_third_party -# - cleanup_neutron - -# Functions in lib/neutron are classified into the following categories: -# -# - entry points (called from stack.sh or unstack.sh) -# - internal functions -# - neutron exercises -# - 3rd party programs - - -# Neutron Networking -# ------------------ - -# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want -# to run Neutron on this host, make sure that q-svc is also in -# ``ENABLED_SERVICES``. -# -# See "Neutron Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Neutron. -# -# With Neutron networking the NETWORK_MANAGER variable is ignored. - -# Settings -# -------- - - -# Neutron Network Configuration -# ----------------------------- - -deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future" - -if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - - -# Set up default directories -GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# Default provider for load balancer service -DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files.
Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# RHEL's support for namespaces requires using veths with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -# The name of the default q-l3 router -Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} - -# List of config file names in addition to the main plugin config file -# See _configure_neutron_common() for details about setting it up -declare -a Q_PLUGIN_EXTRA_CONF_FILES - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE, VXLAN or GENEVE tunnels for tenant networks, -# specify the range of IDs from which tenant networks are -# allocated. Can be overridden in ``localrc`` if necessary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. 
-ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. 
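Taken together, the provider-network knobs being removed above amounted to localrc stanzas such as the following (values copied from the examples in the deleted comments):

    ENABLE_TENANT_VLANS=True
    TENANT_VLAN_RANGE=1000:1999
    PHYSICAL_NETWORK=default
    OVS_PHYSICAL_BRIDGE=br-eth1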
-if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then - source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN -fi - -# Agent loadbalancer service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/loadbalancer - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# Firewall Service Plugin functions -# --------------------------------- -source $TOP_DIR/lib/neutron_plugins/services/firewall - -# L3 Service functions -source $TOP_DIR/lib/neutron_plugins/services/l3 - -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Save trace setting -_XTRACE_NEUTRON=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - if is_service_enabled q-fwaas; then - opts+=" --config-file $Q_FWAAS_CONF_FILE" - fi - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are passed to the binary. -function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." - fi - echo "$opts" -} - -# configure_mutnauq() -# Set common config for all neutron server and agents. 
-function configure_mutnauq { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES - if is_service_enabled q-lbaas; then - deprecated "Configuring q-lbaas through devstack is deprecated" - _configure_neutron_lbaas - fi - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-fwaas; then - deprecated "Configuring q-fwaas through devstack is deprecated" - _configure_neutron_fwaas - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" -} - -function create_nova_conf_neutron { - iniset $NOVA_CONF DEFAULT use_neutron True - iniset $NOVA_CONF neutron auth_type "password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI/v3" - iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF neutron project_name "$SERVICE_PROJECT_NAME" - iniset $NOVA_CONF neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF neutron region_name "$REGION_NAME" - iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" - - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - fi - - # optionally set options in nova_conf - neutron_plugin_create_nova_conf - - if is_service_enabled q-meta; then - iniset $NOVA_CONF neutron service_metadata_proxy "True" - fi - - iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_mutnauq_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_mutnauq_accounts { - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - get_or_create_service "neutron" "network" "Neutron Service" - get_or_create_endpoint \ - "network" \ - "$REGION_NAME" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" - fi -} - -# init_mutnauq() - Initialize databases, etc. -function init_mutnauq { - recreate_database $Q_DB_NAME - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head -} - -# install_mutnauq() - Collect source and prepare -function install_mutnauq { - # Install neutron-lib from git so we make sure we're testing - # the latest code. 
- if use_library_from_git "neutron-lib"; then - git_clone_by_name "neutron-lib" - setup_dev_lib "neutron-lib" - fi - - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - if is_service_enabled q-fwaas; then - git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH - setup_develop $NEUTRON_FWAAS_DIR - fi - if is_service_enabled q-lbaas; then - git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH - setup_develop $NEUTRON_LBAAS_DIR - fi - - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # Find where the plugins should go in dom0 - local xen_functions - xen_functions=$(cat $TOP_DIR/tools/xen/functions) - local plugin_dir - plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location") - - # install neutron plugins to dom0 - tar -czf - -C $NEUTRON_DIR/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | - $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*" - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi - - if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages - fi -} - -# Start running processes, including screen -function start_neutron_service_and_check { - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - local cfg_file_options - - cfg_file_options="$(determine_config_files neutron-server)" - - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - echo "Waiting for Neutron to start..." 
- if is_ssl_enabled_service "neutron"; then - ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" - fi - - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT - - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT & - fi -} - -# Control of the l2 agent is separated out to make it easier to test partial -# upgrades (everything upgraded except the L2 agent) -function start_mutnauq_l2_agent { - run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - - if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi -} - -function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - - if is_service_enabled neutron-vpnaas; then - : # Started by plugin - else - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - fi - - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME" - run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" - - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # For XenServer, start an agent for the domU openvswitch - run_process q-domua "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" - fi -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - start_mutnauq_l2_agent - start_mutnauq_other_agents -} - -function stop_mutnauq_l2_agent { - stop_process q-agt -} - -# stop_mutnauq_other() - Stop running processes (non-screen) -function stop_mutnauq_other { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - - stop_process q-svc - - if is_service_enabled q-l3; then - sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" - stop_process q-l3 - fi - - if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process q-meta - fi - - if is_service_enabled q-lbaas; then - neutron_lbaas_stop - fi - if is_service_enabled q-fwaas; then - neutron_fwaas_stop - fi - if is_service_enabled q-metering; then - neutron_metering_stop - fi - - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_mutnauq { - stop_mutnauq_other - stop_mutnauq_l2_agent -} - -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup. If no IP is -# configured on the interface, just add it as a port to the OVS bridge. 
-function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - local del_ovs_port=$4 - local af=$5 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. - - local IP_ADD="" - local IP_DEL="" - local IP_UP="" - local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }") - local ADD_OVS_PORT="" - local DEL_OVS_PORT="" - local ARP_CMD="" - - IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" - fi - - if [[ "$del_ovs_port" == "True" ]]; then - DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" - fi - - if [[ "$IP_BRD" != "" ]]; then - IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_ADD="sudo ip addr add $IP_BRD dev $to_intf" - IP_UP="sudo ip link set $to_intf up" - if [[ "$af" == "inet" ]]; then - IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP " - fi - fi - - # The add/del OVS port calls have to happen either before or - # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD - fi -} - -# cleanup_mutnauq() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_mutnauq { - - if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" - - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - # ip(8) wants the prefix length when deleting - local v6_gateway - v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') - sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" - fi - - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. 
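(Note: the function below leans on DevStack's "iniset <file> <section> <option> <value>" helper. Outside DevStack the same edit could be made with a generic INI tool; an illustrative equivalent, assuming crudini is installed:)

    # Equivalent of: iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
    crudini --set /etc/neutron/neutron.conf DEFAULT bind_host 0.0.0.0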
-function _configure_neutron_common { - _create_neutron_conf_dir - - # Uses oslo config generator to generate core sample configuration files - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - - # allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - # For additional plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH`` and - # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: - # - # ``Q_PLUGIN_EXTRA_CONF_PATH=/path/to/plugins`` - # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1 file2)`` - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - # NOTE(hichihara): Some neutron vendor plugins were already decomposed and - # there is no config file in Neutron tree. They should prepare the file in each plugin. - if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then - cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE - elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS - iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - - # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - # If addition config files are set, make sure their path name is set as well - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then - die $LINENO "Neutron additional plugin config not set.. 
exiting" - fi - - # If additional config files exist, copy them over to neutron configuration - # directory - if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - local f - for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do - Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - done - fi - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id - else - # Show user_name and project_name by default like in nova - iniset $NEUTRON_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s" - fi - - if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - fi - - if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "neutron"; then - ensure_certificates NEUTRON - - iniset $NEUTRON_CONF DEFAULT use_ssl True - iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" - iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! 
is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent -} - - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging -} - -function _configure_neutron_lbaas { - # Uses oslo config generator to generate LBaaS sample configuration files - (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh) - - if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then - cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf - iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER - fi - neutron_agent_lbaas_configure_common - neutron_agent_lbaas_configure_agent -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_neutron_fwaas { - if [ -f $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf ]; then - cp $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf $NEUTRON_CONF_DIR - fi - neutron_fwaas_configure_common - neutron_fwaas_configure_driver -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is enabled. -function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. 
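(Note: after the function below runs, the server side of neutron.conf ends up looking roughly like this excerpt; values are illustrative, with "ml2" and "router" matching defaults set elsewhere in this change:)

    [DEFAULT]
    core_plugin = ml2
    service_plugins = router
    auth_strategy = keystone
    notify_nova_on_port_status_changes = True
    notify_nova_on_port_data_changes = True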
-function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken - - # Configuration for neutron notifications to nova. - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). -function _neutron_deploy_rootwrap_filters { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset 
$NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -# Configures keystone integration for neutron service -function _neutron_setup_keystone { - local conf_file=$1 - local section=$2 - - create_neutron_cache_dir - configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. - iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -# ssh check -function _ssh_check_neutron { - local from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" - test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec -} - -# Neutron 3rd party programs -#--------------------------- - -# please refer to ``lib/neutron_thirdparty/README.md`` for details -NEUTRON_THIRD_PARTIES="" -for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi -done - -function _neutron_third_party_do { - for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do - ${1}_${third_party} - done -} - -# configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party { - _neutron_third_party_do configure -} - -# init_neutron_third_party() - Initialize databases, etc. 
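(Note: _neutron_third_party_do above is plain bash dynamic dispatch: a phase name is concatenated with each third-party name and the resulting function is called. A runnable sketch, with a hypothetical "example_sdn" third party:)

    # Each sourced third-party file defines phase hooks: configure_<name>, init_<name>, ...
    configure_example_sdn() { echo "configuring example_sdn"; }
    THIRD_PARTIES="example_sdn"
    for tp in ${THIRD_PARTIES//,/ }; do
        "configure_${tp}"    # resolves to, and calls, configure_example_sdn
    done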
-function init_neutron_third_party { - _neutron_third_party_do init -} - -# install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party { - _neutron_third_party_do install -} - -# start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party { - _neutron_third_party_do start -} - -# stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party { - _neutron_third_party_do stop -} - -# check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration { - _neutron_third_party_do check -} - -# Restore xtrace -$_XTRACE_NEUTRON - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index f03000e7cb..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. functions --------- -``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : optionally set options in nova_conf @@ -24,7 +24,6 @@ functions * ``neutron_plugin_configure_common`` : set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, ``Q_PLUGIN_CLASS`` -* ``neutron_plugin_configure_debug_command`` * ``neutron_plugin_configure_dhcp_agent`` * ``neutron_plugin_configure_l3_agent`` * ``neutron_plugin_configure_plugin_agent`` diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 586ded79b4..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -1,6 +1,6 @@ #!/bin/bash # -# Neuton Big Switch/FloodLight plugin +# Neutron Big Switch/FloodLight plugin # ------------------------------------ # Save trace setting @@ -26,10 +26,6 @@ function neutron_plugin_configure_common { BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - function neutron_plugin_configure_dhcp_agent { : } @@ -71,7 +67,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 6ba0a66c3f..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -49,16 +49,11 @@ function neutron_plugin_configure_service { } -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } function neutron_plugin_configure_l3_agent { - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } @@ -77,7 +72,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && 
is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index fc2cb8ad17..b397169b59 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -45,7 +45,6 @@ source $TOP_DIR/lib/neutron_plugins/openvswitch _prefix_function neutron_plugin_create_nova_conf ovs _prefix_function neutron_plugin_install_agent_packages ovs _prefix_function neutron_plugin_configure_common ovs -_prefix_function neutron_plugin_configure_debug_command ovs _prefix_function neutron_plugin_configure_dhcp_agent ovs _prefix_function neutron_plugin_configure_l3_agent ovs _prefix_function neutron_plugin_configure_plugin_agent ovs @@ -83,10 +82,6 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2" } -function neutron_plugin_configure_debug_command { - : -} - function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index 0a066354ca..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash -# -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - -function neutron_lb_cleanup { - sudo brctl delbr $PUBLIC_BRIDGE - - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do - sudo ip link delete $port - done - elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do - sudo ip link delete $port - done - fi - for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do - sudo ip link set $bridge down - sudo brctl delbr $bridge - done -} - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - install_package bridge-utils -} - -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - -function neutron_plugin_configure_dhcp_agent { - local conf_file=$1 - iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function neutron_plugin_configure_l3_agent { - local conf_file=$1 - sudo brctl addbr $PUBLIC_BRIDGE - iniset $conf_file DEFAULT external_network_bridge - iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
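(Note: per the comment above, the mapping could be set up front; a hypothetical localrc fragment for a single flat physical network would have looked like:)

    PHYSICAL_NETWORK=physnet1
    LB_PHYSICAL_INTERFACE=eth1
    # or, equivalently, set the mapping directly:
    LB_INTERFACE_MAPPINGS=physnet1:eth1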
- if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES - - # Configure vxlan tunneling - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" - iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver linuxbridge -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 2ece210a0b..687167bf79 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -7,9 +7,16 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} +if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then + source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +fi + # Enable this to simply and quickly enable tunneling with ML2. -# Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. 
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -17,14 +24,8 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then Q_TUNNEL_TYPES=gre fi -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} -if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then - source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent -fi - # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options @@ -35,12 +36,16 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} # List of extension drivers to load, use '-' instead of ':-' to allow people to # explicitly override this to blank -Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} +if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} +else + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-} +fi # L3 Plugin to load for ML2 # For some flat network environment, they not want to extend L3 plugin. # Make sure it is able to set empty to ML2_L3_PLUGIN. -ML2_L3_PLUGIN=${ML2_L3_PLUGIN-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router} function populate_ml2_config { CONF=$1 @@ -59,10 +64,10 @@ function populate_ml2_config { function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini - Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" + Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - _neutron_service_plugin_class_add $ML2_L3_PLUGIN + neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service { @@ -95,23 +100,21 @@ function neutron_plugin_configure_service { # Allow for setup the flat type network - if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" && -n "$PHYSICAL_NETWORK" ]]; then - Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$PHYSICAL_NETWORK" - fi - # REVISIT(rkukura): Setting firewall_driver here for - # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is - # used in the server, in case no L2 agent is configured on the - # server's node. If an L2 agent is configured, this will get - # overridden with the correct driver. The ml2 plugin should - # instead use its own config variable to indicate whether security - # groups is enabled, and that will need to be set here instead. 
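(Note: the net effect of the hunk below is that the server stops setting a placeholder firewall_driver and records only whether security groups are enabled; the resulting ml2_conf.ini section reduces to roughly the following, values illustrative:)

    [securitygroup]
    enable_security_group = True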
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" ]]; then + if [[ -n "$PHYSICAL_NETWORK" || -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=" + if [[ -n "$PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK}," + fi + if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]] && [[ "${PHYSICAL_NETWORK}" != "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK}," + fi + fi fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS @@ -135,6 +138,7 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True fi } @@ -142,5 +146,9 @@ function has_neutron_plugin_security_group { return 0 } +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + # Restore xtrace $_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 61e634e453..8c75e15048 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -8,10 +8,9 @@ _XTRACE_NEUTRON_NU=$(set +o | grep xtrace) set +o xtrace function neutron_plugin_create_nova_conf { + local conf="$1" NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} - iniset $NOVA_CONF neutron ovs_bridge $NOVA_OVS_BRIDGE - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE } function neutron_plugin_install_agent_packages { @@ -33,10 +32,6 @@ function neutron_plugin_configure_common { NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} } -function neutron_plugin_configure_debug_command { - : -} - function neutron_plugin_configure_dhcp_agent { : } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 69e38f4df1..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -11,31 +11,24 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - iniset $NOVA_CONF xenserver vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver - iniset $NOVA_CONF xenserver ovs_integration_bridge $XEN_INTEGRATION_BRIDGE - # Disable nova's firewall so that it does not conflict with neutron - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver - fi } function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib 
"os-ken" + fi } function neutron_plugin_configure_dhcp_agent { local conf_file=$1 - iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport + : } function neutron_plugin_configure_l3_agent { local conf_file=$1 _neutron_ovs_base_configure_l3_agent - iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } function neutron_plugin_configure_plugin_agent { @@ -52,8 +45,10 @@ function neutron_plugin_configure_plugin_agent { # Setup physical network bridge mappings. Override # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. - if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + fi # Configure bridge manually with physical interface as port for multi-node _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE @@ -63,57 +58,6 @@ function neutron_plugin_configure_plugin_agent { fi AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-openvswitch-agent" - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - # Make a copy of our config for domU - sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU" - - # change domU's config file to STACK_USER - sudo chown $STACK_USER:$STACK_USER /$Q_PLUGIN_CONF_FILE.domU - - # Deal with Dom0's L2 Agent: - Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE" - - # For now, duplicate the xen configuration already found in nova.conf - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_username "$XENAPI_USER" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_password "$XENAPI_PASSWORD" - - # Under XS/XCP, the ovs agent needs to target the dom0 - # integration bridge. This is enabled by using a root wrapper - # that executes commands on dom0 via a XenAPI plugin. 
-        # XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty
-        iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND"
-        iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon ""
-
-        # Disable minimize polling, so that it can always detect OVS and Port changes
-        # This is a problem of xenserver + neutron, bug has been reported
-        # https://bugs.launchpad.net/neutron/+bug/1495423
-        iniset /$Q_PLUGIN_CONF_FILE agent minimize_polling False
-
-        # Set "physical" mapping
-        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
-
-        # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
-        iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE
-
-        # Set up domU's L2 agent:
-
-        # Create a bridge "br-$VLAN_INTERFACE"
-        _neutron_ovs_base_add_bridge "br-$VLAN_INTERFACE"
-        # Add $VLAN_INTERFACE to that bridge
-        sudo ovs-vsctl -- --may-exist add-port "br-$VLAN_INTERFACE" $VLAN_INTERFACE
-
-        # Create external bridge and add port
-        _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
-        sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE
-
-        # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE"
-        # Set integration bridge to domU's
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE
-        # Set root wrap
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND"
-    fi
     iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
     iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE
 }
@@ -124,7 +68,7 @@ function neutron_plugin_setup_interface_driver {
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
new file mode 100644
index 0000000000..48e92a1782
--- /dev/null
+++ b/lib/neutron_plugins/ovn_agent
@@ -0,0 +1,867 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+# Global Sources
+# --------------
+
+# There are some ovs functions OVN depends on that must be sourced from
+# the ovs neutron plugins.
+source ${TOP_DIR}/lib/neutron_plugins/ovs_base
+source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
+
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Set variables for building OVN from source
+OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
+OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
+OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
+OVN_BRANCH=${OVN_BRANCH:-branch-24.03}
+# The commit removing OVN bits from the OVS tree; it is the commit that is not
+# present in the OVN tree and is used to distinguish if OVN is part of OVS or not.
+# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d
+OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d
+
+if is_service_enabled tls-proxy; then
+    OVN_PROTO=ssl
+else
+    OVN_PROTO=tcp
+fi
+
+# How to connect to ovsdb-server hosting the OVN SB database.
+OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}
+
+# How to connect to ovsdb-server hosting the OVN NB database
+OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}
+
+# ml2/config for neutron_sync_mode
+OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
+
+# Configured DNS servers to be used with internal_dns extension, only
+# if the subnet DNS is not configured.
+OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}
+
+# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
+# hypervisor/chassis where a router's gateway should be hosted in OVN. The
+# default OVN L3 scheduler is leastloaded.
+OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
+
+# A UUID to uniquely identify this system. If one is not specified, a random
+# one will be generated. A randomly generated UUID will be saved in a file
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart the
+# Open vSwitch service.
+OVN_UUID=${OVN_UUID:-}
+
+# Whether or not to build the openvswitch kernel module from ovs. This is required
+# unless the distro kernel includes ovs+conntrack support.
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    Q_BUILD_OVS_FROM_GIT=True
+fi
+
+# Whether or not to install the ovs python module from ovs source. This can be
+# used to test and validate new ovs python features. This should only be used
+# for development purposes since the ovs python version is controlled by OpenStack
+# requirements.
+OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
+
+# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
+# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
+# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
+# configure the MTU DHCP option.
+OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
+
+# The log level of the OVN databases (north and south).
+# Supported log levels are: off, emer, err, warn, info or dbg.
+# More information about log levels can be found at
+# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
+OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
+
+# OVN metadata agent configuration
+OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
+OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+
+# OVN agent configuration
+# The OVN agent is configured, by default, with the "metadata" extension.
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini
+OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata}
+# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata
+# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN
+# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling
+# the first one (OVN Metadata agent) and enabling the second (OVN agent).
+# This variable will be removed in 2026.2, when the OVN Metadata agent
+# is removed.
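(Note: an illustrative local.conf fragment exercising this override, forcing the consolidated OVN agent with its metadata extension:)

    [[local|localrc]]
    TARGET_ENABLE_OVN_AGENT=True
    OVN_AGENT_EXTENSIONS=metadata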
+TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) + +# If True (default) the node will be considered a gateway node. +ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) + +export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] +fi + +OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) + +OVS_PREFIX= +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVS_PREFIX=/usr/local +fi +OVS_SBINDIR=$OVS_PREFIX/sbin +OVS_BINDIR=$OVS_PREFIX/bin +OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch +OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch +OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts +OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} + +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi +OVN_SHAREDIR=$OVS_PREFIX/share/ovn +OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts +OVN_RUNDIR=$OVS_PREFIX/var/run/ovn + +NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) +NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" + +STACK_GROUP="$( id --group --name "$STACK_USER" )" + +OVN_NORTHD_SERVICE=ovn-northd.service +if is_ubuntu; then + # The ovn-central.service file on Ubuntu is responsible for starting + # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service) + OVN_NORTHD_SERVICE=ovn-central.service +fi +OVSDB_SERVER_SERVICE=ovsdb-server.service +OVS_VSWITCHD_SERVICE=ovs-vswitchd.service +OVN_CONTROLLER_SERVICE=ovn-controller.service +OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service + OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service + OVN_NORTHD_SERVICE=devstack@ovn-northd.service + OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service + OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service +fi + +# Defaults Overwrite +# ------------------ +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} +# this one allows empty: +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} + +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} + +# Utility Functions +# ----------------- + +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + +function wait_for_sock_file { + local count=0 + while [ ! 
-S $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "Socket $1 not found" + fi + done +} + +function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi + if [ -z "$is_new_ovn" ]; then + local ovs_repo_dir=$DEST/$OVS_REPO_NAME + if [ ! -d $ovs_repo_dir ]; then + git_timed clone $OVS_REPO $ovs_repo_dir + pushd $ovs_repo_dir + git checkout $OVS_BRANCH + popd + else + clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH + fi + # Check the split commit exists in the current branch + pushd $ovs_repo_dir + git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH + is_new_ovn=$? + popd + fi + return $is_new_ovn +} + +# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge +# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup +# removed. The call is not relevant for OVN, as it is specific to the use +# of Neutron's OVS agent and hangs when running stack.sh because +# neutron-ovs-cleanup uses the OVSDB native interface. +function ovn_base_setup_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _start_process { + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $1 + $SYSTEMCTL restart $1 +} + +function _run_process { + local service=$1 + local cmd="$2" + local stop_cmd="$3" + local group=$4 + local user=$5 + local rundir=${6:-$OVS_RUNDIR} + + local systemd_service="devstack@$service.service" + local unit_file="$SYSTEMD_DIR/$systemd_service" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + + echo "Starting $service executed command": $cmd + + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + iniset -sudo $unit_file "Service" "Type" "forking" + iniset -sudo $unit_file "Service" "RemainAfterExit" "yes" + iniset -sudo $unit_file "Service" "KillMode" "mixed" + iniset -sudo $unit_file "Service" "LimitNOFILE" "65536" + iniset -sudo $unit_file "Service" "Environment" "$environment" + if [ -n "$stop_cmd" ]; then + iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd" + fi + + _start_process $systemd_service + + local testcmd="test -e $rundir/$service.pid" + test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 + local service_ctl_file + service_ctl_file=$(ls $rundir | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info +} + +function clone_repository { + local repo=$1 + local dir=$2 + local branch=$3 + # Set ERROR_ON_CLONE to false to avoid the need of having the + # repositories like OVN and OVS in the required_projects of the job + # definition. + ERROR_ON_CLONE=false git_clone $repo $dir $branch +} + +function create_public_bridge { + # Create the public bridge that OVN will use + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} + _configure_public_network_connectivity +} + +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then + return 0 + fi + return 1 +} + +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} + +# OVN compilation functions +# ------------------------- + + +# compile_ovn() - Compile OVN from source and load needed modules +# Accepts three parameters: +# - first optional parameter defines prefix for +# ovn compilation +# - second optional parameter defines localstatedir for +# ovn single machine runtime +function compile_ovn { + local prefix=$1 + local localstatedir=$2 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH + pushd $DEST/$OVN_REPO_NAME + + if [ ! -f configure ] ; then + ./boot.sh + fi + + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure + fi + make -j$(($(nproc) + 1)) + popd + + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure $prefix $localstatedir + fi + make -j$(($(nproc) + 1)) + sudo make install + popd +} + + +# OVN Neutron driver functions +# ---------------------------- + +# OVN service sanity check +function ovn_sanity_check { + if is_service_enabled q-agt neutron-agent; then + die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." + elif is_service_enabled q-l3 neutron-l3; then + die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then + die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS" + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then + die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN" + fi +} + +# install_ovn() - Collect source and prepare +function install_ovn { + echo "Installing OVN and dependent packages" + + # Check the OVN configuration + ovn_sanity_check + + # Install tox, used to generate the config (see devstack/override-defaults) + pip_install tox + + sudo mkdir -p $OVS_RUNDIR + sudo chown $(whoami) $OVS_RUNDIR + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + # If OVS is already installed, remove it, because we're about to + # re-install it from source. 
+ for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package ; then + uninstall_package $package + fi + done + + remove_ovs_packages + sudo rm -f $OVS_RUNDIR/* + + compile_ovs $OVN_BUILD_MODULES + if use_new_ovn_repository; then + compile_ovn + fi + + sudo mkdir -p $OVS_PREFIX/var/log/openvswitch + sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch + sudo mkdir -p $OVS_PREFIX/var/log/ovn + sudo chown $(whoami) $OVS_PREFIX/var/log/ovn + else + install_package $(get_packages openvswitch) + install_package $(get_packages ovn) + fi + + # Ensure that the OVS commands are accessible in the PATH + export PATH=$OVS_BINDIR:$PATH + + # Archive log files and create new + local log_archive_dir=$LOGDIR/archive + mkdir -p $log_archive_dir + for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do + if [ -f "$LOGDIR/$logfile" ] ; then + mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}" + fi + done + + # Install ovsdbapp from source if requested + if use_library_from_git "ovsdbapp"; then + git_clone_by_name "ovsdbapp" + setup_dev_lib "ovsdbapp" + fi + + # Install ovs python module from ovs source. + if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then + sudo pip uninstall -y ovs + # Clone the OVS repository if it's not yet present + clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH + sudo pip install -e $DEST/$OVS_REPO_NAME/python + fi +} + +# filter_network_api_extensions() - Remove non-supported API extensions by +# the OVN driver from the list of enabled API extensions +function filter_network_api_extensions { + SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))') + SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))') + if is_service_enabled q-qos neutron-qos ; then + SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos" + fi + NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS} + extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext")) + disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext")) + + # Log a message in case some extensions had to be disabled because + # they are not supported by the OVN driver + if [ ! 
-z "$disabled_ext" ]; then + _disabled=$(echo $disabled_ext | tr ' ' ',') + echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled" + fi + + # Export the final list of extensions that have been enabled and are + # supported by OVN + export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',') +} + +function configure_ovn_plugin { + echo "Configuring Neutron for OVN" + + if is_service_enabled q-svc neutron-api; then + filter_network_api_extensions + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE" + if is_service_enabled tls-proxy; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER" + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" + inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + + if is_ovn_metadata_agent_enabled; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + else + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False + fi + + if is_service_enabled q-dns neutron-dns ; then + iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS" + fi + + iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE + fi + + if is_service_enabled q-dhcp neutron-dhcp ; then + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True + else + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False + fi + + if is_service_enabled n-api-meta ; then + if is_ovn_metadata_agent_enabled; then + iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + iniset $NOVA_CONF neutron service_metadata_proxy True + fi + fi +} + +function configure_ovn { + echo "Configuring OVN" + + if [ -z "$OVN_UUID" ] ; then + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) + else + OVN_UUID=$(uuidgen) + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) 
+ if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + fi + + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + + # Metadata + local sample_file="" + local config_file="" + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n "$config_file" ]; then + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR + + mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $sample_file $config_file + configure_root_helper_options $config_file + + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE + if is_service_enabled tls-proxy; then + iniset $config_file ovn \ + ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem + iniset $config_file ovn \ + ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt + iniset $config_file ovn \ + ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key + fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi + fi +} + +function init_ovn { + # clean up from previous (possibly aborted) runs + # create required data files + + # Assumption: this is a dedicated test system and there is nothing important + # in the ovn, ovn-nb, or ovs databases. We're going to trash them and + # create new ones on each devstack run. + + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" + + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" + fi + + $mkdir_cmd + mkdir -p $OVS_DATADIR + + rm -f $OVS_DATADIR/*.db + rm -f $OVS_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock +} + +function _start_ovs { + echo "Starting OVS" + if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then + # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names. + enable_service ovsdb-server + enable_service ovs-vswitchd + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + if [ ! -f $OVS_DATADIR/conf.db ]; then + ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema + fi + + if is_service_enabled ovn-controller-vtep; then + if [ ! 
-f $OVS_DATADIR/vtep.db ]; then + ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + fi + fi + + local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" + dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" + if is_service_enabled ovn-controller-vtep; then + dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + fi + dbcmd+=" $OVS_DATADIR/conf.db" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + # Note: ovn-controller will create and configure br-int once it is started. + # So, no need to create it now because nothing depends on that bridge here. + local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + else + _start_process "$OVSDB_SERVER_SERVICE" + _start_process "$OVS_VSWITCHD_SERVICE" + fi + + echo "Configuring OVSDB" + if is_service_enabled tls-proxy; then + sudo ovs-vsctl --no-wait set-ssl \ + $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + $INT_CA_DIR/ca-chain.pem + fi + + sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST + sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) + # Select this chassis to host gateway routers + if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" + fi + + if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then + ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} + fi + + if is_service_enabled ovn-controller-vtep ; then + ovn_base_setup_bridge br-v + vtep-ctl add-ps br-v + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP + + enable_service ovs-vtep + local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + vtep-ctl set-manager tcp:$HOST_IP:6640 + fi + fi +} + +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000
+    sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+    sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+    sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+}
+
+# start_ovn() - Start running processes, including screen
+function start_ovn {
+    echo "Starting OVN"
+
+    _start_ovs
+
+    local SCRIPTDIR=$OVN_SCRIPTDIR
+    if ! use_new_ovn_repository; then
+        SCRIPTDIR=$OVS_SCRIPTDIR
+    fi
+
+    if is_service_enabled ovn-northd ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
+
+            _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_NORTHD_SERVICE"
+        fi
+
+        _wait_for_ovn_and_set_custom_config
+    fi
+
+    if is_service_enabled ovn-controller ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
+
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_CONTROLLER_SERVICE"
+        fi
+    fi
+
+    if is_service_enabled ovn-controller-vtep ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+        fi
+    fi
+
+    if is_ovn_metadata_agent_enabled; then
+        run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
+        # Format logging
+        setup_logging $OVN_META_CONF
+    fi
+
+    if is_ovn_agent_enabled; then
+        run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF"
+        # Format logging
+        setup_logging $OVN_AGENT_CONF
+    fi
+}
+
+function _stop_ovs_dp {
+    sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp
+    sudo modprobe -q -r vport_geneve vport_vxlan openvswitch || true
+}
+
+function _stop_process {
+    local service=$1
+    echo "Stopping process $service"
+    if $SYSTEMCTL is-enabled $service; then
+        $SYSTEMCTL stop $service
+        $SYSTEMCTL disable $service
+    fi
+}
+
+function stop_ovn {
+    # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled";
+    # instead it relies only on the configured services, disregarding the
+    # flag "TARGET_ENABLE_OVN_AGENT". This is needed to force the OVN
+    # metadata agent to stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set.
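+    # Tear services down in reverse dependency order: the Neutron agents
+    # first, then ovn-controller/ovn-northd, and finally the OVS daemons and
+    # kernel datapaths.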
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-metadata-agent.service"
+    fi
+    if is_ovn_agent_enabled; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-agent.service"
+    fi
+    if is_service_enabled ovn-controller-vtep ; then
+        _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
+    fi
+    if is_service_enabled ovn-controller ; then
+        _stop_process "$OVN_CONTROLLER_SERVICE"
+    fi
+    if is_service_enabled ovn-northd ; then
+        _stop_process "$OVN_NORTHD_SERVICE"
+    fi
+    if is_service_enabled ovs-vtep ; then
+        _stop_process "devstack@ovs-vtep.service"
+    fi
+
+    _stop_process "$OVS_VSWITCHD_SERVICE"
+    _stop_process "$OVSDB_SERVER_SERVICE"
+
+    _stop_ovs_dp
+}
+
+function _cleanup {
+    local path=${1:-$DEST/$OVN_REPO_NAME}
+    pushd $path
+    sudo make uninstall
+    sudo make distclean
+    popd
+}
+
+# cleanup_ovn() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ovn {
+    local ovn_path=$DEST/$OVN_REPO_NAME
+    local ovs_path=$DEST/$OVS_REPO_NAME
+
+    if [ -d $ovn_path ]; then
+        _cleanup $ovn_path
+    fi
+
+    if [ -d $ovs_path ]; then
+        _cleanup $ovs_path
+    fi
+
+    sudo rm -rf $OVN_RUNDIR
+}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index ecf252f88b..adabc56412 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -7,6 +7,12 @@
 _XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace)
 set +o xtrace
 
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Defaults
+# --------
+
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
 # OVS recognize default 'system' datapath or 'netdev' for userspace datapath
 OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system}
@@ -19,7 +25,7 @@ function is_neutron_ovs_base_plugin {
 
 function _neutron_ovs_base_add_bridge {
     local bridge=$1
-    local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge"
+    local addbr_cmd="sudo ovs-vsctl -- --may-exist add-br $bridge"
 
     if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
         addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
@@ -30,7 +36,7 @@ function _neutron_ovs_base_setup_bridge {
     local bridge=$1
-    neutron-ovs-cleanup
+    neutron-ovs-cleanup --config-file $NEUTRON_CONF
     _neutron_ovs_base_add_bridge $bridge
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
@@ -60,43 +66,37 @@
 }
 
 function _neutron_ovs_base_install_agent_packages {
-    # Install deps
-    install_package $(get_packages "openvswitch")
-    if is_ubuntu; then
-        _neutron_ovs_base_install_ubuntu_dkms
-        restart_service openvswitch-switch
-    elif is_fedora; then
-        restart_service openvswitch
-        sudo systemctl enable openvswitch
-    elif is_suse; then
-        restart_service openvswitch-switch
-    fi
-}
-
-function _neutron_ovs_base_configure_debug_command {
-    if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-        iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge ""
+    if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then
+        remove_ovs_packages
+        compile_ovs False /usr/local /var
+        load_conntrack_gre_module
+        start_new_ovs
     else
-        iniset
$NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms + restart_service openvswitch-switch + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch + fi fi } function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver openvswitch + if ! running_in_container; then + enable_kernel_bridge_firewall + fi else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop fi } function _neutron_ovs_base_configure_l3_agent { - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge "" - else - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - fi - - neutron-ovs-cleanup + neutron-ovs-cleanup --config-file $NEUTRON_CONF if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || sudo ip link add $Q_PUBLIC_VETH_INT type veth \ @@ -105,11 +105,16 @@ function _neutron_ovs_base_configure_l3_agent { sudo ip link set $Q_PUBLIC_VETH_EX up sudo ip addr flush dev $Q_PUBLIC_VETH_EX else - _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + _neutron_ovs_base_add_public_bridge sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE fi } +function _neutron_ovs_base_add_public_bridge { + _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU +} + function _neutron_ovs_base_configure_nova_vif_driver { : } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source new file mode 100644 index 0000000000..6b6f531a01 --- /dev/null +++ b/lib/neutron_plugins/ovs_source @@ -0,0 +1,214 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
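+
+# This library provides helpers for building Open vSwitch from a git
+# checkout (enabled with Q_BUILD_OVS_FROM_GIT=True) instead of installing
+# the distribution packages, including compiling and (re)loading the OVS
+# kernel modules.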
+
+# Defaults
+# --------
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT)
+
+# Set variables for building OVS from source
+OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
+OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
+OVS_REPO_NAME=${OVS_REPO_NAME:-ovs}
+OVS_BRANCH=${OVS_BRANCH:-branch-3.3}
+
+# Functions
+
+# load_module() - Load the kernel module given as the first argument via
+# modprobe and die on failure
+#                 - the optional "fatal" argument says whether the function
+#                   should exit if the module can't be loaded
function load_module {
+    local module=$1
+    local fatal=$2
+
+    if [ "$(trueorfalse True fatal)" == "True" ]; then
+        sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module")
+    else
+        sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg)
+    fi
+}
+
+# prepare_for_ovs_compilation() - Fetch the OVS git repository and install the
+# packages needed for compilation.
+function prepare_for_ovs_compilation {
+    local build_modules=${1:-False}
+    OVS_DIR=$DEST/$OVS_REPO_NAME
+
+    if [ ! -d $OVS_DIR ] ; then
+        # We can't use git_clone here because we want to ignore ERROR_ON_CLONE
+        git_timed clone $OVS_REPO $OVS_DIR
+        cd $OVS_DIR
+        git checkout $OVS_BRANCH
+    else
+        # Even though the directory already exists, call git_clone to update it
+        # if needed based on the RECLONE option
+        git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH
+        cd $OVS_DIR
+    fi
+
+    # TODO: Can we create package list files like you can inside devstack?
+    install_package autoconf automake libtool gcc patch make
+
+    # If build_modules is False, we don't need to install the kernel-*
+    # packages. Just return.
+    if [[ "$build_modules" == "False" ]]; then
+        return
+    fi
+
+    KERNEL_VERSION=`uname -r`
+    if is_fedora ; then
+        # is_fedora covers Fedora, RHEL, CentOS, etc...
+        if [[ "$os_VENDOR" == "Fedora" ]]; then
+            install_package elfutils-libelf-devel
+            KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1`
+        elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then
+            # A dash is an illegal character in an rpm version, so replace
+            # dashes with underscores like it is done for the kernel
+            # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25
+            # but only for the latest series of the kernel, not 3.x
+
+            KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _`
+        fi
+
+        echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation
+        echo failed, please provide a repository with the package, or yum update / reboot
+        echo your machine to get the latest kernel.
+
+        install_package kernel-devel-$KERNEL_VERSION
+        install_package kernel-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package openssl-devel
+        fi
+
+    elif is_ubuntu ; then
+        install_package linux-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package libssl-dev
+        fi
+    fi
+}
+
+# load_ovs_kernel_modules() - load the openvswitch kernel modules
+function load_ovs_kernel_modules {
+    load_module openvswitch
+    load_module vport-geneve False
+    sudo dmesg | tail
+}
+
+# reload_ovs_kernel_modules() - reload the openvswitch kernel modules
+function reload_ovs_kernel_modules {
+    set +e
+    ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system)
+    if [ -n "$ovs_system" ]; then
+        sudo ovs-dpctl del-dp ovs-system
+    fi
+    set -e
+    sudo modprobe -r vport_geneve
+    sudo modprobe -r openvswitch
+    load_ovs_kernel_modules
+}
+
+# compile_ovs() - Compile OVS from source and load needed modules.
+# Accepts three parameters:
+#                 - the first one is False by default and means that kernel modules are not built and installed.
+#                 - the second optional parameter defines the prefix for the ovs compilation
+#                 - the third optional parameter defines the localstatedir for the ovs single machine runtime
+# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set
+function compile_ovs {
+    local _pwd=$PWD
+    local build_modules=${1:-False}
+    local prefix=$2
+    local localstatedir=$3
+
+    if [ -n "$prefix" ]; then
+        prefix="--prefix=$prefix"
+    fi
+
+    if [ -n "$localstatedir" ]; then
+        localstatedir="--localstatedir=$localstatedir"
+    fi
+
+    prepare_for_ovs_compilation $build_modules
+
+    KERNEL_VERSION=$(uname -r)
+    major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1)
+    patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2)
+    if [ "${major_version}" -gt 5 ] || { [ "${major_version}" -eq 5 ] && [ "${patch_level}" -gt 5 ]; }; then
+        echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling "
+        echo "Kernel module for version higher than 5.5. Skipping module compilation..."
+        build_modules="False"
+    fi
+
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        if [[ "$build_modules" == "True" ]]; then
+            ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build
+        else
+            ./configure $prefix $localstatedir
+        fi
+    fi
+    make -j$(($(nproc) + 1))
+    sudo make install
+    if [[ "$build_modules" == "True" ]]; then
+        sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install
+    fi
+    reload_ovs_kernel_modules
+
+    cd $_pwd
+}
+
+# action_openvswitch() - call an action on the openvswitch service
+# Accepts one parameter that can be one of
+# 'start', 'restart' or 'stop'.
+function action_openvswitch {
+    local action=$1
+
+    if is_ubuntu; then
+        ${action}_service openvswitch-switch
+    elif is_fedora; then
+        ${action}_service openvswitch
+    fi
+}
+
+# start_new_ovs() - removes the old ovs database, creates a new one and starts ovs
+function start_new_ovs {
+    sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
+    sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
+}
+
+# stop_new_ovs() - stops ovs
+function stop_new_ovs {
+    local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl'
+
+    if [ -x $ovs_ctl ] ; then
+        sudo $ovs_ctl stop
+    fi
+}
+
+# remove_ovs_packages() - removes old ovs packages from the system
+function remove_ovs_packages {
+    for package in openvswitch openvswitch-switch openvswitch-common; do
+        if is_package_installed $package; then
+            uninstall_package $package
+        fi
+    done
+}
+
+
+# load_conntrack_gre_module() - loads the nf_conntrack_proto_gre kernel module
+function load_conntrack_gre_module {
+    load_module nf_conntrack_proto_gre False
+}
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
deleted file mode 100644
index 40968fa04c..0000000000
--- a/lib/neutron_plugins/services/firewall
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Neutron firewall plugin
-# ---------------------------
-
-# Save trace setting
-_XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace)
-set +o xtrace
-
-FWAAS_PLUGIN=${FWAAS_PLUGIN:-neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin}
-FWAAS_DRIVER=${FWAAS_DRIVER:-neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver}
-
-function neutron_fwaas_configure_common {
-    _neutron_service_plugin_class_add $FWAAS_PLUGIN
-}
-
-function neutron_fwaas_configure_driver {
-    # Uses oslo config generator to generate FWaaS sample
configuration files
-    (cd $NEUTRON_FWAAS_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
-    cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME
-
-    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
-    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "$FWAAS_DRIVER"
-}
-
-function neutron_fwaas_stop {
-    :
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON_FIREWALL
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 4ce87bdf6f..bbedc57a44 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -15,6 +15,15 @@ IPV6_PROVIDER_FIXED_RANGE=${IPV6_PROVIDER_FIXED_RANGE:-}
 IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-}
 
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
+
+# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public
+# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is
+# used.
+Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True}
+
+# The name of the default router
+Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 
 # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of
 # PUBLIC_BRIDGE. This is intended to be used with
@@ -30,15 +39,16 @@ Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int}
 
 Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 
-# Use flat providernet for public network
+# Use providernet for public network
 #
-# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network
 # for external interface of neutron l3-agent. In that case,
 # PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value
 # used for the network. In case of ofagent, you should add the
 # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS.
 # For openvswitch agent, you should add the corresponding entry to
-# your OVS_BRIDGE_MAPPINGS.
+# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry
+# to your OVN_BRIDGE_MAPPINGS.
 #
 # eg. (ofagent)
 # Q_USE_PROVIDERNET_FOR_PUBLIC=True
@@ -50,7 +60,16 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 # Q_USE_PROVIDERNET_FOR_PUBLIC=True
 # PUBLIC_PHYSICAL_NETWORK=public
 # OVS_BRIDGE_MAPPINGS=public:br-ex
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False}
+#
+# eg. (ovn agent)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVN_BRIDGE_MAPPINGS=public:br-ex
+#
+# The provider-network-type defaults to flat, however, the values
+# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
+# be set to specify the parameters for an alternate network type.
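+#
+# eg. (VLAN provider network; the segmentation id is illustrative)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PROVIDERNET_TYPE=vlan
+# PUBLIC_PROVIDERNET_SEGMENTATION_ID=2010
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVS_BRIDGE_MAPPINGS=public:br-ex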
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
 
 # Generate 40-bit IPv6 Global ID to comply with RFC 4193
@@ -61,27 +80,36 @@
 IPV6_RA_MODE=${IPV6_RA_MODE:-slaac}
 IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac}
 IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet}
 IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
-FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64}
-IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1}
+IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56}
+# If the safe-to-use range is larger than a /64, only use the first /64 to
+# avoid the side effects outlined in RFC 7421
+FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }')}
+IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-}
 IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64}
 IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2}
 IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1}
 
 # Gateway and subnet defaults, in case they are not customized in localrc
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-}
+PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-}
 PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
 PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
 
 # Subnetpool defaults
-SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
+USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
+SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"}
+SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"}
 
-SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8}
-SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48}
+SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
+SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
 
-SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24}
+SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26}
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
+default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
+
+default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
+
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
     echo "$opts"
@@ -101,11 +129,7 @@ function _configure_neutron_l3_agent {
 
     neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE
 
-    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
-    if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
-    fi
+    _configure_public_network_connectivity
 }
 
 # Explicitly set router id in l3 agent configuration
@@ -129,31 +153,44 @@ }
 
 function create_neutron_initial_network {
-    local project_id
-    project_id=$(openstack project list | grep " demo " | get_field 1)
-    die_if_not_set $LINENO project_id "Failure retrieving project_id for demo"
-
     # Allow drivers that need to create an initial network to do so here
     if type -p neutron_plugin_create_initial_network_profile > /dev/null; then
         neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
     fi
 
+    if is_networking_extension_supported "auto-allocated-topology"; then
+        if [[ "$USE_SUBNETPOOL" == "True" ]];
then + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + fi + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + fi + fi + fi + if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create $PHYSICAL_NETWORK --tenant_id $project_id --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi if [[ "$IP_VERSION" =~ .*6 ]]; then - die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" - die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6" - SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2) - die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" + die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but 
Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE + fi + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi if [[ $Q_AGENT == "openvswitch" ]]; then @@ -162,17 +199,17 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create --tenant-id $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then # Create IPv4 private subnet - SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id) + SUBNET_ID=$(_neutron_create_private_subnet_v4) fi if [[ "$IP_VERSION" =~ .*6 ]]; then # Create IPv6 private subnet - IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id) + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) fi fi @@ -180,29 +217,23 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create --tenant-id $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. 
- ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi - EXTERNAL_NETWORK_FLAGS="--router:external" - if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then - EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default" - if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2) - fi - if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2) - fi + EXTERNAL_NETWORK_FLAGS="--external" + if is_networking_extension_supported "auto-allocated-topology"; then + EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default" fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) else - EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -220,65 +251,74 @@ function create_neutron_initial_network { # Create private IPv4 subnet function _neutron_create_private_subnet_v4 { - local project_id=$1 - local subnet_params="--tenant-id $project_id " - subnet_params+="--ip_version 4 " - subnet_params+="--gateway $NETWORK_GATEWAY " - subnet_params+="--name $PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE" + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + local subnet_params="--ip-version 4 " + if [[ -n "$NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $NETWORK_GATEWAY " + fi + + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " + subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " + subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO subnet_id 
"Failure creating private IPv4 subnet for $project_id" + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } # Create private IPv6 subnet function _neutron_create_private_subnet_v6 { - local project_id=$1 die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" - local subnet_params="--tenant-id $project_id " - subnet_params+="--ip_version 6 " - subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " - subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$FIXED_RANGE_V6 + fi + local subnet_params="--ip-version 6 " + if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + fi + subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} " + subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " + subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local subnet_params+="--ip_version 4 " + local subnet_params="--ip-version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " - subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " - subnet_params+="--name $PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $FLOATING_RANGE " - subnet_params+="-- --enable_dhcp=False" + if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + fi + subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " + subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local subnet_params="--ip_version 6 " + local subnet_params="--ip-version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " - subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " - subnet_params+="-- --enable_dhcp=False" + subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " + subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + 
ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -286,31 +326,25 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) - elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Search for the brq device the neutron router and network for $FIXED_RANGE - # will be using. - # e.x. 
brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} local testcmd="ip -o link | grep -q $ext_gw_interface" test_with_retry "$testcmd" "$ext_gw_interface creation failed" - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP fi _neutron_set_router_id fi @@ -318,7 +352,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -330,16 +364,34 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then + # if the Linux host considers itself to be a router then it will + # ignore all router advertisements + # Ensure IPv6 RAs are accepted on interfaces with a default route. + # This is needed for neutron-based devstack clouds to work in + # IPv6-only clouds in the gate. Please do not remove this without + # talking to folks in Infra. + for d in $default_v6_route_devs; do + # Slashes must be used in this sysctl command because route devices + # can have dots in their names. If dots were used, dots in the + # device name would be reinterpreted as a slash, causing an error. 
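+            # For example, with a hypothetical VLAN device "eth0.100",
+            #   sysctl -w net/ipv6/conf/eth0.100/accept_ra=2
+            # resolves the device correctly, while the dot-separated form
+            # would split "eth0" and "100" apart and fail.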
+            sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
+        done
         # Ensure IPv6 forwarding is enabled on the host
         sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
-        IPV6_ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+        # NOTE(slaweq): when enforce scopes is enabled in Neutron, router
+        # gateway ports aren't visible in the API because such ports don't
+        # belong to any tenant. Because of that, at least temporarily, we need
+        # to find the IPv6 address of the router's gateway in a slightly
+        # different way. This can be reverted once bug
+        # https://bugs.launchpad.net/neutron/+bug/1959332 is fixed.
+        IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"')
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
         if is_neutron_ovs_base_plugin; then
@@ -347,24 +399,38 @@
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
             local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
 
-            # Configure interface for public bridge
+            # Configure the interface for the public bridge by setting it
+            # "up", in case the job is running entirely private-network-based
+            # testing.
+            sudo ip link set $ext_gw_interface up
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-            sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+            # Any IPv6 private subnet that uses the default IPv6 subnet pool
+            # and that is plugged into the default router (Q_ROUTER_NAME) will
+            # be reachable from the devstack node (ex: ipv6-private-subnet).
+            # Some scenario tests (such as octavia-tempest-plugin) rely heavily
+            # on this feature.
+            local replace_range=${SUBNETPOOL_PREFIX_V6}
+            if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
+                replace_range=${FIXED_RANGE_V6}
+            fi
+            sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
         fi
 
         _neutron_set_router_id
     fi
 }
 
-function is_provider_network {
-    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
-        return 0
-    fi
-    return 1
-}
-
 function is_networking_extension_supported {
     local extension=$1
     # TODO(sc68cal) cache this instead of calling every time
-    EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value)
+    EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
     [[ $EXT_LIST =~ $extension ]] && return 0
 }
+
+function plugin_agent_add_l3_agent_extension {
+    local l3_agent_extension=$1
+    if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then
+        L3_AGENT_EXTENSIONS=$l3_agent_extension
+    elif [[ !
,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then + L3_AGENT_EXTENSIONS+=",$l3_agent_extension" + fi +} diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer deleted file mode 100644 index 30e9480f2e..0000000000 --- a/lib/neutron_plugins/services/loadbalancer +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Neutron loadbalancer plugin -# --------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - - -AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" -LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin - -function neutron_agent_lbaas_install_agent_packages { - if is_ubuntu || is_fedora || is_suse; then - install_package haproxy - fi -} - -function neutron_agent_lbaas_configure_common { - _neutron_service_plugin_class_add $LBAAS_PLUGIN - _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR -} - -function neutron_agent_lbaas_configure_agent { - LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy - mkdir -p $LBAAS_AGENT_CONF_PATH - - LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" - - cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME - - if is_fedora; then - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" - iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" - fi -} - -function neutron_lbaas_stop { - pids=$(ps aux | awk '/haproxy/ { print $2 }') - [ ! -z "$pids" ] && sudo kill $pids || true -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5b32468d21..757a562ee6 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -12,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common { - _neutron_service_plugin_class_add $METERING_PLUGIN + neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent { diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement new file mode 100644 index 0000000000..3ec185bae6 --- /dev/null +++ b/lib/neutron_plugins/services/placement @@ -0,0 +1,21 @@ +#!/bin/bash + +function configure_placement_service_plugin { + neutron_service_plugin_class_add "placement" +} + +function configure_placement_neutron { + iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" + iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" + iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" + iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement region_name "$REGION_NAME" +} + +function configure_placement_extension { + configure_placement_service_plugin + configure_placement_neutron +} diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos new file mode 
100644 index 0000000000..c11c315586 --- /dev/null +++ b/lib/neutron_plugins/services/qos @@ -0,0 +1,30 @@ +#!/bin/bash + +function configure_qos_service_plugin { + neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$Q_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} + +function configure_l3_agent_extension_fip_qos { + plugin_agent_add_l3_agent_extension "fip_qos" +} + +function configure_l3_agent_extension_gateway_ip_qos { + plugin_agent_add_l3_agent_extension "gateway_ip_qos" +} diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- /dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md deleted file mode 100644 index 905ae776a8..0000000000 --- a/lib/neutron_thirdparty/README.md +++ /dev/null @@ -1,41 +0,0 @@ -Neutron third party specific files -================================== -Some Neutron plugins require third party programs to function. -The files under the directory, ``lib/neutron_thirdparty/``, will be used -when their service are enabled. -Third party program specific configuration variables should be in this file. - -* filename: ```` - * The corresponding file name should be same to service name, ````. - -functions ---------- -``lib/neutron-legacy`` calls the following functions when the ```` is enabled - -functions to be implemented -* ``configure_``: - set config files, create data dirs, etc - e.g. - sudo python setup.py deploy - iniset $XXXX_CONF... - -* ``init_``: - initialize databases, etc - -* ``install_``: - collect source and prepare - e.g. - git clone xxx - -* ``start_``: - start running processes, including screen if USE_SCREEN=True - e.g. - run_process XXXX "$XXXX_DIR/bin/XXXX-bin" - -* ``stop_``: - stop running processes (non-screen) - e.g. 
- stop_process XXXX - -* ``check_``: - verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight deleted file mode 100644 index 45a4f2e263..0000000000 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# -# Big Switch/FloodLight OpenFlow Controller -# ------------------------------------------ - -# Save trace setting -_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) -set +o xtrace - -BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} -BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} - -function configure_bigswitch_floodlight { - : -} - -function init_bigswitch_floodlight { - install_neutron_agent_packages - - echo -n "Installing OVS managed by the openflow controllers:" - echo ${BS_FL_CONTROLLERS_PORT} - - # Create local OVS bridge and configure it - sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} - - ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do - ctrl=${ctrl%:*} - ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" - done - echo "Adding Network conttrollers: " ${ctrls} - sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} -} - -function install_bigswitch_floodlight { - : -} - -function start_bigswitch_floodlight { - : -} - -function stop_bigswitch_floodlight { - : -} - -function check_bigswitch_floodlight { - : -} - -# Restore xtrace -$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx deleted file mode 100644 index e182fca1ae..0000000000 --- a/lib/neutron_thirdparty/vmware_nsx +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -# REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx' -# continues to work. diff --git a/lib/nova b/lib/nova index 67a80b9b16..460b4adc85 100644 --- a/lib/nova +++ b/lib/nova @@ -17,7 +17,6 @@ # # - install_nova # - configure_nova -# - _config_nova_apache_wsgi # - create_nova_conf # - init_nova # - start_nova @@ -28,7 +27,6 @@ _XTRACE_LIB_NOVA=$(set +o | grep xtrace) set +o xtrace - # Defaults # -------- @@ -48,28 +46,51 @@ fi NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} -NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf -NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf +NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf -NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application +NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini +NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini + +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. 
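+# e.g. generate a value once with ``uuidgen`` and pin it (the value below is
+# purely illustrative):
+#   NOVA_CPU_UUID="2a7d5c2e-8f1a-4c3b-9d6e-0b4f5a1c2d3e"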
+NOVA_CPU_UUID="" + +# The total number of cells we expect. Must be greater than one and doesn't +# count cell0. +NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} +# Our cell index, so we know what rabbit vhost to connect to. +# This should be in the range of 1-$NOVA_NUM_CELLS +NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -if is_suse; then - NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova} -else - NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} + +if is_service_enabled tls-proxy; then + NOVA_SERVICE_PROTOCOL="https" fi -# Toggle for deploying Nova-API under HTTPD + mod_wsgi -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False} +# Whether to use TLS for comms between the VNC/SPICE/serial proxy +# services and the compute node +NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} -if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then - NOVA_SERVICE_PROTOCOL="https" +# Validate configuration +if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" fi # Public facing bits @@ -77,42 +98,38 @@ NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} -NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} +NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + NOVA_MY_IP="$HOST_IP" +fi # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive -FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"} - -# Nova supports pluggable schedulers. The default ``FilterScheduler`` -# should work in most cases. -SCHEDULER=${SCHEDULER:-filter_scheduler} +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} -# The following FILTERS contains SameHostFilter and DifferentHostFilter with +# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. 
-FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -FLAT_NETWORK_BRIDGE_DEFAULT=br100 -# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that -# the default isn't completely crazy. This will match ``eth*``, ``em*``, or -# the new ``p*`` interfaces, then basically picks the first -# alphabetically. It's probably wrong, however it's less wrong than -# always using ``eth0`` which doesn't exist on new Linux distros at all. -GUEST_INTERFACE_DEFAULT=$(ip link \ - | grep 'state UP' \ - | awk '{print $2}' \ - | sed 's/://' \ - | grep ^[ep] \ - | head -1) - # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -123,43 +140,36 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER fi +# Other Nova configurations +# ---------------------------- -# Nova Network Configuration -# -------------------------- - -NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} -VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} - -# If you are using the FlatDHCP network mode on multiple hosts, set the -# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already -# have an IP or you risk breaking things. -# -# **DHCP Warning**: If your flat interface device uses DHCP, there will be a -# hiccup while the network is moved from the flat interface to the flat network -# bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the VM launch will probably -# fail. -# -# If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set ``FLAT_INTERFACE=`` -# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. -FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} - -# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This -# allows network operations and routing for a VM to occur on the server that is -# running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=$(trueorfalse False MULTI_HOST) +# ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external RESP API's like Neutron, Cinder +# and Glance. +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. 
NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) -# Test floating pool and range are used for testing. They are defined -# here until the admin APIs can replace nova-manage -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Enable debugging levels for iscsid service (goes from 0-8) +ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) +ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} + +# Format for notifications. Nova defaults to "unversioned" since Train. +# Other options include "versioned" and "both". +NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} + +# Timeout for servers to gracefully shutdown the OS during operations +# like shelve, rescue, stop, rebuild. Defaults to 0 since the default +# image in devstack is CirrOS. +NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} + +# Whether to use Keystone unified limits instead of legacy quota limits. +NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) + +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} # Functions # --------- @@ -167,14 +177,15 @@ TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} # Test if any Nova services are enabled # is_nova_enabled function is_nova_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"nova" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 return 1 } -# Test if any Nova Cell services are enabled -# is_nova_enabled -function is_n-cell_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0 +# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy +# service has TLS enabled +function is_nova_console_proxy_compute_tls_enabled { + [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0 return 1 } @@ -202,7 +213,10 @@ function cleanup_nova { instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 sudo virsh destroy || true - echo $instances | xargs -n1 sudo virsh undefine --managed-save || true + if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then + # Can't delete with nvram flags, then just try without this flag + xargs -n1 sudo virsh undefine --managed-save <<< $instances + fi fi # Logout and delete iscsi sessions @@ -214,11 +228,14 @@ function cleanup_nova { done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. 
sudo rm -rf $NOVA_INSTANCES_PATH/* fi - sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + sudo rm -rf $NOVA_STATE_PATH # NOTE(dtroyer): This really should be called from here but due to the way # nova abuses the _cleanup() function we're moving it @@ -227,71 +244,14 @@ function cleanup_nova { # cleanup_nova_hypervisor #fi - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - _cleanup_nova_apache_wsgi - fi -} - -# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_nova_apache_wsgi { - sudo rm -f $NOVA_WSGI_DIR/* - sudo rm -f $(apache_site_config_for nova-api) - sudo rm -f $(apache_site_config_for nova-metadata) -} - -# _config_nova_apache_wsgi() - Set WSGI config files of Keystone -function _config_nova_apache_wsgi { - sudo mkdir -p $NOVA_WSGI_DIR - - local nova_apache_conf - nova_apache_conf=$(apache_site_config_for nova-api) - local nova_metadata_apache_conf - nova_metadata_apache_conf=$(apache_site_config_for nova-metadata) - local nova_ssl="" - local nova_certfile="" - local nova_keyfile="" - local nova_api_port=$NOVA_SERVICE_PORT - local nova_metadata_port=$METADATA_SERVICE_PORT - local venv_path="" - - if is_ssl_enabled_service nova-api; then - nova_ssl="SSLEngine On" - nova_certfile="SSLCertificateFile $NOVA_SSL_CERT" - nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" - fi - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" - fi - - # copy proxy vhost and wsgi helper files - sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api - sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata - - sudo cp $FILES/apache-nova-api.template $nova_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$nova_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g; - s|%SSLENGINE%|$nova_ssl|g; - s|%SSLCERTFILE%|$nova_certfile|g; - s|%SSLKEYFILE%|$nova_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $nova_apache_conf - - sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$nova_metadata_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g; - s|%SSLENGINE%|$nova_ssl|g; - s|%SSLCERTFILE%|$nova_certfile|g; - s|%SSLKEYFILE%|$nova_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $nova_metadata_apache_conf + stop_process "n-api" + stop_process "n-api-meta" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME + fi } # configure_nova() - Set config files, create data dirs, etc @@ -299,8 +259,6 @@ function configure_nova { # Put config files in ``/etc/nova`` for everyone to find sudo install -d -o $STACK_USER $NOVA_CONF_DIR - install_default_policy nova - configure_rootwrap nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then @@ -321,6 +279,8 @@ function configure_nova { if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on @@ -333,17 +293,6 @@ function configure_nova { # to simulate multiple systems. 
if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then if is_ubuntu; then - if [[ ! "$DISTRO" > natty ]]; then - local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup - fi - fi - # enable nbd for lxc unless you're using an lvm backend # otherwise you can't boot instances if [[ "$NOVA_BACKEND" != "LVM" ]]; then @@ -368,9 +317,49 @@ function configure_nova { sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi - if is_suse; then - # iscsid is not started by default - start_service iscsid + + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi + + if [[ ${ISCSID_DEBUG} == "True" ]]; then + # Install an override that starts iscsid with debugging + # enabled. + cat > /tmp/iscsid.override <=v1.0.0 from source. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} + fi + iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} + iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" + fi - if is_service_enabled n-api-meta; then - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CPU_CONF vnc enabled false + fi + + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + iniset $NOVA_CPU_CONF spice enabled true + iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + fi + + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $NOVA_CPU_CONF serial_console enabled True + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" + fi +} + +function configure_console_proxies { + # Use the provided config file path or default to $NOVA_CONF. + local conf=${1:-$NOVA_CONF} + local offset=${2:-0} + # Stagger the offset based on the total number of possible console proxies + # (novnc, spice, serial) so that their ports will not collide if + # all are enabled. 
+ offset=$((offset * 3)) + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc novncproxy_port $((6080 + offset)) + + if is_nova_console_proxy_compute_tls_enabled ; then + iniset $conf vnc auth_schemes "vencrypt" + iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" + iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" + + sudo mkdir -p /etc/pki/nova-novnc + deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem + deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default, and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/nova-novnc to + # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read + # the key file. + sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc + # This is needed to enable TLS in the proxy itself, example log: + # WebSocket server settings: + # - Listen on 0.0.0.0:6080 + # - Flash security policy server + # - Web server (no directory listings). Web root: /usr/share/novnc + # - SSL/TLS support + # - proxying from 0.0.0.0:6080 to None:None + iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem" fi + fi - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 - $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 + if is_service_enabled n-spice; then + iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf spice html5proxy_port $((6081 + offset)) + fi + + if is_service_enabled n-sproxy; then + iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf serial_console serialproxy_port $((6082 + offset)) fi } -# create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* +function configure_nova_unified_limits { + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. + # Note: disk quota is new in nova as of unified limits. 
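The registered limits created below become the effective defaults for every project; they can later be inspected or raised with the same system-scoped CLI. A sketch, assuming the devstack-system-admin cloud used below and a placeholder limit ID:

    # show the defaults nova will enforce
    openstack --os-cloud devstack-system-admin registered limit list --service nova
    # raise one default, e.g. allow 50 servers instead of 10
    openstack --os-cloud devstack-system-admin registered limit set \
        --default-limit 50 <registered-limit-id>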
+ openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope all + iniset $NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } -function create_nova_conf_nova_network { - iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" - iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" - iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" - iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" - fi +function init_nova_service_user_conf { + iniset $NOVA_CONF service_user send_service_user_token True + iniset $NOVA_CONF service_user auth_type password + iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF service_user username nova + iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" + iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" + iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" +} + +function conductor_conf { + local cell="$1" + echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" } # create_nova_keys_dir() - Part of the init_nova() process @@ 
-679,30 +860,61 @@ function create_nova_keys_dir { sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys } +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database $dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell +} + # init_nova() - Initialize databases, etc. function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then - # (Re)create nova database - recreate_database nova - - # Migrate nova database - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - - if is_service_enabled n-cell; then - recreate_database $NOVA_CELLS_DB + # (Re)create nova databases + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies, in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done + recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + # map_cell0 will create the cell mapping record in the nova_api DB so + # this needs to come after the api_db sync happens. + $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` + + # Wait for DBs to finish from above + for i in $(seq 0 $NOVA_NUM_CELLS); do + async_wait nova-cell-$i + done + + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # We didn't db sync cell0 above, so run it now + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + fi + # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations + + # create the cell1 cell for the main nova db where the hosts live + for i in $(seq 1 $NOVA_NUM_CELLS); do + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" + done fi - create_nova_cache_dir create_nova_keys_dir if [[ "$NOVA_BACKEND" == "LVM" ]]; then @@ -736,10 +948,25 @@ function install_nova { # a websockets/html5 or flash powered VNC console for vm instances NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. This happens because novnc pulls in distro cryptography + # which will be preferred by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian.
+ local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else - NOVNC_WEB_DIR=$DEST/noVNC + NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH fi fi @@ -759,13 +986,6 @@ function install_nova { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion - - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - if is_ssl_enabled_service "nova-api"; then - enable_mod_ssl - fi - fi } # start_nova_api() - Start the API process ahead of other things @@ -773,6 +993,7 @@ function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT local service_protocol=$NOVA_SERVICE_PROTOCOL + local nova_url if is_service_enabled tls-proxy; then service_port=$NOVA_SERVICE_PORT_INT service_protocol="http" @@ -782,49 +1003,84 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - # If the site is not enabled then we are in a grenade scenario - local enabled_site_file - enabled_site_file=$(apache_site_config_for nova-api) - if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - enable_apache_site nova-api - enable_apache_site nova-metadata - restart_apache_server - tail_log nova-api /var/log/$APACHE_NAME/nova-api.log - tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log - else - run_process n-api "$NOVA_BIN_DIR/nova-api" - fi + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ echo "Waiting for nova-api to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then + if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then die $LINENO "nova-api did not start" fi - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & - fi - export PATH=$old_path } + # start_nova_compute() - Start the compute process function start_nova_compute { # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF + local compute_cell_conf=$NOVA_CONF + + # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF... + cp $compute_cell_conf $NOVA_CPU_CONF + # ...and then adding/overriding anything explicitly set in $NOVA_CPU_CONF + merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF' + + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so + # skip these bits and use the normal config. + echo "Skipping multi-cell conductor fleet setup" else - local compute_cell_conf=$NOVA_CONF + # "${CELLSV2_SETUP}" is "superconductor" + # FIXME(danms): Should this be configurable? + iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True + # Since the nova-compute service cannot reach nova-scheduler over + # RPC, we also disable track_instance_changes. 
+ iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False + iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + fi + + # Make sure we nuke any database config + inidelete $NOVA_CPU_CONF database connection + inidelete $NOVA_CPU_CONF api_database connection + + # Console proxies were configured earlier in create_nova_conf. Now that the + # nova-cpu.conf has been created, configure the console settings required + # by the compute process. + configure_console_compute + + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 + + # Configure the OVSDB connection for os-vif + if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then + iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" + fi + + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP + elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP + elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do @@ -832,54 +1088,127 @@ function start_nova_compute { # creating or modifying real configurations. Each fake # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" - iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" - run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf" + iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" fi export PATH=$old_path } -# start_nova() - Start running processes, including screen +# start_nova() - Start running processes function start_nova_rest { # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH + local compute_cell_conf=$NOVA_CONF + + run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" + + export PATH=$old_path +} + +function enable_nova_console_proxies { + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + if is_service_enabled $srv; then + enable_service ${srv}-cell${i} + fi + done + done +} + +function start_nova_console_proxies { + # Hack to set the path for rootwrap + local old_path=$PATH + # This is needed to find the nova conf + export PATH=$NOVA_BIN_DIR:$PATH + local api_cell_conf=$NOVA_CONF - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF + # console proxies run globally for singleconductor, else they run per cell + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" + run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" + run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" else - local compute_cell_conf=$NOVA_CONF + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" + run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" + run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" + done fi - # ``run_process`` checks ``is_service_enabled``, it is not needed here - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" - run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" - run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" + export PATH=$old_path +} - run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" - run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" - run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - run_process 
n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" +function enable_nova_fleet { + if is_service_enabled n-cond; then + enable_service n-super-cond + for i in $(seq 1 $NOVA_NUM_CELLS); do + enable_service n-cond-cell${i} + done + fi +} - run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" - run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" - run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" - run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" +function start_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + echo "Starting nova-conductor in a cellsv1-compatible way" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + return + fi - export PATH=$old_path + enable_nova_fleet + if is_service_enabled n-super-cond; then + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + if is_service_enabled n-cond-cell${i}; then + local conf + conf=$(conductor_conf $i) + run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" + fi + done +} + +function is_nova_ready { + # NOTE(sdague): with cells v2 all the compute services must be up + # and checked into the database before discover_hosts is run. This + # happens in all in one installs by accident, because > 30 seconds + # happen between here and the script ending. However, in multinode + # tests this can very often not be the case. So ensure that the + # compute is up before we move on. + wait_for_compute $NOVA_READY_TIMEOUT } function start_nova { start_nova_rest + start_nova_console_proxies + start_nova_conductor start_nova_compute + if is_service_enabled n-api; then + # dump the cell mapping to ensure life is good + echo "Dumping cells_v2 mapping" + $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose + fi } function stop_nova_compute { @@ -897,24 +1226,46 @@ function stop_nova_compute { } function stop_nova_rest { - if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then - disable_apache_site nova-api - disable_apache_site nova-metadata - restart_apache_server + # Kill the non-compute nova processes + for serv in n-api n-api-meta n-sch; do + stop_process $serv + done +} + +function stop_nova_console_proxies { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + for srv in n-novnc n-spice n-sproxy; do + stop_process $srv + done else - stop_process n-api + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + stop_process ${srv}-cell${i} + done + done fi - # Kill the nova screen windows - # Some services are listed here twice since more than one instance - # of a service may be running in certain configs. 
- for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do - stop_process $serv +} + +function stop_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + stop_process n-cond + return + fi + + enable_nova_fleet + for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do + if is_service_enabled $srv; then + stop_process $srv + fi done } -# stop_nova() - Stop running processes (non-screen) +# stop_nova() - Stop running processes function stop_nova { stop_nova_rest + stop_nova_console_proxies + stop_nova_conductor stop_nova_compute } @@ -923,19 +1274,19 @@ function create_flavors { if is_service_enabled n-api; then if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then # Note that danms hates these flavors and apologizes for sdague - openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256 - openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M - openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G - openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G - openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G + openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256 + openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M + openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G + openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G + openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G fi if ! 
openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then - openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny - openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small - openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium - openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large - openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge + openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny + openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small + openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium + openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large + openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge fi fi } diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 4e5a748e45..c0713f9953 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -20,44 +20,101 @@ set +o xtrace # extremely verbose.) DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) +# Try to enable coredumps for libvirt +# Currently fairly specific to OpenStackCI hosts +DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) + +# Enable the Fedora Virtualization Preview Copr repo that provides the latest +# rawhide builds of QEMU, Libvirt and other virt tools. +ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO) + +# Enable coredumps for libvirt +# Bug: https://bugs.launchpad.net/nova/+bug/1643911 +function _enable_coredump { + local confdir=/etc/systemd/system/libvirtd.service.d + local conffile=${confdir}/coredump.conf + + # Create a coredump directory, and instruct the kernel to save to + # here + sudo mkdir -p /var/core + sudo chmod a+wrx /var/core + echo '/var/core/core.%e.%p.%h.%t' | \ + sudo tee /proc/sys/kernel/core_pattern + + # Drop a config file to up the core ulimit + sudo mkdir -p ${confdir} + sudo tee ${conffile} < - elif is_fedora || is_suse; then - # On "KVM for IBM z Systems", kvm does not have its own package - if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then - install_package kvm + elif is_fedora; then + + # Optionally enable the virt-preview repo when on Fedora + if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then + # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/ + sudo dnf copr enable -y @virtmaint-sig/virt-preview + fi + + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm fi - # there is a dependency issue with kvm (which is really just a - # wrapper to qemu-system-x86) that leaves some bios files out, - # so install qemu-kvm (which shouldn't strictly be needed, as - # everything has been merged into qemu-system-x86) to bring in - # the right packages. see - # https://bugzilla.redhat.com/show_bug.cgi?id=1235890 - install_package qemu-kvm + + # Note that in CentOS/RHEL this needs to come from the RDO + # repositories (qemu-kvm-ev ... 
which provides this package) + # as the base system version is too old. We should have + # pre-installed these + install_package $qemu_package install_package libvirt libvirt-devel - pip_install_gr libvirt-python + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi + + if is_arch "aarch64"; then + install_package edk2-aarch64 + fi + fi + + if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then + _enable_coredump fi } # Configures the installed libvirt system so that is accessible by # STACK_USER via qemu:///system with management capabilities. function configure_libvirt { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + if is_service_enabled neutron && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null 2>&1 -CRONTAB - - # Create directories for kernels and images - { - echo "set -eux" - cat $TOP_DIR/tools/xen/functions - echo "create_directory_for_images" - echo "create_directory_for_kernels" - } | $ssh_dom0 - -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - pip_install_gr xenapi -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$_XTRACE_XENSERVER - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..7c8bee3744 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,22 @@ +#!/bin/bash + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + fi + echo "False" +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +} diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 1773da2975..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -# -# lib/oslo -# -# Functions to install **Oslo** libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. 
- -# Dependencies: -# -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - install_oslo - -# Save trace setting -_XTRACE_LIB_OSLO=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- -GITDIR["automaton"]=$DEST/automaton -GITDIR["cliff"]=$DEST/cliff -GITDIR["debtcollector"]=$DEST/debtcollector -GITDIR["futurist"]=$DEST/futurist -GITDIR["oslo.cache"]=$DEST/oslo.cache -GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency -GITDIR["oslo.config"]=$DEST/oslo.config -GITDIR["oslo.context"]=$DEST/oslo.context -GITDIR["oslo.db"]=$DEST/oslo.db -GITDIR["oslo.i18n"]=$DEST/oslo.i18n -GITDIR["oslo.log"]=$DEST/oslo.log -GITDIR["oslo.messaging"]=$DEST/oslo.messaging -GITDIR["oslo.middleware"]=$DEST/oslo.middleware -GITDIR["oslo.policy"]=$DEST/oslo.policy -GITDIR["oslo.privsep"]=$DEST/oslo.privsep -GITDIR["oslo.reports"]=$DEST/oslo.reports -GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap -GITDIR["oslo.serialization"]=$DEST/oslo.serialization -GITDIR["oslo.service"]=$DEST/oslo.service -GITDIR["oslo.utils"]=$DEST/oslo.utils -GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects -GITDIR["oslo.vmware"]=$DEST/oslo.vmware -GITDIR["osprofiler"]=$DEST/osprofiler -GITDIR["pycadf"]=$DEST/pycadf -GITDIR["stevedore"]=$DEST/stevedore -GITDIR["taskflow"]=$DEST/taskflow -GITDIR["tooz"]=$DEST/tooz - -# Support entry points installation of console scripts -OSLO_BIN_DIR=$(get_python_exec_prefix) - - -# Functions -# --------- - -function _do_install_oslo_lib { - local name=$1 - if use_library_from_git "$name"; then - git_clone_by_name "$name" - setup_dev_lib "$name" - fi -} - -# install_oslo() - Collect source and prepare -function install_oslo { - _do_install_oslo_lib "automaton" - _do_install_oslo_lib "cliff" - _do_install_oslo_lib "debtcollector" - _do_install_oslo_lib "futurist" - _do_install_oslo_lib "oslo.cache" - _do_install_oslo_lib "oslo.concurrency" - _do_install_oslo_lib "oslo.config" - _do_install_oslo_lib "oslo.context" - _do_install_oslo_lib "oslo.db" - _do_install_oslo_lib "oslo.i18n" - _do_install_oslo_lib "oslo.log" - _do_install_oslo_lib "oslo.messaging" - _do_install_oslo_lib "oslo.middleware" - _do_install_oslo_lib "oslo.policy" - _do_install_oslo_lib "oslo.privsep" - _do_install_oslo_lib "oslo.reports" - _do_install_oslo_lib "oslo.rootwrap" - _do_install_oslo_lib "oslo.serialization" - _do_install_oslo_lib "oslo.service" - _do_install_oslo_lib "oslo.utils" - _do_install_oslo_lib "oslo.versionedobjects" - _do_install_oslo_lib "oslo.vmware" - _do_install_oslo_lib "osprofiler" - _do_install_oslo_lib "pycadf" - _do_install_oslo_lib "stevedore" - _do_install_oslo_lib "taskflow" - _do_install_oslo_lib "tooz" -} - -# Restore xtrace -$_XTRACE_LIB_OSLO - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/placement b/lib/placement new file mode 100644 index 0000000000..03aaa0344b --- /dev/null +++ b/lib/placement @@ -0,0 +1,151 @@ +#!/bin/bash +# +# lib/placement +# Functions to control the configuration and operation of the **Placement** service +# + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_placement +# - cleanup_placement +# - configure_placement +# - init_placement +# - start_placement +# - stop_placement + +# Save trace setting +_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +PLACEMENT_DIR=$DEST/placement 
+PLACEMENT_CONF_DIR=/etc/placement +PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf +PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone} +# Placement virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv + PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin +else + PLACEMENT_BIN_DIR=$(get_python_exec_prefix) +fi +PLACEMENT_UWSGI=placement.wsgi.api:application +PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini + +if is_service_enabled tls-proxy; then + PLACEMENT_SERVICE_PROTOCOL="https" +fi + +# Public facing bits +PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By default, these flags are False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + +# Functions +# --------- + +# Test if any placement services are enabled +# is_placement_enabled +function is_placement_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"placement-api" ]] && return 0 + return 1 +} + +# cleanup_placement() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_placement { + sudo rm -f $(apache_site_config_for placement-api) + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" +} + +# create_placement_conf() - Write config +function create_placement_conf { + rm -f $PLACEMENT_CONF + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY + configure_keystone_authtoken_middleware $PLACEMENT_CONF placement + setup_logging $PLACEMENT_CONF +} + +# configure_placement() - Set config files, create data dirs, etc +function configure_placement { + sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR + create_placement_conf + + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True + else + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False + fi +} + +# create_placement_accounts() - Set up required placement accounts +# and service and endpoints.
+function create_placement_accounts { + create_service_user "placement" "admin" + local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement" + get_or_create_service "placement" "placement" "Placement Service" + get_or_create_endpoint \ + "placement" \ + "$REGION_NAME" \ + "$placement_api_url" +} + +# init_placement() - Create service user and endpoints +function init_placement { + recreate_database placement + $PLACEMENT_BIN_DIR/placement-manage db sync + create_placement_accounts +} + +# install_placement() - Collect source and prepare +function install_placement { + # Install the openstackclient placement client plugin for CLI + pip_install_gr osc-placement + git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH + setup_develop $PLACEMENT_DIR +} + +# start_placement_api() - Start the API processes ahead of other things +function start_placement_api { + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" + + echo "Waiting for placement-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then + die $LINENO "placement-api did not start" + fi +} + +function start_placement { + start_placement_api +} + +# stop_placement() - Disable the api service and stop it. +function stop_placement { + stop_process "placement-api" +} + +# Restore xtrace +$_XTRACE_LIB_PLACEMENT + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/rpc_backend b/lib/rpc_backend index 0ee46dca6f..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -24,6 +24,11 @@ _XTRACE_RPC_BACKEND=$(set +o | grep xtrace) set +o xtrace +RABBIT_USERID=${RABBIT_USERID:-stackrabbit} +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} +fi + # Functions # --------- @@ -48,7 +53,12 @@ function install_rpc_backend { # Install rabbitmq-server install_package rabbitmq-server if is_fedora; then - sudo systemctl enable rabbitmq-server + # NOTE(jangutter): If rabbitmq is not running (as in a fresh + # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with + # socket activation. This fails the first time and does not get + # cleared. It is benign, but the workaround is to start rabbitmq a + # bit earlier for RPM based distros. + sudo systemctl --now enable rabbitmq-server fi fi } @@ -92,17 +102,26 @@ function restart_rpc_backend { break done - if is_service_enabled n-cell; then - # Add partitioned access for the child cell - if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then - sudo rabbitmqctl add_vhost child_cell - sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*" - fi + # NOTE(frickler): Remove the default guest user + sudo rabbitmqctl delete_user guest || true + fi +} + +# adds a vhost to the rpc backend +function rpc_backend_add_vhost { + local vhost="$1" + if is_service_enabled rabbit; then + if [ -z `sudo rabbitmqctl list_vhosts | grep $vhost` ]; then + sudo rabbitmqctl add_vhost $vhost + sudo rabbitmqctl set_permissions -p $vhost $RABBIT_USERID ".*" ".*" ".*" fi + else + echo 'RPC backend does not support vhosts' + return 1 fi } -# builds transport url string +# Returns the address of the RPC backend in URL format. 
function get_transport_url { local virtual_host=$1 if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then @@ -110,6 +129,16 @@ function get_transport_url { fi } +# Returns the address of the Notification backend in URL format. This +# should be used to set the transport_url option in the +# oslo_messaging_notifications group. +function get_notification_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +} + # iniset configuration function iniset_rpc_backend { local package=$1 diff --git a/lib/stack b/lib/stack index f09ddcee85..bada26f1c2 100644 --- a/lib/stack +++ b/lib/stack @@ -33,5 +33,8 @@ function stack_install_service { if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then unset PIP_VIRTUAL_ENV fi + else + echo "No function declared with name 'install_${service}'." + exit 1 fi } diff --git a/lib/swift b/lib/swift index 0c74411a9c..862927437d 100644 --- a/lib/swift +++ b/lib/swift @@ -7,7 +7,7 @@ # # - ``functions`` file # - ``apache`` file -# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# - ``DEST``, `SWIFT_HASH` must be defined # - ``STACK_USER`` must be defined # - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined # - ``lib/keystone`` file @@ -31,23 +31,29 @@ set +o xtrace # Defaults # -------- -if is_ssl_enabled_service "s-proxy" || is_service_enabled tls-proxy; then +if is_service_enabled tls-proxy; then SWIFT_SERVICE_PROTOCOL="https" fi # Set up default directories GITDIR["python-swiftclient"]=$DEST/python-swiftclient - SWIFT_DIR=$DEST/swift -SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} + +# Swift virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["swift"]=${SWIFT_DIR}.venv + SWIFT_BIN_DIR=${PROJECT_VENV["swift"]}/bin +else + SWIFT_BIN_DIR=$(get_python_exec_prefix) +fi + SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} -SWIFT3_DIR=$DEST/swift3 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} -SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} +SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # TODO: add logging to different location. @@ -60,8 +66,8 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # Default is ``/etc/swift``. SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} -if is_service_enabled s-proxy && is_service_enabled swift3; then - # If we are using ``swift3``, we can default the S3 port to swift instead +if is_service_enabled s-proxy && is_service_enabled s3api; then + # If we are using ``s3api``, we can default the S3 port to swift instead # of nova-objectstore S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT} fi @@ -119,6 +125,11 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} SWIFT_REPLICAS=${SWIFT_REPLICAS:-1} SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) +# Set ``SWIFT_START_ALL_SERVICES`` to control whether all Swift +# services (including the *-auditor, *-replicator, *-reconstructor, etc. +# daemons) should be started. 
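On memory-constrained test nodes the variable defined just below is commonly set to False so that only the proxy and the core account/container/object servers run; e.g. in local.conf:

    [[local|localrc]]
    SWIFT_START_ALL_SERVICES=False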
+SWIFT_START_ALL_SERVICES=$(trueorfalse True SWIFT_START_ALL_SERVICES) + # Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth # token should be placed in the logs. When keystone is used with PKI tokens, # the token values can be huge, seemingly larger the 2K, at the least. We @@ -160,6 +171,7 @@ SWIFT_STORAGE_IPS=${SWIFT_STORAGE_IPS:-} # Test if any Swift services are enabled # is_swift_enabled function is_swift_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"swift" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 return 1 } @@ -167,12 +179,9 @@ function is_swift_enabled { # cleanup_swift() - Remove residual data files function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + rm -rf ${SWIFT_DATA_DIR}/run/ if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _cleanup_swift_apache_wsgi @@ -309,8 +318,8 @@ function generate_swift_config_services { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. @@ -323,10 +332,9 @@ function configure_swift { local node_number local swift_node_config local swift_log_dir - local user_group # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR} sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server @@ -341,7 +349,7 @@ function configure_swift { # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. sed -e " - s/%GROUP%/${USER_GROUP}/; + s/%GROUP%/$(id -g -n ${STACK_USER})/; s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf @@ -354,6 +362,7 @@ function configure_swift { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf # To run container sync feature introduced in Swift ver 1.12.0, # container sync "realm" is added in container-sync-realms.conf @@ -384,25 +393,26 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT} fi - if is_ssl_enabled_service s-proxy; then - ensure_certificates SWIFT - - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT cert_file "$SWIFT_SSL_CERT" - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY" - fi - # DevStack is commonly run in a small slow environment, so bump the timeouts up. 
# ``node_timeout`` is the node read operation response time to the proxy server # ``conn_timeout`` is how long it takes a connect() system call to return iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + # Versioned Writes + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + + # Temporarily add sha1 https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_transport_url) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_notification_url) iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications" SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" @@ -412,16 +422,22 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} # By default Swift will be installed with Keystone and tempauth middleware - # and add the swift3 middleware if its configured for it. The token for + # and add the s3api middleware if it's configured for it. The token for tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the token for keystoneauth would have the standard reseller_prefix `AUTH_` - if is_service_enabled swift3;then - swift_pipeline+=" swift3 s3token " + if is_service_enabled s3api;then + swift_pipeline+=" s3api" fi - if is_service_enabled keystone; then - swift_pipeline+=" authtoken keystoneauth" + swift_pipeline+=" authtoken" + if is_service_enabled s3api;then + swift_pipeline+=" s3token" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true + fi + swift_pipeline+=" keystoneauth" fi + swift_pipeline+=" tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} @@ -439,7 +455,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory - configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken + configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False @@ -451,24 +467,10 @@ function configure_swift { # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth # otherwise, this code also sets the reseller_prefix for Keystoneauth.
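    # Illustrative example: with both auth middlewares in the pipeline, a
    # Keystone-issued account is addressed as /v1/AUTH_<project_id> while a
    # tempauth account is addressed as /v1/TEMPAUTH_<account>; the
    # domain_remap setting below is what lets either prefix be used.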
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" - if is_service_enabled swift3; then - cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER} -[filter:s3token] -paste.filter_factory = keystonemiddleware.s3_token:filter_factory -auth_uri = ${KEYSTONE_AUTH_URI} -cafile = ${SSL_BUNDLE_FILE} -admin_user = swift -admin_tenant_name = ${SERVICE_PROJECT_NAME} -admin_password = ${SERVICE_PASSWORD} - -[filter:swift3] -use = egg:swift3#swift3 -location = ${REGION_NAME} -EOF - fi + # Allow both reseller prefixes to be used with domain_remap + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH" cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} @@ -489,8 +491,6 @@ EOF generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container iniuncomment ${swift_node_config} DEFAULT bind_ip iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} - iniuncomment ${swift_node_config} app:container-server allow_versions - iniset ${swift_node_config} app:container-server allow_versions "true" swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} @@ -523,11 +523,20 @@ EOF local auth_vers auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} - if [[ $auth_vers == "3" ]]; then - iniset ${testfile} func_test auth_prefix /v3/ + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then + iniset ${testfile} func_test auth_port 443 + else + iniset ${testfile} func_test auth_port 80 + fi + iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI} + if [[ "$auth_vers" == "3" ]]; then + iniset ${testfile} func_test auth_prefix /identity/v3/ else - iniset ${testfile} func_test auth_prefix /v2.0/ + iniset ${testfile} func_test auth_prefix /identity/v2.0/ + fi + if is_service_enabled tls-proxy; then + iniset ${testfile} func_test cafile ${SSL_BUNDLE_FILE} + iniset ${testfile} func_test web_front_end apache2 fi fi @@ -537,11 +546,13 @@ EOF local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} - sudo install -d -o ${STACK_USER} -g adm ${swift_log_dir}/hourly + local swift_log_group=adm + sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf + echo "MaxMessageSize 6k" | sudo tee /etc/rsyslog.d/99-maxsize.conf # restart syslog to take the changes sudo killall -HUP rsyslogd fi @@ -563,42 +574,19 @@ function create_swift_disk { sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS.
- if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DISK_IMAGE} - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DISK_IMAGE} - sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} - - truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} - - # Make a fresh XFS filesystem - /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 - fi + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} # Create a link to the above mount and # create all of the directories needed to emulate a few different servers local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; - local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} - local node=${SWIFT_DATA_DIR}/${node_number}/node - local node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${STACK_USER} -g $user_group -d $drive - sudo install -o ${STACK_USER} -g $user_group -d $node_device - sudo chown -R ${STACK_USER}: ${node} + # node_devices must match *.conf devices option + local node_devices=${SWIFT_DATA_DIR}/${node_number} + local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number + sudo ln -sf $real_devices $node_devices; + local device=${real_devices}/sdb1 + [[ -d $device ]] && continue + sudo install -o ${STACK_USER} -g $user_group -d $device done } @@ -636,8 +624,7 @@ function create_swift_accounts { "object-store" \ "$REGION_NAME" \ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" local swift_project_test1 swift_project_test1=$(get_or_create_project swiftprojecttest1 default) @@ -682,7 +669,7 @@ function create_swift_accounts { function init_swift { local node_number # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # Forcibly re-create the backing filesystem create_swift_disk @@ -693,9 +680,9 @@ function init_swift { rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 # The ring will be created on each node, and because the order of # nodes is identical we can use a seed for rebalancing, making it @@ -706,36 +693,34 @@ 
function init_swift { node_number=1 for node in ${SWIFT_STORAGE_IPS}; do - swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 - swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 - swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 let "node_number=node_number+1" done else for node_number in ${SWIFT_REPLICAS_SEQ}; do - swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 done fi # We use a seed for rebalancing. Doing this allows us to create # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same - swift-ring-builder object.builder rebalance 42 - swift-ring-builder container.builder rebalance 42 - swift-ring-builder account.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42 } && popd >/dev/null - - # Create cache dir - sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR - rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - setup_develop $SWIFT_DIR + # keystonemiddleware needs to be installed via keystone extras as defined + # in setup.cfg, see bug #1909018 for more details. + setup_develop $SWIFT_DIR keystone if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi fi @@ -763,7 +748,7 @@ function install_ceilometermiddleware { fi } -# start_swift() - Start running processes, including screen +# start_swift() - Start running processes function start_swift { # (re)start memcached to make sure we have a clean memcache. 
restart_service memcached @@ -778,41 +763,57 @@ function start_swift { fi if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + # Apache should serve the "PACO" a.k.a "main" services restart_apache_server - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start - tail_log s-proxy /var/log/$APACHE_NAME/proxy-server - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1 - done - fi + # The rest of the services should be started in the background + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start return 0 fi - # By default with only one replica we are launching the proxy, - # container, account and object server in screen in foreground and - # other services in background. If we have ``SWIFT_REPLICAS`` set to something - # greater than one we first spawn all the Swift services then kill the proxy - # service so we can run it in foreground in screen. ``swift-init ... - # {stop|restart}`` exits with '1' if no servers are running, ignore it just - # in case - local todo type - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + + # By default, with only one replica, we launch the proxy, container, + # account and object servers in the foreground. Then the rest of + # the services are optionally started. + # + # If we have ``SWIFT_REPLICAS`` set to something greater than one + # we first spawn *all* the Swift services then kill the proxy service + # so we can run it in the foreground. + # + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are + # running, ignore it just in case if [[ ${SWIFT_REPLICAS} == 1 ]]; then - todo="object container account" + local foreground_services type + + foreground_services="object container account" + for type in ${foreground_services}; do + run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + + if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + else + # The container-sync daemon is strictly needed to pass the container + # sync Tempest tests. + enable_service s-container-sync + run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf" + fi + else + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true fi - for type in proxy ${todo}; do - swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true - done + if is_service_enabled tls-proxy; then local proxy_port=${SWIFT_DEFAULT_BIND_PORT} - start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT & + start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE fi - run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" - done + run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + + # We also started the storage services, but the proxy started last and + # will take the longest to start, so by the time it comes up, we're + # probably fine. + echo "Waiting for swift proxy to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then + die $LINENO "swift proxy did not start" fi if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then @@ -820,17 +821,17 @@ function start_swift { fi } -# stop_swift() - Stop running processes (non-screen) +# stop_swift() - Stop running processes function stop_swift { local type if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 fi # screen normally killed by ``unstack.sh`` - if type -p swift-init >/dev/null; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers # Maintain the iteration as stop_process() has some desirable side-effects @@ -843,12 +844,15 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! - OS_USERNAME=swift \ - OS_PASSWORD=$SERVICE_PASSWORD \ - OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \ - OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - openstack object store account \ + openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ + object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } diff --git a/lib/tcpdump b/lib/tcpdump new file mode 100644 index 0000000000..16e8269d02 --- /dev/null +++ b/lib/tcpdump @@ -0,0 +1,43 @@ +#!/bin/bash +# +# lib/tcpdump +# Functions to start and stop a tcpdump + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_tcpdump +# - stop_tcpdump + +# Save trace setting +_XTRACE_TCPDUMP=$(set +o | grep xtrace) +set +o xtrace + +TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap} + +# e.g. 
for iscsi +# "-i any tcp port 3260" +TCPDUMP_ARGS=${TCPDUMP_ARGS:-""} + +# start_tcpdump() - Start running processes +function start_tcpdump { + # Run a tcpdump with given arguments and save the packet capture + if is_service_enabled tcpdump; then + if [[ -z "${TCPDUMP_ARGS}" ]]; then + die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set" + fi + touch ${TCPDUMP_OUTPUT} + run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root + fi +} + +# stop_tcpdump() - Stop the tcpdump process +function stop_tcpdump { + stop_process tcpdump +} + +# Restore xtrace +$_XTRACE_TCPDUMP diff --git a/lib/tempest b/lib/tempest index 347b2a7dc7..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -11,14 +11,14 @@ # - ``DEST``, ``FILES`` # - ``ADMIN_PASSWORD`` # - ``DEFAULT_IMAGE_NAME`` +# - ``DEFAULT_IMAGE_FILE_NAME`` # - ``S3_SERVICE_PORT`` # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares # - ``PUBLIC_NETWORK_NAME`` -# - ``Q_ROUTER_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -27,7 +27,9 @@ # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # - ``DEFAULT_INSTANCE_TYPE`` # - ``DEFAULT_INSTANCE_USER`` +# - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: @@ -49,10 +51,6 @@ TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} -NOVA_SOURCE_DIR=$DEST/nova - -BUILD_INTERVAL=1 - # This is the timeout that tempest will wait for a VM to change state, # spawn, delete, etc. # The default is set to 196 seconds. @@ -74,6 +72,17 @@ TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit).
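+# As a worked example with hypothetical values: importing three images with
+#   TEMPEST_GLANCE_IMAGE_COUNT=3
+#   TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=2
+#   TEMPEST_GLANCE_IMPORT_POLL_LIMIT=30
+# polls Glance every 2 seconds, up to 30 times, i.e. for at most ~60 seconds
+# before the poller gives up with a warning.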
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + # Neutron/Network variables IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) @@ -93,6 +102,10 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) # Functions # --------- @@ -106,6 +119,85 @@ function remove_disabled_extensions { remove_disabled_services "$extensions_list" "$disabled_exts" } +# image_size_in_gib - converts an image size from bytes to GiB, rounded up +# Takes an image ID parameter as input +function image_size_in_gib { + local size + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) + echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" +} + +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the env var below to point at master + # constraints even though that is the default in tox.ini. Otherwise it can + # create issues for grenade runs where old and new devstack can have + # different tempest versions (old and master) to install. For details, + # refer to https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + else + echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." + cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c + # NOTE: we set both tox env vars; once Tempest starts using the new + # TOX_CONSTRAINTS_FILE var we can remove the old one. + export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + fi +} + +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes two nameref arguments: an array and +# a variable. The array ($1) will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# stored in the img_id ($2) parameter. +function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + + # start with a fresh array in case we are called multiple times + img_array=() + + # NOTE(gmaan): Most ISO images require ssh to be enabled explicitly, + # and if we set those ISO images in image_ref and image_ref_alt, tests can + # fail because many tests that use image_ref and image_ref_alt + # to boot a server also perform ssh. We skip setting ISO images in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE.
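+    # Usage sketch: both arguments are bash namerefs, so callers pass
+    # variable *names*, not values, e.g.:
+    #   declare -a images
+    #   get_active_images images image_uuid
+    # after which ${images[@]} holds the active image UUIDs and $image_uuid
+    # holds the UUID of DEFAULT_IMAGE_NAME, if it was found.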
+ while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(\+--)|ID|aki|ari/ { print $3,$2,$4 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -115,6 +207,8 @@ function configure_tempest { pip_install_gr testrepository fi + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images @@ -126,9 +220,12 @@ function configure_tempest { local available_flavors local flavors_ref local flavor_lines + local flavor_ref_size + local flavor_ref_alt_size local public_network_id local public_router_id local ssh_connect_method="floating" + local disk # Save IFS ifs=$IFS @@ -144,13 +241,21 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" + get_active_images images image_uuid + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi - images+=($IMAGE_UUID) - done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + fi case "${#images[*]}" in 0) @@ -160,13 +265,22 @@ function configure_tempest { 1) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[0]} fi + image_uuid_alt=$image_uuid ;; *) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[1]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in ${images[@]}; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + break + fi + done fi ;; esac @@ -186,19 +300,25 @@ function configure_tempest { local alt_username=${ALT_USERNAME:-alt_demo} local alt_project_name=${ALT_TENANT_NAME:-alt_demo} local admin_project_id - admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }") + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior # Tempest creates its own instance types - available_flavors=$(nova flavor-list) + available_flavors=$(openstack --os-cloud devstack-admin flavor list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then - nova flavor-create m1.nano 42 64 0 1 + # Determine the flavor disk size based on the image size. + disk=$(image_size_in_gib $image_uuid) + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - nova flavor-create m1.micro 84 128 0 1 + # Determine the alt flavor disk size based on the alt image size. + disk=$(image_size_in_gib $image_uuid_alt) + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else @@ -224,11 +344,24 @@ function configure_tempest { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. for f in ${flavors[@]:1}; do - if [[ $f -ne $flavor_ref ]]; then + if [[ "$f" != "$flavor_ref" ]]; then + # + # NOTE(sdatko): Resize is only possible when target flavor + # is not smaller than the original one. For + # Tempest tests, in case there was a bigger + # flavor selected as default, e.g. m1.small, + # we need to perform additional check. 
+ # + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") + if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then + continue + fi + flavor_ref_alt=$f break fi @@ -236,13 +369,33 @@ fi fi + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ -n "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT + fi + + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} # the public network (for floating ip access) is only available # if the extension is enabled. - if is_networking_extension_supported 'external-net'; then - public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ - awk '{print $2}') + # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created + # and the public_network_id should not be set. + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) + # make sure shared network presence does not confuse the tempest tests + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG @@ -259,59 +412,63 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" - # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation - iniset $TEMPEST_CONFIG identity admin_domain_scope True + iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then iniset $TEMPEST_CONFIG auth admin_username $admin_username iniset $TEMPEST_CONFIG auth admin_password "$password" - iniset $TEMPEST_CONFIG auth admin_tenant_name $admin_project_name - iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id + iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - # Only Identity v3 is available; then skip Identity API v2 tests - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - # In addition, use v3 auth tokens for running all Tempest tests - iniset $TEMPEST_CONFIG identity auth_version v3 - else - iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} + 
iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} + if is_service_enabled tls-proxy; then + iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE + # Identity Features + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True + fi + + # When LDAP is enabled domain specific drivers are also enabled and the users + # and groups identity tests must adapt to this scenario + if is_service_enabled ldap; then + iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True fi + # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike + # as this is supported in Queens and beyond. + iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True + + # In Queens and later, application credentials are enabled by default + # so remove this once Tempest no longer supports Pike. + iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True + + # In Train and later, access rules for application credentials are enabled + # by default so remove this once Tempest no longer supports Stein. + iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True + # Image # We want to be able to override this variable in the gate to avoid # doing an external HTTP fetch for this test. if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi - if [ "$VIRT_DRIVER" = "xenserver" ]; then - iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" - iniset $TEMPEST_CONFIG scenario img_disk_format vhd - fi - - # Image Features - iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True - if [ "$GLANCE_V1_ENABLED" != "True" ]; then - iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False + iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW + iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True + if is_service_enabled g-api-r; then + iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote fi # Compute - iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method - # set the equiv validation option here as well to ensure they are - # in sync. They shouldn't be separate options. iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method - if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then + if ! 
is_service_enabled neutron; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi @@ -348,97 +505,105 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion fi + iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life. - iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True - # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life. - iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True + iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} - # TODO(mriedem): Remove this when kilo-eol happens since the - # neutron.allow_duplicate_networks option was removed from nova in Liberty - # and is now the default behavior. - iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True} - if is_service_enabled n-cell; then - # Cells doesn't support shelving/unshelving - iniset $TEMPEST_CONFIG compute-feature-enabled shelve False - # Cells doesn't support hot-plugging virtual interfaces. - iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False - # Cells v1 doesn't support the rescue/unrescue tests in Tempest - iniset $TEMPEST_CONFIG compute-feature-enabled rescue False - if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - # Cells supports resize but does not currently work with devstack - # because of the custom flavors created for Tempest runs which are - # not in the cells database. - # TODO(mriedem): work on adding a nova-manage command to sync - # flavors into the cells database. - iniset $TEMPEST_CONFIG compute-feature-enabled resize False - fi + # Starting Wallaby, nova sanitizes instance hostnames having freeform characters with dashes + iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True + + if [[ -n "$NOVA_FILTERS" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} + fi + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True + fi + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True + fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True fi + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
+ local nova_policy_roles="admin,manager,member,reader,service" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles + # Network - iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network project_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY - # Orchestration Tests - if is_service_enabled heat; then - # Though this is not needed by heat, some tempest tests explicitly - # try to set this role. Removing them from the tempest tests breaks - # some non-devstack CIs. - get_or_create_role "heat_stack_owner" + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" - if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then - iniset $TEMPEST_CONFIG orchestration image_ref $(basename "${HEAT_CFN_IMAGE_URL%.*}") - fi - # Nova might not be enabled, especially when we want to test tempest scenario/API that only create Neutron resources - if is_service_enabled nova; then - # build a specialized heat flavor - available_flavors=$(nova flavor-list) - if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then - nova flavor-create m1.heat 451 512 0 1 - fi - iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" + # Scenario + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} + iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd fi - iniset $TEMPEST_CONFIG orchestration build_timeout 900 - iniset $TEMPEST_CONFIG orchestration stack_owner_role "heat_stack_owner" fi - # Scenario - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR - iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img" - iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd" - iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" - iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" - - # Large Ops Number - iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} - - # Telemetry - iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True" - + # If using provider networking, use the physical network for validation rather than private + TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME + if is_provider_network; then + TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK + fi # Validation - iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} + iniset $TEMPEST_CONFIG validation 
run_validation ${TEMPEST_RUN_VALIDATION:-True} iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME + iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros} + iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER} + iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life. - iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True - # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info. - iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True - # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life. - iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True + # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) + # Only turn on TEMPEST_VOLUME_MANAGE_VOLUME by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME) + # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends + # in Cinder and the libvirt driver in Nova. 
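+    # e.g. with an lvm entry in CINDER_ENABLED_BACKENDS and
+    # VIRT_DRIVER=libvirt, extend_attached_volume defaults to True below;
+    # any other combination leaves it False unless explicitly set.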
+ if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then + TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) + # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} if [ "$tempest_volume_min_microversion" == "None" ]; then @@ -458,15 +623,26 @@ function configure_tempest { fi # Using ``CINDER_ENABLED_BACKENDS`` + # Cinder uses a comma separated list with "type:backend_name": + # CINDER_ENABLED_BACKENDS = ceph:cephBE1,lvm:lvmBE2,foo:my_foo if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then + # We have at least 2 backends iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" - local i=1 + local add_comma_seperator=0 + local backends_list='' local be + # Tempest uses a comma separated list of backend_names: + # backend_names = BACKEND_1,BACKEND_2 for be in ${CINDER_ENABLED_BACKENDS//,/ }; do - local be_name=${be##*:} - iniset $TEMPEST_CONFIG volume "backend${i}_name" "$be_name" - i=$(( i + 1 )) + if [ "$add_comma_seperator" -eq "1" ]; then + backends_list+=,${be##*:} + else + # first element in the list + backends_list+=${be##*:} + add_comma_seperator=1 + fi done + iniset $TEMPEST_CONFIG volume "backend_names" "$backends_list" fi if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ @@ -478,19 +654,30 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi - # Dashboard - iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi - # CLI - iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR + # Placement Features + # Set the microversion range for placement. + # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. + # NOTE- To avoid microversion tests failure on stable branch, we need to change "tempest_placement_max_microversion" + # for stable branch on each release which should be changed from "latest" to max supported version of that release. 
+ local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None} + local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"} + if [ "$tempest_placement_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement min_microversion + else + iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion + fi + if [ "$tempest_placement_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement max_microversion + else + iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion + fi # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then - iniset $TEMPEST_CONFIG baremetal driver_enabled True - iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES - iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled console_output False iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False @@ -503,22 +690,33 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi - # Libvirt-LXC - if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then - iniset $TEMPEST_CONFIG compute-feature-enabled rescue False - iniset $TEMPEST_CONFIG compute-feature-enabled resize False - iniset $TEMPEST_CONFIG compute-feature-enabled shelve False - iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False - iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + # Libvirt + if [ "$VIRT_DRIVER" = "libvirt" ]; then + # Libvirt-LXC + if [ "$LIBVIRT_TYPE" = "lxc" ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled rescue False + iniset $TEMPEST_CONFIG compute-feature-enabled resize False + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False + iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + else + iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True + iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True + iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True + fi fi # ``service_available`` # - # this tempest service list needs to be all the services that - # tempest supports, otherwise we can have an erroneous set of + # this tempest service list needs to be the services that + # tempest owns, otherwise we can have an erroneous set of # defaults (something defaulting true in Tempest, but not listed here). + # services tested by tempest plugins need to be set on the service's + # devstack plugin side, as devstack cannot keep track of all the tempest + # plugins' services. Refer to Bug#1743688 for more details. + # 'horizon' is also kept here as there is no devstack plugin for horizon. local service - local tempest_services="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove" + local tempest_services="key,glance,nova,neutron,cinder,swift,horizon" for service in ${tempest_services//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" @@ -527,6 +725,30 @@ fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on the Tempest side so that + # tests can be run with scoped tokens. 
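+    # For example (illustrative), setting ENFORCE_SCOPE=True in local.conf
+    # flips every per-service check below at once, while a single setting
+    # such as NOVA_ENFORCE_SCOPE=True enables scope enforcement for nova only.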
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + fi + + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. @@ -540,18 +762,22 @@ function configure_tempest { local tmp_cfg_file tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m if [[ "$OFFLINE" != "True" ]]; then tox -revenv-tempest --notest fi - tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt + rm -f $tmp_u_c_m # Auth: - iniset $TEMPEST_CONFIG auth tempest_roles "Member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then - tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml else - tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml fi iniset $TEMPEST_CONFIG auth use_dynamic_credentials False iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml" @@ -566,16 +792,19 @@ function configure_tempest { # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config tox -evenv -- tempest verify-config -uro $tmp_cfg_file - # Nova API extensions - local compute_api_extensions=${COMPUTE_API_EXTENSIONS:-"all"} - if [[ ! -z "$DISABLE_COMPUTE_API_EXTENSIONS" ]]; then - # Enabled extensions are either the ones explicitly specified or those available on the API endpoint - compute_api_extensions=${COMPUTE_API_EXTENSIONS:-$(iniget $tmp_cfg_file compute-feature-enabled api_extensions | tr -d " ")} - # Remove disabled extensions - compute_api_extensions=$(remove_disabled_extensions $compute_api_extensions $DISABLE_COMPUTE_API_EXTENSIONS) - fi - iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions + # Neutron API Extensions + + # disable metering if we didn't enable the service + if ! 
is_service_enabled q-metering neutron-metering; then
+        DISABLE_NETWORK_API_EXTENSIONS+=", metering"
+    fi
+
+    # disable l3_agent_scheduler if we didn't enable the L3 agent
+    if ! is_service_enabled q-l3 neutron-l3; then
+        DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
+    fi
+
     local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then
         # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
@@ -583,6 +812,9 @@ function configure_tempest {
         # Remove disabled extensions
         network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS)
     fi
+    if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then
+        network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS"
+    fi
     iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
     # Swift API Extensions
     local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
@@ -610,14 +842,29 @@ function configure_tempest {
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install tox
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # was released after zed and has some incompatible changes, and it is
+    # OK not to fix the issues caused by tox 4.0.0 in stable branch jobs.
+    # We can continue testing the stable/zed and lower branches with
+    # tox<4.0.0.
+    pip_install 'tox!=2.8.0,<4.0.0'
     pushd $TEMPEST_DIR
-    tox --notest -efull
+    # NOTE(gmann): Check out TEMPEST_BRANCH explicitly in case it is a tag
+    # name rather than master. git_clone would not check out the tag because
+    # TEMPEST_DIR already exists, unless RECLONE is true.
+ git checkout $TEMPEST_BRANCH + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + + tox -r --notest -efull # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements - $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m popd } @@ -625,7 +872,11 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - tox -evenv-tempest -- pip install $TEMPEST_PLUGINS + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS + rm -f $tmp_u_c_m echo "Checking installed Tempest plugins:" tox -evenv-tempest -- tempest list-plugins fi diff --git a/lib/template b/lib/template index b92fb40483..e6d003284f 100644 --- a/lib/template +++ b/lib/template @@ -41,6 +41,7 @@ XXX_CONF_DIR=/etc/XXXX # Test if any XXXX services are enabled # is_XXXX_enabled function is_XXXX_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"XXXX" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 return 1 } @@ -80,7 +81,7 @@ function install_XXXX { : } -# start_XXXX() - Start running processes, including screen +# start_XXXX() - Start running processes function start_XXXX { # The quoted command must be a single command and not include an # shell metacharacters, redirections or shell builtins. @@ -88,7 +89,7 @@ function start_XXXX { : } -# stop_XXXX() - Stop running processes (non-screen) +# stop_XXXX() - Stop running processes function stop_XXXX { # for serv in serv-a serv-b; do # stop_process $serv diff --git a/lib/tls b/lib/tls index ca57ed44e0..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -16,7 +16,6 @@ # # - configure_CA # - init_CA -# - cleanup_CA # - configure_proxy # - start_tls_proxy @@ -38,7 +37,7 @@ if is_service_enabled tls-proxy; then # TODO(dtroyer): revisit this below after the search for HOST_IP has been done - TLS_IP=${TLS_IP:-$SERVICE_IP} + TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)} fi DEVSTACK_HOSTNAME=$(hostname -f) @@ -68,9 +67,9 @@ function configure_CA { # build common config file # Verify ``TLS_IP`` is good - if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then + if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then # auto-discover has changed the IP - TLS_IP=$HOST_IP + TLS_IP=$(ipv6_unquote $SERVICE_HOST) fi } @@ -114,11 +113,11 @@ new_certs_dir = \$dir/newcerts certificate = \$dir/cacert.pem private_key = \$dir/private/cacert.key RANDFILE = \$dir/private/.rand -default_md = default +default_md = sha256 [ req ] -default_bits = 1024 -default_md = sha1 +default_bits = 2048 +default_md = sha256 prompt = no distinguished_name = ca_distinguished_name @@ -170,7 +169,7 @@ default_md = default [ req ] default_bits = 1024 -default_md = sha1 +default_md = sha256 prompt = no distinguished_name = req_distinguished_name @@ -202,7 +201,6 @@ subjectAltName = \$ENV::SUBJECT_ALT_NAME # Create root and intermediate CAs # init_CA function init_CA { - fix_system_ca_bundle_path # Ensure CAs are built make_root_CA $ROOT_CA_DIR make_int_CA $INT_CA_DIR $ROOT_CA_DIR @@ -221,26 +219,15 @@ function init_CA { fi } -# 
Clean up the CA files -# cleanup_CA -function cleanup_CA { - if is_fedora; then - sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem - sudo update-ca-trust - elif is_ubuntu; then - sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt - sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt - sudo update-ca-certificates - fi -} - # Create an initial server cert # init_cert function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then - # Lie to let incomplete match routines work - TLS_IP="DNS:$TLS_IP" + TLS_IP="IP:$TLS_IP" + if [[ -n "$HOST_IPV6" ]]; then + TLS_IP="$TLS_IP,IP:$HOST_IPV6" + fi fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" @@ -258,10 +245,12 @@ function make_cert { local alt_names=$4 if [ "$common_name" != "$SERVICE_HOST" ]; then - if [[ -z "$alt_names" ]]; then - alt_names="DNS:$SERVICE_HOST" - else - alt_names="$alt_names,DNS:$SERVICE_HOST" + if is_ipv4_address "$SERVICE_HOST" ; then + if [[ -z "$alt_names" ]]; then + alt_names="IP:$SERVICE_HOST" + else + alt_names="$alt_names,IP:$SERVICE_HOST" + fi fi fi @@ -269,7 +258,7 @@ function make_cert { if [ ! -r "$ca_dir/$cert_name.crt" ]; then # Generate a signing request $OPENSSL req \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/$cert_name.key \ @@ -309,7 +298,7 @@ function make_int_CA { if [ ! -r "$ca_dir/cacert.pem" ]; then # Create a signing certificate request $OPENSSL req -config $ca_dir/ca.conf \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/cacert.key \ @@ -336,15 +325,35 @@ function make_root_CA { create_CA_base $ca_dir create_CA_config $ca_dir 'Root CA' - # Create a self-signed certificate valid for 5 years - $OPENSSL req -config $ca_dir/ca.conf \ - -x509 \ - -nodes \ - -newkey rsa \ - -days 21360 \ - -keyout $ca_dir/private/cacert.key \ - -out $ca_dir/cacert.pem \ - -outform PEM + if [ ! -r "$ca_dir/cacert.pem" ]; then + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM + fi +} + +# Deploy the service cert & key to a service specific +# location +function deploy_int_cert { + local cert_target_file=$1 + local key_target_file=$2 + + sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file" + sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file" +} + +# Deploy the intermediate CA cert bundle file to a service +# specific location +function deploy_int_CA { + local ca_target_file=$1 + + sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file" } # If a non-system python-requests is installed then it will use the @@ -353,10 +362,13 @@ function make_root_CA { # one. If the value for the CA is not rooted in /etc then we know # we need to change it. function fix_system_ca_bundle_path { - if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then + if is_service_enabled tls-proxy; then local capath - capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') - + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! 
-L $capath ]]; then
             if is_fedora; then
                 sudo rm -f $capath
@@ -372,27 +384,14 @@ function fix_system_ca_bundle_path {
 }
 
-# Certificate Input Configuration
-# ===============================
-
-# check to see if the service(s) specified are to be SSL enabled.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# Uses global ``SSL_ENABLED_SERVICES``
+# Only for compatibility; returns whether the tls-proxy service is enabled
 function is_ssl_enabled_service {
-    local services=$@
-    local service=""
-    if [ "$USE_SSL" == "False" ]; then
-        return 1
-    fi
-    for service in ${services}; do
-        [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
-    done
-    return 1
+    is_service_enabled tls-proxy
 }
 
+# Certificate Input Configuration
+# ===============================
+
 # Ensure that the certificates for a service are in place. This function does
 # not check that a service is SSL enabled, this should already have been
 # completed.
@@ -452,30 +451,164 @@ function enable_mod_ssl {
 # Proxy Functions
 # ===============
 
+function tune_apache_connections {
+    local should_restart=$1
+    local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf
+    if ! [ -f $tuning_file ] ; then
+        sudo bash -c "cat > $tuning_file" << EOF
+# worker MPM
+# StartServers: initial number of server processes to start
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a
+#              graceful restart. ThreadLimit can only be changed by stopping
+#              and starting Apache.
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxClients: maximum number of simultaneous client connections
+# MaxRequestsPerChild: maximum number of requests a server process serves
+#
+# We want to be memory thrifty so tune down apache to allow 256 total
+# connections. This should still be plenty for a dev env yet lighter than
+# apache defaults.
+
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+<IfModule mpm_worker_module>
+ServerLimit 8
+ThreadsPerChild 32
+MaxClients 256
+StartServers 2
+MinSpareThreads 32
+MaxSpareThreads 96
+ThreadLimit 64
+MaxRequestsPerChild 0
+</IfModule>
+
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+<IfModule mpm_event_module>
+ServerLimit 8
+ThreadsPerChild 32
+MaxClients 256
+StartServers 2
+MinSpareThreads 32
+MaxSpareThreads 96
+ThreadLimit 64
+MaxRequestsPerChild 0
+</IfModule>
+
+EOF
+        if [ "$should_restart" != "norestart" ] ; then
+            # Only restart the apache server if we know we really want to
+            # do so. Too many restarts in a short period of time is treated
+            # as an error by systemd.
+            restart_apache_server
+        fi
+    fi
+}
+
 # Starts the TLS proxy for the given IP/ports
-# start_tls_proxy front-host front-port back-host back-port
+# start_tls_proxy service-name front-host front-port back-host back-port
 function start_tls_proxy {
-    local f_host=$1
-    local f_port=$2
-    local b_host=$3
-    local b_port=$4
-
-    stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+    local b_service="$1-tls-proxy"
+    local f_host=$2
+    local f_port=$3
+    local b_host=$4
+    local b_port=$5
+    # 8190 is the default apache header size.
+    local f_header_size=${6:-8190}
+
+    # We don't restart apache here as we'll do it at the end of the function.
+    tune_apache_connections norestart
+
+    local config_file
+    config_file=$(apache_site_config_for $b_service)
+    local listen_string
+    # Default apache configs on ubuntu and centos listen on 80 and 443.
+    # Newer apache seems fine with duplicate Listen directives, but older
+    # apache does not, so special-case 80 and 443.
+    if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then
+        listen_string=""
+    elif [[ "$f_host" == '*' ]] ; then
+        listen_string="Listen $f_port"
+    else
+        listen_string="Listen $f_host:$f_port"
+    fi
+    sudo bash -c "cat >$config_file" << EOF
+$listen_string
+
+<VirtualHost $f_host:$f_port>
+    SSLEngine On
+    SSLCertificateFile $DEVSTACK_CERT
+    SSLProtocol -all +TLSv1.3 +TLSv1.2
+
+    # Disable KeepAlive to fix bug #1630664 a.k.a the
+    # ('Connection aborted.', BadStatusLine("''",)) error
+    KeepAlive Off
+
+    # This increase in allowed request header sizes is required
+    # for swift functional testing to work with tls enabled. It is 2 bytes
+    # larger than the apache default of 8190.
+    LimitRequestFieldSize $f_header_size
+    RequestHeader set X-Forwarded-Proto "https"
+
+    # Avoid races (at the cost of performance) to re-use a pooled connection
+    # where the connection is closed (bug 1807518).
+    # Set acquire=1 to disable waiting for connection pool members so that
+    # we can determine when apache is overloaded (returns 503).
+    SetEnv proxy-initial-not-pooled
+    <Location />
+        ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1
+        ProxyPassReverse http://$b_host:$b_port/
+    </Location>
+    ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
+    ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+    LogLevel info
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
+</VirtualHost>
+EOF
+    for mod in headers ssl proxy proxy_http; do
+        # We don't need to restart here as we will restart once at the end
+        # of the function.
+        enable_apache_mod $mod norestart
+    done
+    enable_apache_site $b_service
+    restart_apache_server
 }
 
-
 # Cleanup Functions
 # =================
 
-# Stops all stud processes. This should be done only after all services
+# Stops the apache service. This should be done only after all services
 # using tls configuration are down.
 function stop_tls_proxy {
-    killall stud
+    stop_apache_server
+
+    # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but
+    # necessary so that we can restart after an unstack. A better
+    # solution would be to ensure that each service calling
+    # start_tls_proxy will call stop_tls_proxy with the same
+    # parameters on shutdown so we can use the disable_apache_site
+    # function and remove individual files there.
+ if is_ubuntu; then + sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf + else + for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do + sudo mv $i $i.disabled + done + fi } -# Remove CA along with configuration, as well as the local server certificate +# Clean up the CA files +# cleanup_CA function cleanup_CA { - rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT" + if is_fedora; then + sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt + sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi + + rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT" } # Tell emacs to use shell-script-mode diff --git a/openrc b/openrc index 8d8ae8b030..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -7,9 +7,6 @@ # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. - if [[ -n "$1" ]]; then OS_USERNAME=$1 fi @@ -29,35 +26,17 @@ source $RC_DIR/stackrc # Load the last env variables if available if [[ -r $RC_DIR/.stackenv ]]; then source $RC_DIR/.stackenv + export OS_CACERT fi # Get some necessary configuration source $RC_DIR/lib/tls -# The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. In some places **tenant** remains -# referenced, but in all cases this just means **project**. We will -# warn if we need to turn on legacy **tenant** support to have a -# working environment. +# Minimal configuration +export OS_AUTH_TYPE=password export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} - -echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools." -export OS_TENANT_NAME=$OS_PROJECT_NAME - -# In addition to the owning entity (project), nova stores the entity performing -# the action as the **user**. export OS_USERNAME=${OS_USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} - -# Don't put the key into a keyring by default. Testing for development is much -# easier with this off. -export OS_NO_CACHE=${OS_NO_CACHE:-1} - -# Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION @@ -76,26 +55,14 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} +# If you don't have a working .stackenv, this is the backup position +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} -# Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} +export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the 2.0 *identity api*. 
-# -export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION} - -# Currently, in order to use openstackclient with Identity API v3, -# we need to set the domain which the user and project belong to. -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} - export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -fi +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} # Set OS_CACERT to a default CA certificate chain if it exists. if [[ ! -v OS_CACERT ]] ; then @@ -105,8 +72,3 @@ if [[ ! -v OS_CACERT ]] ; then export OS_CACERT=$DEFAULT_OS_CACERT fi fi - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. -export CINDER_VERSION=${CINDER_VERSION:-2} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh deleted file mode 100755 index 856eaff36f..0000000000 --- a/pkg/elasticsearch.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -xe - -# basic reference point for things like filecache -# -# TODO(sdague): once we have a few of these I imagine the download -# step can probably be factored out to something nicer -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -FILES=$TOP_DIR/files -source $TOP_DIR/stackrc - -# Package source and version, all pkg files are expected to have -# something like this, as well as a way to override them. -ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5} -ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch} - -# Elastic search actual implementation -function wget_elasticsearch { - local file=${1} - - if [ ! -f ${FILES}/${file} ]; then - wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file} - fi - - if [ ! -f ${FILES}/${file}.sha1.txt ]; then - wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt - fi - - pushd ${FILES}; sha1sum ${file} > ${file}.sha1.gen; popd - - if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then - echo "Invalid elasticsearch download. Could not install." - return 1 - fi - return 0 -} - -function download_elasticsearch { - if is_ubuntu; then - wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb - elif is_fedora; then - wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm - fi -} - -function configure_elasticsearch { - # currently a no op - : -} - -function _check_elasticsearch_ready { - # poll elasticsearch to see if it's started - if ! wait_for_service 30 http://localhost:9200; then - die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch" - fi -} - -function start_elasticsearch { - if is_ubuntu; then - sudo /etc/init.d/elasticsearch start - _check_elasticsearch_ready - elif is_fedora; then - sudo /bin/systemctl start elasticsearch.service - _check_elasticsearch_ready - else - echo "Unsupported architecture...can not start elasticsearch." - fi -} - -function stop_elasticsearch { - if is_ubuntu; then - sudo /etc/init.d/elasticsearch stop - elif is_fedora; then - sudo /bin/systemctl stop elasticsearch.service - else - echo "Unsupported architecture...can not stop elasticsearch." - fi -} - -function install_elasticsearch { - pip_install_gr elasticsearch - if is_package_installed elasticsearch; then - echo "Note: elasticsearch was already installed." 
- return - fi - if is_ubuntu; then - is_package_installed openjdk-7-jre-headless || install_package openjdk-7-jre-headless - - sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb - sudo update-rc.d elasticsearch defaults 95 10 - elif is_fedora; then - is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless - yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm - sudo /bin/systemctl daemon-reload - sudo /bin/systemctl enable elasticsearch.service - else - echo "Unsupported install of elasticsearch on this architecture." - fi -} - -function uninstall_elasticsearch { - if is_package_installed elasticsearch; then - if is_ubuntu; then - sudo apt-get purge elasticsearch - elif is_fedora; then - sudo yum remove elasticsearch - else - echo "Unsupported install of elasticsearch on this architecture." - fi - fi -} - -# The PHASE dispatcher. All pkg files are expected to basically cargo -# cult the case statement. -PHASE=$1 -echo "Phase is $PHASE" - -case $PHASE in - download) - download_elasticsearch - ;; - install) - install_elasticsearch - ;; - configure) - configure_elasticsearch - ;; - start) - start_elasticsearch - ;; - stop) - stop_elasticsearch - ;; - uninstall) - uninstall_elasticsearch - ;; -esac diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml new file mode 100644 index 0000000000..d0906380ab --- /dev/null +++ b/playbooks/devstack.yaml @@ -0,0 +1,7 @@ +- hosts: all + # This is the default strategy, however since orchestrate-devstack requires + # "linear", it is safer to enforce it in case this is running in an + # environment configured with a different default strategy. + strategy: linear + roles: + - orchestrate-devstack diff --git a/playbooks/post.yaml b/playbooks/post.yaml new file mode 100644 index 0000000000..0047d78ea5 --- /dev/null +++ b/playbooks/post.yaml @@ -0,0 +1,41 @@ +- hosts: all + become: True + vars: + devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" + devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}" + tasks: + # NOTE(andreaf) If the tempest service is enabled, a tempest.log is + # generated as part of lib/tempest, as a result of verify_tempest_config + - name: Check if a tempest log exits + stat: + path: "{{ devstack_conf_dir }}/tempest.log" + register: tempest_log + - name: Link post-devstack tempest.log + file: + src: "{{ devstack_conf_dir }}/tempest.log" + dest: "{{ stage_dir }}/verify_tempest_conf.log" + state: hard + when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes + roles: + - export-devstack-journal + - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data + - devstack-project-conf + # capture-system-logs should be the last role before stage-output + - capture-system-logs + - role: stage-output + # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job + # starts pulling logs for us from {{ ansible_user_dir }}/logs. 
+    # Meanwhile we already store things in ansible_user_dir and use
+    # fetch-devstack-log-dir setting devstack_base_dir
+    - role: fetch-devstack-log-dir
+      devstack_base_dir: "{{ ansible_user_dir }}"
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
new file mode 100644
index 0000000000..68cb1d8c7a
--- /dev/null
+++ b/playbooks/pre.yaml
@@ -0,0 +1,37 @@
+- hosts: all
+  pre_tasks:
+    - name: Fix the permissions of the zuul home directory
+      # Make sure that the zuul home can be traversed,
+      # so that all users can access the sources placed there.
+      # Some distributions create it with 700 by default.
+      file:
+        path: "{{ ansible_user_dir }}"
+        mode: a+x
+    - name: Gather minimum local MTU
+      set_fact:
+        local_mtu: >
+          {% set mtus = [] -%}
+          {% for interface in ansible_interfaces -%}
+          {% set interface_variable = 'ansible_' + interface -%}
+          {% if interface_variable in hostvars[inventory_hostname] -%}
+          {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%}
+          {% endif -%}
+          {% endfor -%}
+          {{- mtus|min -}}
+    - name: Calculate external_bridge_mtu
+      # 30 bytes is the overhead for vxlan (which is greater than GRE),
+      # allowing us to use either overlay option with this MTU.
+      # 40 bytes is the overhead for IPv6, which will also support an IPv4 overlay.
+      # TODO(andreaf) This should work, but it may have to be reconciled with
+      # the MTU setting used by the multinode setup roles in multinode pre.yaml
+      set_fact:
+        external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}"
+  roles:
+    - configure-swap
+    - setup-stack-user
+    - setup-tempest-user
+    - setup-devstack-source-dirs
+    - setup-devstack-log-dir
+    - setup-devstack-cache
+    - start-fresh-logging
+    - write-devstack-local-conf
diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml
new file mode 100644
index 0000000000..7f0cb19824
--- /dev/null
+++ b/playbooks/tox/post.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  roles:
+    - fetch-tox-output
+    - fetch-subunit-output
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
new file mode 100644
index 0000000000..68d5254251
--- /dev/null
+++ b/playbooks/tox/pre.yaml
@@ -0,0 +1,14 @@
+- hosts: all
+  roles:
+    # Run bindep and test-setup after devstack so that they won't interfere
+    - role: bindep
+      bindep_profile: test
+      bindep_dir: "{{ zuul_work_dir }}"
+    - test-setup
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # was released after zed and has some incompatible changes, and it is
+    # OK not to fix the issues caused by tox 4.0.0 in stable branch jobs.
+    # We can continue testing the stable/zed and lower branches with
+    # tox<4.0.0
+    - role: ensure-tox
+      ensure_tox_version: "<4"
diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml
new file mode 100644
index 0000000000..e4043d8231
--- /dev/null
+++ b/playbooks/tox/run-both.yaml
@@ -0,0 +1,11 @@
+- hosts: all
+  roles:
+    - run-devstack
+    # Run bindep and test-setup after devstack so that they won't interfere
+    - role: bindep
+      bindep_profile: test
+      bindep_dir: "{{ zuul_work_dir }}"
+    - test-setup
+    - ensure-tox
+    - get-devstack-os-environment
+    - tox
diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml
new file mode 100644
index 0000000000..0d065c6ca2
--- /dev/null
+++ b/playbooks/tox/run.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  roles:
+    - get-devstack-os-environment
+    - tox
diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml
new file mode 100644
index 0000000000..cfa1676378
--- /dev/null
+++ b/playbooks/unit-tests/pre.yaml
@@ -0,0 +1,13 @@
+- hosts: all
+
+  tasks:
+
+    - name: Install prerequisites
+      shell:
+        chdir: '{{ zuul.project.src_dir }}'
+        executable: /bin/bash
+        cmd: |
+          set -e
+          set -x
+          echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc
+          ./tools/install_prereqs.sh
diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml
new file mode 100644
index 0000000000..181521f072
--- /dev/null
+++ b/playbooks/unit-tests/run.yaml
@@ -0,0 +1,12 @@
+- hosts: all
+
+  tasks:
+
+    - name: Run run_tests.sh
+      shell:
+        chdir: '{{ zuul.project.src_dir }}'
+        executable: /bin/bash
+        cmd: |
+          set -e
+          set -x
+          ./run_tests.sh
diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst
new file mode 100644
index 0000000000..eccee403a5
--- /dev/null
+++ b/roles/apache-logs-conf/README.rst
@@ -0,0 +1,12 @@
+Prepare apache configs and logs for staging
+
+Make sure apache config files and log files are available in a linux
+flavor-independent location. Note that this relies on hard links, so the
+staging directory must be in the same partition where the logs and configs are.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
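[Editor's note] The same-partition requirement above comes from how hard links work; a minimal shell sketch of the idea (the paths are illustrative, not taken from the role):

    # Hard links cannot cross filesystems: ln fails with EXDEV when the
    # staging directory lives on a different device than the log file.
    src=/var/log/apache2/error.log
    dst=$HOME/apache/error.log
    mkdir -p "$(dirname "$dst")"
    if [ "$(stat -c %d "$src")" = "$(stat -c %d "$(dirname "$dst")")" ]; then
        ln "$src" "$dst"     # same device: hard link, no extra space used
    else
        cp -p "$src" "$dst"  # different device: fall back to a copy
    fi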
diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/apache-logs-conf/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml new file mode 100644 index 0000000000..6b7ea37857 --- /dev/null +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -0,0 +1,90 @@ +- name: Ensure {{ stage_dir }}/apache exists + file: + path: "{{ stage_dir }}/apache" + state: directory + +- name: Link apache logs on Debian/SuSE + block: + - name: Find logs + find: + path: "/var/log/apache2" + file_type: any + register: debian_suse_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ debian_suse_apache_logs.files }}" + register: debian_suse_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ debian_suse_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family in ('Debian', 'Suse') + no_log: true + +- name: Link apache logs on RedHat + block: + - name: Find logs + find: + path: "/var/log/httpd" + file_type: any + register: redhat_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ redhat_apache_logs.files }}" + register: redhat_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ redhat_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family == 'RedHat' + no_log: true + +- name: Ensure {{ stage_dir }}/apache_config apache_config exists + file: + path: "{{ stage_dir }}/apache_config" + state: directory + +- name: Define config paths + set_fact: + apache_config_paths: + 'Debian': '/etc/apache2/sites-enabled/' + 'Suse': '/etc/apache2/conf.d/' + 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' + +- name: Discover configurations + find: + path: "{{ apache_config_paths[ansible_os_family] }}" + file_type: any + register: apache_configs + no_log: true + +- name: Dereference configurations + stat: + path: "{{ item.path }}" + with_items: "{{ apache_configs.files }}" + register: apache_configs_deref + no_log: true + +- name: Link configurations + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ apache_configs_deref.results }}" + when: item.stat.isreg or item.stat.islnk + no_log: true diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. 
zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..51a11b60bc --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,18 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} + ignore_errors: yes diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst new file mode 100644 index 0000000000..1376f63bfc --- /dev/null +++ b/roles/capture-system-logs/README.rst @@ -0,0 +1,21 @@ +Stage a number of system type logs + +Stage a number of different logs / reports: +- snapshot of iptables +- disk space available +- pip[2|3] freeze +- installed packages (dpkg/rpm) +- ceph, openswitch, gluster +- coredumps +- dns resolver +- listen53 +- services +- unbound.log +- deprecation messages + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/capture-system-logs/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml new file mode 100644 index 0000000000..4b5ec4836b --- /dev/null +++ b/roles/capture-system-logs/tasks/main.yaml @@ -0,0 +1,59 @@ +# TODO(andreaf) Make this into proper Ansible +- name: Stage various logs and reports + shell: + executable: /bin/bash + cmd: | + sudo iptables-save > {{ stage_dir }}/iptables.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt + + for py_ver in 2 3; do + if [[ `which python${py_ver}` ]]; then + python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt + fi + done + + if [ `command -v dpkg` ]; then + dpkg -l> {{ stage_dir }}/dpkg-l.txt + fi + if [ `command -v rpm` ]; then + rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt + fi + + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU + # failed to start due to denials from SELinux — useful for CentOS + # and Fedora machines. 
For Ubuntu (which runs AppArmor), DevStack
+      # already captures the contents of /var/log/kern.log (via
+      # `journalctl -t kernel` redirected into syslog.txt.gz), which
+      # contains AppArmor-related messages.
+      if [ -f /var/log/audit/audit.log ] ; then
+        sudo cp /var/log/audit/audit.log {{ stage_dir }}/audit.log &&
+          chmod +r {{ stage_dir }}/audit.log;
+      fi
+
+      # gzip and save any coredumps in /var/core
+      if [ -d /var/core ]; then
+        sudo gzip -r /var/core
+        sudo cp -r /var/core {{ stage_dir }}/
+      fi
+
+      sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt
+
+      # NOTE(andreaf) Service logs are already in logs/ thanks to the
+      # export-devstack-journal role. Apache logs are under apache/ thanks
+      # to the apache-logs-conf role.
+      grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \
+        sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \
+        sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \
+        sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' | \
+        sed -r 's/\[.*\]/ /g' | \
+        sed -r 's/\s[0-9]+\s/ /g' | \
+        awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
new file mode 100644
index 0000000000..3bddf5ea60
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -0,0 +1,16 @@
+Verify all addresses in IPv6-only deployments
+
+This role needs to be invoked from a playbook that
+runs tests. This role verifies the IPv6 settings on the
+devstack side and that devstack deploys with all addresses
+being IPv6. This role is invoked before tests are run so that,
+if any IPv6 setting is missing, deployments can fail
+the job early.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
new file mode 100644
index 0000000000..fea05c8146
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
new file mode 100644
index 0000000000..59d3b79bc1
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Verify the ipv6-only deployments
+  become: true
+  become_user: stack
+  shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh"
diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst
new file mode 100644
index 0000000000..3f2d4c9697
--- /dev/null
+++ b/roles/devstack-project-conf/README.rst
@@ -0,0 +1,11 @@
+Prepare OpenStack project configurations for staging
+
+Prepare all relevant config files for staging.
+This is helpful to avoid staging the entire /etc.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
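[Editor's note] Outside of Ansible, the staging step the tasks below implement boils down to a dereferencing copy per project; a rough shell equivalent (the project list is illustrative):

    mkdir -p "$HOME/etc"
    for project in nova glance keystone; do   # short_name values vary per job
        if [ -d "/etc/$project" ]; then
            # -L dereferences symlinks so the staged copy stands alone;
            # -p preserves timestamps and permissions for debugging.
            cp -pRL "/etc/$project" "$HOME/etc/$project"
        fi
    done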
diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml
new file mode 100644
index 0000000000..f8fb8deac9
--- /dev/null
+++ b/roles/devstack-project-conf/defaults/main.yaml
@@ -0,0 +1 @@
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml
new file mode 100644
index 0000000000..917cdbc370
--- /dev/null
+++ b/roles/devstack-project-conf/tasks/main.yaml
@@ -0,0 +1,25 @@
+- name: Ensure {{ stage_dir }}/etc exists
+  file:
+    path: "{{ stage_dir }}/etc"
+    state: directory
+
+- name: Check which projects have a config folder
+  stat:
+    path: "/etc/{{ item.value.short_name }}"
+  with_dict: "{{ zuul.projects }}"
+  register: project_configs
+  no_log: true
+
+- name: Copy configuration files
+  command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }}
+  when: item.stat.exists
+  with_items: "{{ project_configs.results }}"
+
+- name: Check if openstack has a config folder
+  stat:
+    path: "/etc/openstack"
+  register: openstack_configs
+
+- name: Copy configuration files
+  command: cp -pRL /etc/openstack {{ stage_dir }}/etc/
+  when: openstack_configs.stat.exists
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
new file mode 100644
index 0000000000..9e3c919627
--- /dev/null
+++ b/roles/export-devstack-journal/README.rst
@@ -0,0 +1,25 @@
+Export journal files from devstack services
+
+This performs a number of log collection tasks:
+
+* Export the systemd journal in native format
+* For every devstack service, export logs to text in a file named
+  ``screen-*`` to maintain legacy compatibility from when devstack services
+  ran in a screen session and were logged separately.
+* Export a syslog-style file with kernel and sudo messages for legacy
+  compatibility.
+
+Writes the output to the ``logs/`` subdirectory of ``stage_dir``.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory. This is used to obtain the
+   ``log-start-timestamp.txt``, used to filter the systemd journal.
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
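[Editor's note] In shell terms, the export described above is a handful of journalctl invocations; a minimal sketch assuming the default paths (the unit name is illustrative):

    since=$(cat /opt/stack/log-start-timestamp.txt)
    # Legacy-style text log for one devstack unit, named screen-* as before
    journalctl -o short-precise --unit devstack@c-api.service \
        --since "$since" > "$HOME/logs/screen-c-api.txt"
    # Portable export-format dump of the full journal for later searching
    journalctl -o export --since "$since" | gzip > "$HOME/logs/devstack.journal.gz"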
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/export-devstack-journal/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml new file mode 100644 index 0000000000..db38b10a44 --- /dev/null +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -0,0 +1,54 @@ +# NOTE(andreaf) This bypasses the stage-output role +- name: Ensure {{ stage_dir }}/logs exists + become: true + file: + path: "{{ stage_dir }}/logs" + state: directory + owner: "{{ ansible_user }}" + +- name: Export legacy stack screen log files + become: true + shell: + cmd: | + u="" + name="" + for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do + name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') + journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt + done + +- name: Export legacy syslog.txt + become: true + shell: + # The journal contains everything running under systemd, we'll + # build an old school version of the syslog with just the + # kernel and sudo messages. + cmd: | + journalctl \ + -t kernel \ + -t sudo \ + --no-pager \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + > {{ stage_dir }}/logs/syslog.txt + +# TODO: convert this to ansible +# - make a list of the above units +# - iterate the list here +- name: Export journal + become: true + shell: + # Export the journal in export format to make it downloadable + # for later searching. It can then be rewritten to a journal native + # format locally using systemd-journal-remote. This makes a class of + # debugging much easier. We don't do the native conversion here as + # some distros do not package that tooling. + cmd: | + journalctl -o export \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + | gzip > {{ stage_dir }}/logs/devstack.journal.gz + +- name: Save journal README + become: true + template: + src: devstack.journal.README.txt.j2 + dest: '{{ stage_dir }}/logs/devstack.journal.README.txt' diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 new file mode 100644 index 0000000000..30519f63d7 --- /dev/null +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -0,0 +1,33 @@ +Devstack systemd journal +======================== + +The devstack.journal file is a copy of the systemd journal during the +devstack run. + +To use it, you will need to convert it so journalctl can read it +locally. After downloading the file: + + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal + +Note this binary is not in the regular path. On Debian/Ubuntu +platforms, you will need to have the "systemd-journal-remote" package +installed. + +It should result in something like: + + Finishing after writing entries + +You can then use journalctl to examine this file. 
For example, to see all devstack services try:
+
+    $ journalctl --file ./output.journal -u 'devstack@*'
+
+To see just the cinder API server logs, restrict the match with:
+
+    $ journalctl --file ./output.journal -u 'devstack@c-api'
+
+There may be many types of logs available in the journal; a command like
+
+    $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u
+
+can help you find interesting things to filter on.
\ No newline at end of file
diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst
new file mode 100644
index 0000000000..360a2e3dd0
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/README.rst
@@ -0,0 +1,10 @@
+Fetch content from the devstack log directory
+
+Copy logs from every host back to the zuul executor.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml
new file mode 100644
index 0000000000..fea05c8146
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
new file mode 100644
index 0000000000..276c4e0eb5
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -0,0 +1,10 @@
+# As the user in the guest may not exist on the executor,
+# we do not preserve the group or owner of the copied logs.
+
+- name: Collect devstack logs
+  synchronize:
+    dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+    mode: pull
+    src: "{{ devstack_base_dir }}/logs"
+    group: no
+    owner: no
diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst
new file mode 100644
index 0000000000..68ddce8b5a
--- /dev/null
+++ b/roles/get-devstack-os-environment/README.rst
@@ -0,0 +1,40 @@
+Reads the OS_* variables set by devstack through openrc
+for the specified user and project and exports them as
+the os_env_vars fact.
+
+**WARNING**: this role is meant to be used as a porting aid
+for the non-unified python-client jobs which
+are already around, as those clients do not use clouds.yaml
+as openstackclient does.
+When those clients and their jobs are deprecated and removed,
+or in any case when the new code is able to read from clouds.yaml
+directly, this role should be removed as well.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: openrc_file
+   :default: {{ devstack_base_dir }}/devstack/openrc
+
+   The location of the generated openrc file.
+
+.. zuul:rolevar:: openrc_user
+   :default: admin
+
+   The user whose credentials should be retrieved.
+
+.. zuul:rolevar:: openrc_project
+   :default: admin
+
+   The project (which openrc_user is part of) whose
+   access data should be retrieved.
+
+.. zuul:rolevar:: openrc_enable_export
+   :default: false
+
+   Set it to true to export os_env_vars.
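[Editor's note] The extraction itself is just sourcing openrc in a throwaway shell and filtering the environment; a minimal sketch of what the tasks below do, assuming the default devstack location:

    # Source openrc quietly, then emit the OS_* variables as a YAML mapping
    # that Ansible can merge into tox_environment.
    source /opt/stack/devstack/openrc admin admin &>/dev/null
    env | awk -F= 'BEGIN { print "---" } /^OS_/ { print "  "$1": \""$2"\"" }'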
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml
new file mode 100644
index 0000000000..f68ea560d0
--- /dev/null
+++ b/roles/get-devstack-os-environment/defaults/main.yaml
@@ -0,0 +1,6 @@
+devstack_base_dir: "/opt/stack"
+openrc_file: "{{ devstack_base_dir }}/devstack/openrc"
+openrc_user: admin
+openrc_project: admin
+openrc_enable_export: false
+tox_environment: {}
diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml
new file mode 100644
index 0000000000..b2c5e93ed4
--- /dev/null
+++ b/roles/get-devstack-os-environment/tasks/main.yaml
@@ -0,0 +1,14 @@
+- when: openrc_enable_export
+  block:
+    - name: Extract the OS_ environment variables
+      shell:
+        cmd: |
+          source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null
+          env | awk -F= 'BEGIN {print "---" } /^OS_/ { print "  "$1": \""$2"\""} '
+      args:
+        executable: "/bin/bash"
+      register: env_os
+
+    - name: Append the OS_ environment variables to tox_environment
+      set_fact:
+        tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}"
diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst
new file mode 100644
index 0000000000..097dcea55e
--- /dev/null
+++ b/roles/orchestrate-devstack/README.rst
@@ -0,0 +1,25 @@
+Orchestrate a devstack
+
+Runs devstack in a multinode scenario, with one controller node
+and a group of subnodes.
+
+The reason for this role is so that jobs in other repositories may
+run devstack in their plays with no need to re-implement the
+orchestration logic.
+
+The "run-devstack" role is available to run devstack with no
+orchestration.
+
+This role sets up the controller and CA first; it then pushes CA
+data to sub-nodes and runs devstack there. The only requirement for
+this role is for the controller inventory_hostname to be "controller"
+and for all sub-nodes to be defined in a group called "subnode".
+
+This role needs to be invoked from a playbook that uses a "linear" strategy.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
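[Editor's note] Expressed as plain shell, the ordering this role enforces looks roughly like the following; `run_on` is a hypothetical ssh helper and the subnode names are illustrative, not part of the role:

    run_on() { local host=$1; shift; ssh "stack@$host" "$@"; }  # hypothetical helper

    run_on controller '/opt/stack/devstack/stack.sh'            # controller stacks first
    # ... CA data and the stack user's sshkey are synced to subnodes here ...
    for node in subnode1 subnode2; do                           # names per your inventory
        run_on "$node" '/opt/stack/devstack/stack.sh'
    done
    run_on controller '/opt/stack/devstack/tools/discover_hosts.sh'  # map computes to cells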
diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/orchestrate-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml new file mode 100644 index 0000000000..b8ee7e35a7 --- /dev/null +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -0,0 +1,50 @@ +- name: Run devstack on the controller + include_role: + name: run-devstack + when: inventory_hostname == 'controller' + +- name: Setup devstack on sub-nodes + any_errors_fatal: true + block: + + - name: Distribute the build sshkey for the user "stack" + include_role: + name: copy-build-sshkey + vars: + copy_sshkey_target_user: 'stack' + + - name: Sync CA data to subnodes (when any) + # Only do this if the tls-proxy service is defined and enabled + include_role: + name: sync-devstack-data + when: devstack_services['tls-proxy']|default(false) + + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + + - name: Run devstack on the sub-nodes + include_role: + name: run-devstack + when: inventory_hostname in groups['subnode'] + + - name: Discover hosts + # Discovers compute nodes (subnodes) and maps them to cells. Only run + # on the controller node. + # NOTE(mriedem): We want to remove this if/when nova supports + # auto-registration of computes with cells, but that's not happening in + # Ocata. + # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts + # function in devstack gate. Since this is now in devstack, which is + # branched, we know that the discover_hosts tool exists. + become: true + become_user: stack + shell: ./tools/discover_hosts.sh + args: + chdir: "{{ devstack_base_dir }}/devstack" + when: inventory_hostname == 'controller' + + when: + - '"controller" in hostvars' + - '"subnode" in groups' diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst new file mode 100644 index 0000000000..a8447d2355 --- /dev/null +++ b/roles/process-stackviz/README.rst @@ -0,0 +1,22 @@ +Generate stackviz report. + +Generate stackviz report using subunit and dstat data, using +the stackviz archive embedded in test images. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: stage_dir + :default: "{{ ansible_user_dir }}" + + The stage directory where the input data can be found and + the output will be produced. + +.. zuul:rolevar:: zuul_work_dir + :default: {{ devstack_base_dir }}/tempest + + Directory to work in. It has to be a fully qualified path. 
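[Editor's note] Condensed to shell, the processing below installs the bundled archive into a venv and feeds the subunit stream to stackviz-export; a sketch assuming the default paths from this role:

    python3 -m venv /tmp/stackviz
    /tmp/stackviz/bin/pip install -U /opt/cache/files/stackviz-latest.tar.gz
    cp -pR /tmp/stackviz/share/stackviz-html "$HOME/stackviz"
    # --dstat is only passed when dstat-csv.log actually exists
    /tmp/stackviz/bin/stackviz-export \
        --dstat /opt/stack/logs/dstat-csv.log \
        --env --stdin \
        "$HOME/stackviz/data" < /opt/stack/tempest/testrepository.subunit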
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml new file mode 100644 index 0000000000..f3bc32b149 --- /dev/null +++ b/roles/process-stackviz/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" +zuul_work_dir: "{{ devstack_base_dir }}/tempest" diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml new file mode 100644 index 0000000000..3ba3d9c2e6 --- /dev/null +++ b/roles/process-stackviz/tasks/main.yaml @@ -0,0 +1,73 @@ +- name: Process Stackviz + block: + + - name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive + + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists + + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input + + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U + + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists + + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists + + ignore_errors: yes diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst new file mode 100644 index 0000000000..d77eb15e99 --- /dev/null +++ b/roles/run-devstack/README.rst @@ -0,0 +1,8 @@ +Run devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/run-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml new file mode 100644 index 0000000000..f58b31d477 --- /dev/null +++ b/roles/run-devstack/tasks/main.yaml @@ -0,0 +1,11 @@ +- name: Run devstack + shell: + cmd: | + ./stack.sh 2>&1 + rc=$? 
+ echo "*** FINISHED ***" + exit $rc + args: + chdir: "{{devstack_base_dir}}/devstack" + become: true + become_user: stack diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst new file mode 100644 index 0000000000..b8938c3dea --- /dev/null +++ b/roles/setup-devstack-cache/README.rst @@ -0,0 +1,15 @@ +Set up the devstack cache directory + +If the node has a cache of devstack image files, copy it into place. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_cache_dir + :default: /opt/cache + + The directory with the cached files. diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml new file mode 100644 index 0000000000..c56720b4f5 --- /dev/null +++ b/roles/setup-devstack-cache/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_cache_dir: /opt/cache diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml new file mode 100644 index 0000000000..3adff17d5d --- /dev/null +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Copy cached devstack files + # This uses hard links to avoid using extra space. + command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" + become: true + ignore_errors: yes + +- name: Set ownership of cached files + file: + path: '{{ devstack_base_dir }}/devstack/files' + state: directory + recurse: true + owner: stack + group: stack + mode: a+r + become: yes diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst new file mode 100644 index 0000000000..9d8dba3442 --- /dev/null +++ b/roles/setup-devstack-log-dir/README.rst @@ -0,0 +1,11 @@ +Set up the devstack log directory + +Create a log directory on the ephemeral disk partition to save space +on the root device. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/setup-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..d8e8cfe70a --- /dev/null +++ b/roles/setup-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,8 @@ +- name: Create logs directory + file: + path: '{{ devstack_base_dir }}/logs' + state: directory + mode: 0755 + owner: stack + group: stack + become: yes diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst new file mode 100644 index 0000000000..0aa048b7d2 --- /dev/null +++ b/roles/setup-devstack-source-dirs/README.rst @@ -0,0 +1,16 @@ +Set up the devstack source directories + +Ensure that the base directory exists, and then move the source repos +into it. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_sources_branch + :default: None + + The target branch to be setup (where available). 
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml new file mode 100644 index 0000000000..77a74d7b89 --- /dev/null +++ b/roles/setup-devstack-source-dirs/defaults/main.yaml @@ -0,0 +1,9 @@ +devstack_base_dir: /opt/stack +devstack_source_dirs: + - src/opendev.org/opendev + - src/opendev.org/openstack + - src/opendev.org/openstack-dev + - src/opendev.org/openstack-infra + - src/opendev.org/starlingx + - src/opendev.org/x + - src/opendev.org/zuul diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml new file mode 100644 index 0000000000..cb7c6e3af8 --- /dev/null +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -0,0 +1,72 @@ +- name: Find all OpenStack source repos used by this job + find: + paths: "{{ devstack_source_dirs }}" + file_type: directory + register: found_repos + +- name: Copy Zuul repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_repos.files }}' + become: yes + +# Github projects are github.com/username/repo (username might be a +# top-level project too), so we have to do a two-step swizzle to just +# get the full repo path (ansible's find module doesn't help with this +# :/) +- name: Find top level github projects + find: + paths: + - src/github.com + file_type: directory + register: found_github_projects + +- name: Find actual github repos + find: + paths: '{{ found_github_projects.files | map(attribute="path") | list }}' + file_type: directory + register: found_github_repos + when: found_github_projects.files + +- name: Copy github repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_github_repos.files }}' + become: yes + when: found_github_projects.files + +- name: Setup refspec for repos into devstack working directory + shell: + # Copied almost "as-is" from devstack-gate setup-workspace function + # but removing the dependency on functions.sh + # TODO this should be rewritten as a python module. + cmd: | + cd {{ devstack_base_dir }}/{{ item.path | basename }} + base_branch={{ devstack_sources_branch }} + if git branch -a | grep "$base_branch" > /dev/null ; then + git checkout $base_branch + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then + # Look for an eol tag for the stable branch. + eol_tag="${base_branch#*/}-eol" + if git tag -l |grep $eol_tag >/dev/null; then + git checkout $eol_tag + git reset --hard $eol_tag + if ! git clean -x -f -d -q ; then + sleep 1 + git clean -x -f -d -q + fi + fi + else + git checkout master + fi + args: + executable: /bin/bash + with_items: '{{ found_repos.files }}' + when: devstack_sources_branch is defined + +- name: Set ownership of repos + file: + path: '{{ devstack_base_dir }}' + state: directory + recurse: true + owner: stack + group: stack + become: yes diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst new file mode 100644 index 0000000000..80c4d39eff --- /dev/null +++ b/roles/setup-stack-user/README.rst @@ -0,0 +1,16 @@ +Set up the `stack` user + +Create the stack user, set up its home directory, and allow it to +sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_stack_home_dir + :default: {{ devstack_base_dir }} + + The home directory for the stack user. 
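A minimal sketch (hypothetical values) of invoking setup-stack-user with the stack user's home split off from the base directory::

   - hosts: all
     roles:
       - role: setup-stack-user
         vars:
           devstack_base_dir: /opt/stack
           devstack_stack_home_dir: /home/stack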
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml
new file mode 100644
index 0000000000..6d0be666d4
--- /dev/null
+++ b/roles/setup-stack-user/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_stack_home_dir: '{{ devstack_base_dir }}'
diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh
new file mode 100644
index 0000000000..4c6b46bdb1
--- /dev/null
+++ b/roles/setup-stack-user/files/50_stack_sh
@@ -0,0 +1 @@
+stack ALL=(root) NOPASSWD:ALL
diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml
new file mode 100644
index 0000000000..0fc7c2d78b
--- /dev/null
+++ b/roles/setup-stack-user/tasks/main.yaml
@@ -0,0 +1,47 @@
+- name: Create stack group
+  group:
+    name: stack
+  become: yes
+
+# NOTE(andreaf) Creating a user home_dir via the user module
+# is not safe, since it will fail if the containing folder
+# does not exist. If the folder does exist and is empty,
+# the skeleton is set up and ownership is set.
+- name: Create the stack user home folder
+  file:
+    path: '{{ devstack_stack_home_dir }}'
+    state: directory
+  become: yes
+
+- name: Create stack user
+  user:
+    name: stack
+    shell: /bin/bash
+    home: '{{ devstack_stack_home_dir }}'
+    group: stack
+  become: yes
+
+- name: Set stack user home directory permissions and ownership
+  file:
+    path: '{{ devstack_stack_home_dir }}'
+    mode: 0755
+    owner: stack
+    group: stack
+  become: yes
+
+- name: Copy 50_stack_sh file to /etc/sudoers.d
+  copy:
+    src: 50_stack_sh
+    dest: /etc/sudoers.d
+    mode: 0440
+    owner: root
+    group: root
+  become: yes
+
+- name: Create .cache folder within BASE
+  file:
+    path: '{{ devstack_stack_home_dir }}/.cache'
+    state: directory
+    owner: stack
+    group: stack
+  become: yes
diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst
new file mode 100644
index 0000000000..bb29c50a28
--- /dev/null
+++ b/roles/setup-tempest-user/README.rst
@@ -0,0 +1,10 @@
+Set up the `tempest` user
+
+Create the tempest user and allow it to sudo.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh
new file mode 100644
index 0000000000..f88ff9f4f2
--- /dev/null
+++ b/roles/setup-tempest-user/files/51_tempest_sh
@@ -0,0 +1,3 @@
+tempest ALL=(root) NOPASSWD:/sbin/ip
+tempest ALL=(root) NOPASSWD:/sbin/iptables
+tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client
diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml
new file mode 100644
index 0000000000..892eaf655a
--- /dev/null
+++ b/roles/setup-tempest-user/tasks/main.yaml
@@ -0,0 +1,20 @@
+- name: Create tempest group
+  group:
+    name: tempest
+  become: yes
+
+- name: Create tempest user
+  user:
+    name: tempest
+    shell: /bin/bash
+    group: tempest
+  become: yes
+
+- name: Copy 51_tempest_sh to /etc/sudoers.d
+  copy:
+    src: 51_tempest_sh
+    dest: /etc/sudoers.d
+    owner: root
+    group: root
+    mode: 0440
+  become: yes
diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst
new file mode 100644
index 0000000000..11b029e182
--- /dev/null
+++ b/roles/start-fresh-logging/README.rst
@@ -0,0 +1,11 @@
+Restart logging on all hosts
+
+Restart syslog so that the system logs only include output from the
+job.
+
+**Role Variables**
+
+..
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/start-fresh-logging/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml new file mode 100644 index 0000000000..6c7ba66de7 --- /dev/null +++ b/roles/start-fresh-logging/tasks/main.yaml @@ -0,0 +1,56 @@ +- name: Check for /bin/journalctl file + command: which journalctl + changed_when: False + failed_when: False + register: which_out + +- block: + - name: Get current date + command: date +"%Y-%m-%d %H:%M:%S" + register: date_out + + - name: Copy current date to log-start-timestamp.txt + copy: + dest: "{{ devstack_base_dir }}/log-start-timestamp.txt" + content: "{{ date_out.stdout }}" + when: which_out.rc == 0 + become: yes + +- block: + - name: Stop rsyslog + service: name=rsyslog state=stopped + + - name: Save syslog file prior to devstack run + command: mv /var/log/syslog /var/log/syslog-pre-devstack + + - name: Save kern.log file prior to devstack run + command: mv /var/log/kern.log /var/log/kern_log-pre-devstack + + - name: Recreate syslog file + file: name=/var/log/syslog state=touch + + - name: Recreate syslog file owner and group + command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Recreate syslog file permissions + command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Add read permissions to all on syslog file + file: name=/var/log/syslog mode=a+r + + - name: Recreate kern.log file + file: name=/var/log/kern.log state=touch + + - name: Recreate kern.log file owner and group + command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Recreate kern.log file permissions + command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Add read permissions to all on kern.log file + file: name=/var/log/kern.log mode=a+r + + - name: Start rsyslog + service: name=rsyslog state=started + when: which_out.rc == 1 + become: yes diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst new file mode 100644 index 0000000000..388625c893 --- /dev/null +++ b/roles/sync-devstack-data/README.rst @@ -0,0 +1,19 @@ +Sync devstack data for multinode configurations + +Sync any data files which include certificates to be used if TLS is enabled. 
+This role must be executed on the controller and it pushes data to all +subnodes. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_data_base_dir + :default: {{ devstack_base_dir }} + + The devstack base directory for data/. + Useful for example when multiple executions of devstack (i.e. grenade) + share the same data directory. diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml new file mode 100644 index 0000000000..6b5017b811 --- /dev/null +++ b/roles/sync-devstack-data/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_data_base_dir: "{{ devstack_base_dir }}" diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml new file mode 100644 index 0000000000..a1d37c3951 --- /dev/null +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -0,0 +1,59 @@ +- name: Ensure the data folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the CA folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data/CA" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Pull the CA certificate and folder + become: true + synchronize: + src: "{{ item }}" + dest: "{{ zuul.executor.work_root }}/{{ item | basename }}" + mode: pull + with_items: + - "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + - "{{ devstack_data_base_dir }}/data/CA" + when: inventory_hostname == 'controller' + +- name: Push the CA certificate + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/ca-bundle.pem" + dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Push the CA folder + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/CA/" + dest: "{{ devstack_data_base_dir }}/data/" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the data folder and subfolders have the correct permissions + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + recurse: yes + when: 'inventory_hostname in groups["subnode"]|default([])' diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst new file mode 100644 index 0000000000..d0a51e77c2 --- /dev/null +++ b/roles/write-devstack-local-conf/README.rst @@ -0,0 +1,99 @@ +Write the local.conf file for use by devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_local_conf_path + :default: {{ devstack_base_dir }}/devstack/local.conf + + The path of the local.conf file. + +.. zuul:rolevar:: devstack_localrc + :type: dict + + A dictionary of variables that should be written to the localrc + section of local.conf. The values (which are strings) may contain + bash shell variables, and will be ordered so that variables used by + later entries appear first. 
+
+   As a special case, the variable ``LIBS_FROM_GIT`` will be
+   constructed automatically from the projects which appear in the
+   ``required-projects`` list defined by the job plus the project of
+   the change under test. To instruct devstack to install a library
+   from source rather than pypi, simply add that library to the job's
+   ``required-projects`` list. To override the
+   automatically-generated value, set ``LIBS_FROM_GIT`` in
+   ``devstack_localrc`` to the desired value.
+
+.. zuul:rolevar:: devstack_local_conf
+   :type: dict
+
+   A complex argument consisting of nested dictionaries which combine
+   to form the meta-sections of the local_conf file. The top level is
+   a dictionary of phases, followed by dictionaries of filenames, then
+   sections, which finally contain key-value pairs for the INI file
+   entries in those sections.
+
+   The keys in this dictionary are the devstack phases.
+
+   .. zuul:rolevar:: [phase]
+      :type: dict
+
+      The keys in this dictionary are the filenames for this phase.
+
+      .. zuul:rolevar:: [filename]
+         :type: dict
+
+         The keys in this dictionary are the INI sections in this file.
+
+         .. zuul:rolevar:: [section]
+            :type: dict
+
+            This is a dictionary of key-value pairs which comprise
+            this section of the INI file.
+
+.. zuul:rolevar:: devstack_base_services
+   :type: list
+   :default: {{ base_services | default(omit) }}
+
+   A list of base services which are enabled. Services can be added or removed
+   from this list via the ``devstack_services`` variable. This is ignored if
+   ``base`` is set to ``False`` in ``devstack_services``.
+
+.. zuul:rolevar:: devstack_services
+   :type: dict
+
+   A dictionary mapping service names to boolean values. If the
+   boolean value is ``false``, a ``disable_service`` line will be
+   emitted for the service name. If it is ``true``, then
+   ``enable_service`` will be emitted. All other values are ignored.
+
+   The special key ``base`` can be used to enable or disable the base set of
+   services enabled by default. If ``base`` is found, it will be processed
+   before all other keys. If its value is ``False`` a ``disable_all_services``
+   will be emitted; if its value is ``True`` services from
+   ``devstack_base_services`` will be emitted via ``ENABLED_SERVICES``.
+
+.. zuul:rolevar:: devstack_plugins
+   :type: dict
+
+   A dictionary mapping a plugin name to a git repo location. If the
+   location is a non-empty string, then an ``enable_plugin`` line will
+   be emitted for the plugin name.
+
+   If a plugin declares a dependency on another plugin (via
+   ``plugin_requires`` in the plugin's settings file), this role will
+   automatically emit ``enable_plugin`` lines in the correct order.
+
+.. zuul:rolevar:: tempest_plugins
+   :type: list
+
+   A list of tempest plugins which are installed alongside tempest.
+
+   The list of values will be combined with the base devstack directory
+   and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable
+   already exists, its value is *not* changed.
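To make the mapping from these variables to the generated file concrete, here is a small sketch. All names and values are illustrative assumptions, not taken from a real job; the automatically derived ``LIBS_FROM_GIT`` line and the ``ENABLED_SERVICES`` line contributed by ``devstack_base_services`` (normally supplied by the base job) are elided::

   devstack_localrc:
     DATABASE_PASSWORD: secretdatabase
     TARGET_BRANCH: master
   devstack_services:
     horizon: false
   devstack_plugins:
     example-plugin: https://opendev.org/openstack/example-plugin
   devstack_local_conf:
     post-config:
       $NOVA_CONF:
         DEFAULT:
           debug: true

   # Roughly the local.conf the role would write:
   #
   #   [[local|localrc]]
   #   disable_service horizon
   #   DATABASE_PASSWORD="secretdatabase"
   #   TARGET_BRANCH="master"
   #   enable_plugin example-plugin https://opendev.org/openstack/example-plugin
   #
   #   [[post-config|$NOVA_CONF]]
   #   [DEFAULT]
   #   debug = true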
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml new file mode 100644 index 0000000000..7bc1dec9b8 --- /dev/null +++ b/roles/write-devstack-local-conf/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf" +devstack_base_services: "{{ enabled_services | default(omit) }}" diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py new file mode 100644 index 0000000000..2f97d0e355 --- /dev/null +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -0,0 +1,351 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + + +class DependencyGraph(object): + # This is based on the JobGraph from Zuul. + + def __init__(self): + self._names = set() + self._dependencies = {} # dependent_name -> set(parent_names) + + def add(self, name, dependencies): + # Append the dependency information + self._dependencies.setdefault(name, set()) + try: + for dependency in dependencies: + # Make sure a circular dependency is never created + ancestors = self._getParentNamesRecursively( + dependency, soft=True) + ancestors.add(dependency) + if name in ancestors: + raise Exception("Dependency cycle detected in {}". 
+ format(name)) + self._dependencies[name].add(dependency) + except Exception: + del self._dependencies[name] + raise + + def getDependenciesRecursively(self, parent): + dependencies = [] + + current_dependencies = self._dependencies[parent] + for current in current_dependencies: + if current not in dependencies: + dependencies.append(current) + for dep in self.getDependenciesRecursively(current): + if dep not in dependencies: + dependencies.append(dep) + return dependencies + + def _getParentNamesRecursively(self, dependent, soft=False): + all_parent_items = set() + items_to_iterate = set([dependent]) + while len(items_to_iterate) > 0: + current_item = items_to_iterate.pop() + current_parent_items = self._dependencies.get(current_item) + if current_parent_items is None: + if soft: + current_parent_items = set() + else: + raise Exception("Dependent item {} not found: ".format( + dependent)) + new_parent_items = current_parent_items - all_parent_items + items_to_iterate |= new_parent_items + all_parent_items |= new_parent_items + return all_parent_items + + +class VarGraph(DependencyGraph): + def __init__(self, vars): + super(VarGraph, self).__init__() + self.vars = {} + self._varnames = set() + for k, v in vars.items(): + self._varnames.add(k) + for k, v in vars.items(): + self._addVar(k, str(v)) + + bash_var_re = re.compile(r'\$\{?(\w+)') + def getDependencies(self, value): + return self.bash_var_re.findall(value) + + def _addVar(self, key, value): + if key in self.vars: + raise Exception("Variable {} already added".format(key)) + self.vars[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(value): + if dependency == key: + # A variable is allowed to reference itself; no + # dependency link needed in that case. + continue + if dependency not in self._varnames: + # It's not necessary to create a link for an + # external variable. + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.vars[key] + raise + + def getVars(self): + ret = [] + keys = sorted(self.vars.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for var in dependencies + [key]: + if var not in seen: + ret.append((var, self.vars[var])) + seen.add(var) + return ret + + +class PluginGraph(DependencyGraph): + def __init__(self, base_dir, plugins): + super(PluginGraph, self).__init__() + # The dependency trees expressed by all the plugins we found + # (which may be more than those the job is using). 
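+        # As an illustration (hypothetical plugin names): a repo whose
+        # devstack/settings contains "define_plugin foo" and
+        # "plugin_requires foo bar" is recorded here as
+        # {'foo': {'bar'}} by loadDevstackPluginInfo below.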
+ self._plugin_dependencies = {} + self.loadPluginNames(base_dir) + + self.plugins = {} + self._pluginnames = set() + for k, v in plugins.items(): + self._pluginnames.add(k) + for k, v in plugins.items(): + self._addPlugin(k, str(v)) + + def loadPluginNames(self, base_dir): + if base_dir is None: + return + git_roots = [] + for root, dirs, files in os.walk(base_dir): + if '.git' not in dirs: + continue + # Don't go deeper than git roots + dirs[:] = [] + git_roots.append(root) + for root in git_roots: + devstack = os.path.join(root, 'devstack') + if not (os.path.exists(devstack) and os.path.isdir(devstack)): + continue + settings = os.path.join(devstack, 'settings') + if not (os.path.exists(settings) and os.path.isfile(settings)): + continue + self.loadDevstackPluginInfo(settings) + + define_re = re.compile(r'^define_plugin\s+(\S+).*') + require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*') + def loadDevstackPluginInfo(self, fn): + name = None + reqs = set() + with open(fn) as f: + for line in f: + m = self.define_re.match(line) + if m: + name = m.group(1) + m = self.require_re.match(line) + if m: + if name == m.group(1): + reqs.add(m.group(2)) + if name and reqs: + self._plugin_dependencies[name] = reqs + + def getDependencies(self, value): + return self._plugin_dependencies.get(value, []) + + def _addPlugin(self, key, value): + if key in self.plugins: + raise Exception("Plugin {} already added".format(key)) + self.plugins[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(key): + if dependency == key: + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.plugins[key] + raise + + def getPlugins(self): + ret = [] + keys = sorted(self.plugins.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for plugin in dependencies + [key]: + if plugin not in seen: + ret.append((plugin, self.plugins[plugin])) + seen.add(plugin) + return ret + + +class LocalConf(object): + + def __init__(self, localrc, localconf, base_services, services, plugins, + base_dir, projects, project, tempest_plugins): + self.localrc = [] + self.warnings = [] + self.meta_sections = {} + self.plugin_deps = {} + self.base_dir = base_dir + self.projects = projects + self.project = project + self.tempest_plugins = tempest_plugins + if services or base_services: + self.handle_services(base_services, services or {}) + self.handle_localrc(localrc) + # Plugins must be the last items in localrc, otherwise + # the configuration lines which follows them in the file are + # not applied to the plugins (for example, the value of DEST.) 
+        if plugins:
+            self.handle_plugins(plugins)
+        if localconf:
+            self.handle_localconf(localconf)
+
+    def handle_plugins(self, plugins):
+        pg = PluginGraph(self.base_dir, plugins)
+        for k, v in pg.getPlugins():
+            if v:
+                self.localrc.append('enable_plugin {} {}'.format(k, v))
+
+    def handle_services(self, base_services, services):
+        enable_base_services = services.pop('base', True)
+        if enable_base_services and base_services:
+            self.localrc.append('ENABLED_SERVICES={}'.format(
+                ",".join(base_services)))
+        else:
+            self.localrc.append('disable_all_services')
+        for k, v in services.items():
+            if v is False:
+                self.localrc.append('disable_service {}'.format(k))
+            elif v is True:
+                self.localrc.append('enable_service {}'.format(k))
+
+    def handle_localrc(self, localrc):
+        lfg = False
+        tp = False
+        if localrc:
+            vg = VarGraph(localrc)
+            for k, v in vg.getVars():
+                # Avoid double quoting
+                if len(v) and v[0] == '"':
+                    self.localrc.append('{}={}'.format(k, v))
+                else:
+                    self.localrc.append('{}="{}"'.format(k, v))
+                if k == 'LIBS_FROM_GIT':
+                    lfg = True
+                elif k == 'TEMPEST_PLUGINS':
+                    # Remember the user-supplied value so the warning
+                    # below can report it.
+                    tp = v
+
+        if not lfg and (self.projects or self.project):
+            required_projects = []
+            if self.projects:
+                for project_name, project_info in self.projects.items():
+                    if project_info.get('required'):
+                        required_projects.append(project_info['short_name'])
+            if self.project:
+                if self.project['short_name'] not in required_projects:
+                    required_projects.append(self.project['short_name'])
+            if required_projects:
+                self.localrc.append('LIBS_FROM_GIT={}'.format(
+                    ','.join(required_projects)))
+
+        if self.tempest_plugins:
+            if not tp:
+                tp_dirs = []
+                for tempest_plugin in self.tempest_plugins:
+                    tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
+                self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
+                    ' '.join(tp_dirs)))
+            else:
+                self.warnings.append('TEMPEST_PLUGINS already defined ({}), '
+                                     'requested value {} ignored'.format(
+                                         tp, self.tempest_plugins))
+
+    def handle_localconf(self, localconf):
+        for phase, phase_data in localconf.items():
+            for fn, fn_data in phase_data.items():
+                ms_name = '[[{}|{}]]'.format(phase, fn)
+                ms_data = []
+                for section, section_data in fn_data.items():
+                    ms_data.append('[{}]'.format(section))
+                    for k, v in section_data.items():
+                        ms_data.append('{} = {}'.format(k, v))
+                    ms_data.append('')
+                self.meta_sections[ms_name] = ms_data
+
+    def write(self, path):
+        with open(path, 'w') as f:
+            f.write('[[local|localrc]]\n')
+            f.write('\n'.join(self.localrc))
+            f.write('\n\n')
+            for section, lines in self.meta_sections.items():
+                f.write('{}\n'.format(section))
+                f.write('\n'.join(lines))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            plugins=dict(type='dict'),
+            base_services=dict(type='list'),
+            services=dict(type='dict'),
+            localrc=dict(type='dict'),
+            local_conf=dict(type='dict'),
+            base_dir=dict(type='path'),
+            path=dict(type='str'),
+            projects=dict(type='dict'),
+            project=dict(type='dict'),
+            tempest_plugins=dict(type='list'),
+        )
+    )
+
+    p = module.params
+    lc = LocalConf(p.get('localrc'),
+                   p.get('local_conf'),
+                   p.get('base_services'),
+                   p.get('services'),
+                   p.get('plugins'),
+                   p.get('base_dir'),
+                   p.get('projects'),
+                   p.get('project'),
+                   p.get('tempest_plugins'))
+    lc.write(p['path'])
+
+    module.exit_json(warnings=lc.warnings)
+
+
+try:
+    from ansible.module_utils.basic import *  # noqa
+    from ansible.module_utils.basic import AnsibleModule
+except ImportError:
+    pass
+
+if __name__ == '__main__':
+    main()
diff --git
a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py new file mode 100644 index 0000000000..7c526b34c8 --- /dev/null +++ b/roles/write-devstack-local-conf/library/test.py @@ -0,0 +1,291 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +from devstack_local_conf import LocalConf +from collections import OrderedDict + +class TestDevstackLocalConf(unittest.TestCase): + + @staticmethod + def _init_localconf(p): + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + return lc + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_plugins(self): + "Test that plugins without dependencies work" + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ('baz', 'https://git.openstack.org/openstack/baz-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['bar', 'baz', 'foo'], plugins) + + + def test_plugin_deps(self): + "Test that plugins with dependencies work" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo-plugin\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar-plugin\n') + f.write('plugin_requires bar-plugin foo-plugin\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. 
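+        # bar-plugin's settings file (written above) declares
+        # "plugin_requires bar-plugin foo-plugin", so the expected
+        # output below has foo-plugin hoisted ahead of bar-plugin
+        # despite the alphabetical key ordering.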
+ plugins = OrderedDict([ + ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['foo-plugin', 'bar-plugin'], plugins) + + def test_libs_from_git(self): + "Test that LIBS_FROM_GIT is auto-generated" + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + project = { + 'short_name': 'glance', + } + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects, + project=project) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('nova,oslo.messaging,glance', lfg) + + def test_overridelibs_from_git(self): + "Test that LIBS_FROM_GIT can be overridden" + localrc = {'LIBS_FROM_GIT': 'oslo.db'} + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('"oslo.db"', lfg) + + def test_avoid_double_quote(self): + "Test that there a no duplicated quotes" + localrc = {'TESTVAR': '"quoted value"'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects={}) + lc = self._init_localconf(p) + lc.write(p['path']) + + testvar = None + with open(p['path']) as f: + for line in f: + if line.startswith('TESTVAR'): + testvar = line.strip().split('=')[1] + self.assertEqual('"quoted value"', testvar) + + def test_plugin_circular_deps(self): + "Test that plugins with circular dependencies fail" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo\n') + f.write('plugin_requires foo bar\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar\n') + f.write('plugin_requires bar foo\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + 
{'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + with self.assertRaises(Exception): + lc = self._init_localconf(p) + lc.write(p['path']) + + def _find_tempest_plugins_value(self, file_path): + tp = None + with open(file_path) as f: + for line in f: + if line.startswith('TEMPEST_PLUGINS'): + found = line.strip().split('=')[1] + self.assertIsNone(tp, + "TEMPEST_PLUGIN ({}) found again ({})".format( + tp, found)) + tp = found + return tp + + def test_tempest_plugins(self): + "Test that TEMPEST_PLUGINS is correctly populated." + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp) + self.assertEqual(len(lc.warnings), 0) + + def test_tempest_plugins_not_overridden(self): + """Test that the existing value of TEMPEST_PLUGINS is not overridden + by the user-provided value, but a warning is emitted.""" + localrc = {'TEMPEST_PLUGINS': 'someplugin'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"someplugin"', tp) + self.assertEqual(len(lc.warnings), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml new file mode 100644 index 0000000000..bfd086034b --- /dev/null +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -0,0 +1,14 @@ +- name: Write a job-specific local_conf file + become: true + become_user: stack + devstack_local_conf: + path: "{{ devstack_local_conf_path }}" + plugins: "{{ devstack_plugins|default(omit) }}" + base_services: "{{ devstack_base_services|default(omit) }}" + services: "{{ devstack_services|default(omit) }}" + localrc: "{{ devstack_localrc|default(omit) }}" + local_conf: "{{ devstack_local_conf|default(omit) }}" + base_dir: "{{ devstack_base_dir|default(omit) }}" + projects: "{{ zuul.projects }}" + project: "{{ zuul.project }}" + tempest_plugins: "{{ tempest_plugins|default(omit) }}" diff --git a/samples/local.conf b/samples/local.conf index 06ac18572d..55b729809d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -10,7 +10,7 @@ # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/local.conf.html +# in https://docs.openstack.org/devstack/latest/configuration.html#local-conf # These should be considered as samples and are unsupported DevStack code. # The ``localrc`` section replaces the old ``localrc`` configuration file. @@ -49,7 +49,7 @@ SERVICE_PASSWORD=$ADMIN_PASSWORD # path of the destination log file. A timestamp will be appended to the given name. 
LOGFILE=$DEST/logs/stack.sh.log -# Old log files are automatically removed after 7 days to keep things neat. Change +# Old log files are automatically removed after 2 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 diff --git a/samples/local.sh b/samples/local.sh index 634f6ddb17..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -31,16 +31,23 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` + openstack keypair create --public-key $i `hostname` break fi done + # Update security default group + # ----------------------------- + + # Add tcp/22 and icmp to default security group + default=$(openstack security group list -f value -c ID) + openstack security group rule create $default --protocol tcp --dst-port 22 + openstack security group rule create $default --protocol icmp # Create A Flavor # --------------- @@ -53,16 +60,8 @@ if is_service_enabled nova; then MI_NAME=m1.micro # Create micro flavor if not present - if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 + if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then + openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1 fi - - # Other Uses - # ---------- - - # Add tcp/22 and icmp to default security group - nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 - nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 - fi diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index e4b2888dcb..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,23 +0,0 @@ -[metadata] -name = DevStack -summary = OpenStack DevStack -description-file = - README.md -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/devstack -classifier = - Intended Audience :: Developers - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source - -[pbr] -warnerrors = True - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py deleted file mode 100755 index 70c2b3f32b..0000000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/stack.sh b/stack.sh index 6fbb0bee3b..965f58007d 100755 --- a/stack.sh +++ b/stack.sh @@ -2,7 +2,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. 
It # installs and configures various combinations of **Cinder**, **Glance**, -# **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** +# **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** # This script's options can be changed by setting appropriate environment # variables. You can configure things like which git repositories to use, @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -27,11 +27,49 @@ set -o xtrace # Make sure custom grep options don't get in the way unset GREP_OPTIONS +# NOTE(sdague): why do we explicitly set locale when running stack.sh? +# +# Devstack is written in bash, and many functions used throughout +# devstack process text coming off a command (like the ip command) +# and do transforms using grep, sed, cut, awk on the strings that are +# returned. Many of these programs are internationalized, which is +# great for end users, but means that the strings that devstack +# functions depend upon might not be there in other locales. We thus +# need to pin the world to an english basis during the runs. +# +# Previously we used the C locale for this, every system has it, and +# it gives us a stable sort order. It does however mean that we +# effectively drop unicode support.... boo! :( +# +# With python3 being more unicode aware by default, that's not the +# right option. While there is a C.utf8 locale, some distros are +# shipping it as C.UTF8 for extra confusingness. And it's support +# isn't super clear across distros. This is made more challenging when +# trying to support both out of the box distros, and the gate which +# uses diskimage builder to build disk images in a different way than +# the distros do. +# +# So... en_US.utf8 it is. That's existed for a very long time. It is a +# compromise position, but it is the least worse idea at the time of +# this comment. +# +# We also have to unset other variables that might impact LC_ALL +# taking effect. +unset LANG +unset LANGUAGE +LC_ALL=en_US.utf8 +export LC_ALL + +# Clear all OpenStack related envvars +unset `env | grep -E '^OS_' | cut -d = -f 1` + # Make sure umask is sane umask 022 # Not all distros have sbin in PATH for regular users. -PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) @@ -60,19 +98,25 @@ fi # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - die $LINENO "missing devstack/files" + set +o xtrace + echo "missing devstack/files" + exit 1 fi # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/inc`` directory is present if [ ! -d $TOP_DIR/inc ]; then - die $LINENO "missing devstack/inc" + set +o xtrace + echo "missing devstack/inc" + exit 1 fi # ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! 
-d $TOP_DIR/lib ]; then - die $LINENO "missing devstack/lib" + set +o xtrace + echo "missing devstack/lib" + exit 1 fi # Check if run in POSIX shell @@ -131,9 +175,6 @@ LAST_SPINNER_PID="" # Import common functions source $TOP_DIR/functions -# Import config functions -source $TOP_DIR/inc/meta-config - # Import 'public' stack.sh functions source $TOP_DIR/lib/stack @@ -154,16 +195,16 @@ rm -f $TOP_DIR/.localrc.auto extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto # ``stack.sh`` is customizable by setting environment variables. Override a -# default setting via export:: +# default setting via export: # # export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# or by setting the variable on the command line:: +# or by setting the variable on the command line: # # DATABASE_PASSWORD=simple ./stack.sh # -# Persistent variables can be placed in a ``local.conf`` file:: +# Persistent variables can be placed in a ``local.conf`` file: # # [[local|localrc]] # DATABASE_PASSWORD=anothersecret @@ -183,25 +224,20 @@ if [[ ! -r $TOP_DIR/stackrc ]]; then fi source $TOP_DIR/stackrc +# write /etc/devstack-version +write_devstack_version + # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|f24|rhel7|kvmibm1) ]]; then +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" + +if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" fi fi -# Check to see if we are already running DevStack -# Note that this may fail if USE_SCREEN=False -if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then - echo "You are already running a stack.sh session." - echo "To rejoin this session type 'screen -x stack'." - echo "To destroy this session, type './unstack.sh'." - exit 1 -fi - - # Local Settings # -------------- @@ -218,7 +254,7 @@ disable_negated_services # -------------- # We're not as **root** so make sure ``sudo`` is available -is_package_installed sudo || install_package sudo +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || @@ -244,7 +280,6 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - # Configure Distro Repositories # ----------------------------- @@ -256,47 +291,31 @@ fi # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. -function _install_epel_and_rdo { - # NOTE: We always remove and install latest -- some environments - # use snapshot images, and if EPEL version updates they break - # unless we update them to latest version. - if sudo yum repolist enabled epel | grep -q 'epel'; then - uninstall_package epel-release || true - fi +function _install_epel { + # epel-release is in extras repo which is enabled by default + install_package epel-release - # This trick installs the latest epel-release from a bootstrap - # repo, then removes itself (as epel-release installed the - # "real" repo). - # - # You would think that rather than this, you could use - # $releasever directly in .repo file we create below. 
However - # RHEL gives a $releasever of "6Server" which breaks the path; - # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759 - cat </dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi + fi fi + sudo dnf -y update } @@ -308,25 +327,36 @@ DEST=${DEST:-/opt/stack} # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) -sudo mkdir -p $DEST -safe_chown -R $STACK_USER $DEST -safe_chmod 0755 $DEST +# If directory exists do not modify the permissions. +if [[ ! -d $DEST ]]; then + sudo mkdir -p $DEST + safe_chown -R $STACK_USER $DEST + safe_chmod 0755 $DEST +fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then - mkdir -p $LOGDIR + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR fi # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR +if [[ ! -d $DATA_DIR ]]; then + sudo mkdir -p $DATA_DIR + safe_chown -R $STACK_USER $DATA_DIR + safe_chmod 0755 $DATA_DIR +fi + +# Create and/or clean the async state directory +async_init # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi @@ -335,21 +365,43 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -# If we have /etc/nodepool/provider assume we're on a OpenStack CI -# node, where EPEL is already pointing at our internal mirror and RDO -# is pre-installed. -if [[ -f /etc/nodepool/provider ]]; then - SKIP_EPEL_INSTALL=True -fi +if [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository + sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo + + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # it triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl + fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problem in openEuler. We should fix it first. Some required + # package/action runs before fixup script. So we can't fix there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. + # 4. 
Ensure wget installation before use + install_package hostname openstack-release-wallaby wget + uninstall_package python3-pip -if is_fedora && [[ $DISTRO == "rhel7" ]] && \ - [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel_and_rdo + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo fi # Ensure python is installed # -------------------------- -is_package_installed python || install_package python +install_python # Configure Logging @@ -357,6 +409,7 @@ is_package_installed python || install_package python # Set up logging level VERBOSE=$(trueorfalse True VERBOSE) +VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE) # Draw a spinner so the user knows something is happening function spinner { @@ -422,15 +475,19 @@ if [[ -n "$LOGFILE" ]]; then # stdout later. exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then + _of_args="-v" + if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then + _of_args="$_of_args --no-timestamp" + fi # Set fd 1 and 2 to write the log file - exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1 + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 # Set fd 6 to summary log file - exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) else # Set fd 1 and 2 to primary logfile - exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 # Set fd 6 to summary logfile and stdout - exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) fi echo_summary "stack.sh log $LOGFILE" @@ -447,25 +504,7 @@ else exec 1>/dev/null 2>&1 fi # Always send summary fd to original stdout - exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 ) -fi - -# Set up logging of screen windows -# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the -# directory specified in ``SCREEN_LOGDIR``, we will log to the file -# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link -# ``screen-$SERVICE_NAME.log`` to the latest log file. -# Logs are kept for as long specified in ``LOGDAYS``. -# This is deprecated....logs go in ``LOGDIR``, only symlinks will be here now. -if [[ -n "$SCREEN_LOGDIR" ]]; then - - # We make sure the directory is created. - if [[ -d "$SCREEN_LOGDIR" ]]; then - # We cleanup the old logs - find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \; - else - mkdir -p $SCREEN_LOGDIR - fi + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 ) fi # Basic test for ``$DEST`` path permissions (fatal on error unless skipped) @@ -486,19 +525,30 @@ function exit_trap { kill 2>&1 $jobs fi + #Remove timing data file + if [ -f "$OSCWRAP_TIMER_FILE" ] ; then + rm "$OSCWRAP_TIMER_FILE" + fi + # Kill the last spinner process kill_spinner if [[ $r -ne 0 ]]; then echo "Error on exit" - generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + # If we error before we've installed os-testr, this will fail. 
+ if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + fi if [[ -z $LOGDIR ]]; then - $TOP_DIR/tools/worlddump.py + ${PYTHON} $TOP_DIR/tools/worlddump.py else - $TOP_DIR/tools/worlddump.py -d $LOGDIR + ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR fi else - generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + # If we error before we've installed os-testr, this will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + fi fi exit $r @@ -531,12 +581,11 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend -# Service to enable with SSL if ``USE_SSL`` is True -SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" - -if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then - die $LINENO "tls-proxy and SSL are mutually exclusive" -fi +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. +tune_host # Configure Projects # ================== @@ -556,20 +605,22 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat -source $TOP_DIR/lib/dlm +source $TOP_DIR/lib/atop +source $TOP_DIR/lib/tcpdump +source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif # Extras Source # -------------- @@ -646,7 +697,16 @@ function read_password { # The available database backends are listed in ``DATABASE_BACKENDS`` after # ``lib/database`` is sourced. ``mysql`` is the default. -initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" +if initialize_database_backends; then + echo "Using $DATABASE_TYPE database backend" + # Last chance for the database password. This must be handled here + # because read_password is not a library function. + read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + + define_database_baseurl +else + echo "No database enabled" +fi # Queue Configuration @@ -655,9 +715,7 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || # Rabbit connection info # In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit # isn't enabled. -RABBIT_USERID=${RABBIT_USERID:-stackrabbit} if is_service_enabled rabbit; then - RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi @@ -712,37 +770,57 @@ save_stackenv $LINENO # Bring down global requirements before any use of pip_install. This is # necessary to ensure that the constraints file is in place before we # attempt to apply any constraints to pip installs. -git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH +# We always need the master branch in addition to any stable branch, so +# override GIT_DEPTH here. 
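# (Aside, hedged: the "VAR=value command" prefix used on the next line is
# plain shell syntax; in bash the assignment is visible only to that single
# invocation, so GIT_DEPTH keeps its previous value afterwards. Minimal
# illustration:)
GIT_DEPTH=0 env | grep '^GIT_DEPTH=' || true     # override lives only here
echo "GIT_DEPTH afterwards: ${GIT_DEPTH:-unset}" # prior value, or unset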
+GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH # Install package requirements # Source it so the entire environment is available echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate Python environment +# Configure an appropriate Python environment. +# +# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip +# is very active and changes are not generally reflected in the LTS +# distros. This often involves important things like dependency or +# conflict resolution, and has often been required because the +# complicated constraints etc. used by openstack have tickled bugs in +# distro versions of pip. We want to find these problems as they +# happen, rather than years later when we try to update our LTS +# distro. Whilst it is clear that global installations of upstream +# pip are less and less common, with virtualenv's being the general +# approach now; there are a lot of devstack plugins that assume a +# global install environment. if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi -# Install subunit for the subunit output stream -pip_install -U os-testr - -TRACK_DEPENDS=${TRACK_DEPENDS:-False} +# Do the ugly hacks for broken packages and distros +source $TOP_DIR/tools/fixup_stuff.sh +fixup_all -# Install Python packages into a virtualenv so that we can track them -if [[ $TRACK_DEPENDS = True ]]; then - echo_summary "Installing Python packages into a virtualenv $DEST/.venv" - pip_install -U virtualenv +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin - rm -rf $DEST/.venv - virtualenv --system-site-packages $DEST/.venv - source $DEST/.venv/bin/activate - $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip + setup_devstack_virtualenv fi -# Do the ugly hacks for broken packages and distros -source $TOP_DIR/tools/fixup_stuff.sh +# Install subunit for the subunit output stream +pip_install -U os-testr +# the default rate limit of 1000 messages / 30 seconds is not +# sufficient given how verbose our logging is. +iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" +sudo systemctl restart systemd-journald # Virtual Environment # ------------------- @@ -750,16 +828,28 @@ source $TOP_DIR/tools/fixup_stuff.sh # Install required infra support libraries install_infra +# Install bindep +$VIRTUALENV_CMD $DEST/bindep-venv +# TODO(ianw) : optionally install from zuul checkout? 
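# (Note, hedged: invoking the venv's bin/ entry points directly, as the next
# lines do, needs no "activate" step, since each console script pins the
# venv's interpreter in its shebang. Illustrative call once BINDEP_CMD is
# exported below; the plugin path is hypothetical:)
#     $BINDEP_CMD -b -f /opt/stack/my-plugin/bindep.txt   # list missing distro packages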
+$DEST/bindep-venv/bin/pip install bindep +export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep + +# Install packages as defined in plugin bindep.txt files +pkgs="$( _get_plugin_bindep_packages )" +if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} +fi + # Extras Pre-install # ------------------ # Phase: pre-install run_phase stack pre-install -install_rpc_backend +# NOTE(danms): Set global limits before installing anything +set_systemd_override DefaultLimitNOFILE ${ULIMIT_NOFILE} -# NOTE(sdague): dlm install is conditional on one being enabled by configuration -install_dlm -configure_dlm +install_rpc_backend +restart_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database @@ -772,13 +862,39 @@ if is_service_enabled neutron; then install_neutron_agent_packages fi +if is_service_enabled etcd3; then + install_etcd3 +fi + +# Setup TLS certs +# --------------- + +# Do this early, before any webservers are set up to ensure +# we don't run into problems with missing certs when apache +# is restarted. +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert +fi + +# Dstat +# ----- + +# Install dstat services prerequisites +install_dstat + + # Check Out and Install Source # ---------------------------- echo_summary "Installing OpenStack project source" -# Install Oslo libraries -install_oslo +# Install additional libraries +install_libs + +# Install uwsgi +install_apache_uwsgi # Install client libraries install_keystoneauth @@ -792,15 +908,12 @@ fi if is_service_enabled neutron nova horizon; then install_neutronclient fi -if is_service_enabled heat horizon; then - install_heatclient -fi # Install middleware install_keystonemiddleware if is_service_enabled keystone; then - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone configure_keystone fi @@ -813,12 +926,10 @@ if is_service_enabled swift; then stack_install_service swift configure_swift - # swift3 middleware to provide S3 emulation to Swift - if is_service_enabled swift3; then + # s3api middleware to provide S3 emulation to Swift + if is_service_enabled s3api; then # Replace the nova-objectstore port by the swift port S3_SERVICE_PORT=8080 - git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH - setup_develop $SWIFT3_DIR fi fi @@ -837,39 +948,44 @@ fi if is_service_enabled neutron; then # Network service stack_install_service neutron - install_neutron_third_party fi if is_service_enabled nova; then # Compute service stack_install_service nova - cleanup_nova configure_nova fi +if is_service_enabled placement; then + # placement api + stack_install_service placement + configure_placement +fi + +# create a placement-client fake service to know we need to configure +# placement connectivity. We configure the placement service for nova +# if placement-api or placement-client is active, and n-cpu on the +# same box. 
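# (Hedged note on the test below: is_service_enabled succeeds when ANY of
# its arguments is enabled, so listing several names expresses an OR, e.g.:
#     is_service_enabled svc-a svc-b   # true if either placeholder service is on
# )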
+if is_service_enabled placement placement-client; then + if is_service_enabled n-cpu || is_service_enabled n-sch; then + configure_placement_nova_compute + fi +fi + if is_service_enabled horizon; then - # django openstack_auth - install_django_openstack_auth # dashboard stack_install_service horizon fi -if is_service_enabled heat; then - stack_install_service heat - install_heat_other - cleanup_heat - configure_heat +if is_service_enabled tls-proxy; then + fix_system_ca_bundle_path fi -if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then - configure_CA - init_CA - init_cert - # Add name to ``/etc/hosts``. - # Don't be naive and add to existing line! +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick fi - # Extras Install # -------------- @@ -882,18 +998,14 @@ if use_library_from_git "python-openstackclient"; then setup_dev_lib "python-openstackclient" else pip_install_gr python-openstackclient -fi - -if [[ $TRACK_DEPENDS = True ]]; then - $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip - if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then - echo "Detect some changes for installed packages of pip, in depend tracking mode" - cat $DEST/requires.diff + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server fi - echo "Ran stack.sh in depend tracking mode, bailing out now" - exit 0 fi +# Installs alias for osc so that we can collect timing for all +# osc commands. Alias dies with stack.sh. +install_oscwrap # Syslog # ------ @@ -901,17 +1013,15 @@ fi if [[ $SYSLOG != "False" ]]; then if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then # Configure the master host to receive - cat </tmp/90-stack-m.conf + cat </dev/null \$ModLoad imrelp \$InputRELPServerRun $SYSLOG_PORT EOF - sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d else # Set rsyslog to send to remote host - cat </tmp/90-stack-s.conf + cat </dev/null *.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT EOF - sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi RSYSLOGCONF="/etc/rsyslog.conf" @@ -934,11 +1044,6 @@ EOF fi -# Finalize queue installation -# ---------------------------- -restart_rpc_backend - - # Export Certificate Authority Bundle # ----------------------------------- @@ -957,34 +1062,6 @@ if is_service_enabled $DATABASE_BACKENDS; then configure_database fi - -# Configure screen -# ---------------- - -USE_SCREEN=$(trueorfalse True USE_SCREEN) -if [[ "$USE_SCREEN" == "True" ]]; then - # Create a new named screen to run processes in - screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash - sleep 1 - - # Set a reasonable status bar - SCREEN_HARDSTATUS=${SCREEN_HARDSTATUS:-} - if [ -z "$SCREEN_HARDSTATUS" ]; then - SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' - fi - screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" - screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true -fi - -# Clear ``screenrc`` file -SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc -if [[ -e $SCREENRC ]]; then - rm -f $SCREENRC -fi - -# Initialize the directory for service status check -init_service_check - # Save configuration values save_stackenv $LINENO @@ -998,79 +1075,83 @@ save_stackenv $LINENO # A better kind of sysstat, with the top process per time slice start_dstat +if is_service_enabled atop; then + configure_atop + install_atop + start_atop +fi + +# Run a background tcpdump for debugging +# Note: must set TCPDUMP_ARGS with the enabled service +if 
is_service_enabled tcpdump; then + start_tcpdump +fi + +# Etcd +# ----- + +# etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines +if is_service_enabled etcd3; then + start_etcd3 +fi # Keystone # -------- +if is_service_enabled tls-proxy; then + start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 +fi + +# Write a clouds.yaml file and use the devstack-admin cloud +write_clouds_yaml +export OS_CLOUD=${OS_CLOUD:-devstack-admin} + if is_service_enabled keystone; then echo_summary "Starting Keystone" - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then init_keystone start_keystone bootstrap_keystone fi - # Rather than just export these, we write them out to a - # intermediate userrc file that can also be used to debug if - # something goes wrong between here and running - # tools/create_userrc.sh (this script relies on services other - # than keystone being available, so we can't call it right now) - cat > $TOP_DIR/userrc_early <> $TOP_DIR/userrc_early - fi - - source $TOP_DIR/userrc_early - create_keystone_accounts - create_nova_accounts - create_glance_accounts - create_cinder_accounts - create_neutron_accounts - - if is_service_enabled swift; then - create_swift_accounts + if is_service_enabled nova; then + async_runfunc create_nova_accounts fi - - if is_service_enabled heat; then - create_heat_accounts + if is_service_enabled glance; then + async_runfunc create_glance_accounts + fi + if is_service_enabled cinder; then + async_runfunc create_cinder_accounts + fi + if is_service_enabled neutron; then + async_runfunc create_neutron_accounts + fi + if is_service_enabled swift; then + async_runfunc create_swift_accounts fi fi -# Write a clouds.yaml file -write_clouds_yaml - # Horizon # ------- if is_service_enabled horizon; then echo_summary "Configuring Horizon" - configure_horizon + async_runfunc configure_horizon fi +async_wait create_nova_accounts create_glance_accounts create_cinder_accounts +async_wait create_neutron_accounts create_swift_accounts configure_horizon # Glance # ------ -if is_service_enabled g-reg; then +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Configuring Glance" - init_glance + async_runfunc init_glance fi @@ -1081,26 +1162,20 @@ if is_service_enabled neutron; then echo_summary "Configuring Neutron" configure_neutron + # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then - init_neutron + async_runfunc init_neutron fi fi -# Some Neutron plugins require network controllers which are not -# a part of the OpenStack project. Configure and start them. -if is_service_enabled neutron; then - configure_neutron_third_party - init_neutron_third_party - start_neutron_third_party -fi - # Nova # ---- -if is_service_enabled n-net q-dhcp; then - # Delete traces of nova networks from prior runs +if is_service_enabled q-dhcp; then + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. 
# Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) if [ -z "$netman_pid" ]; then @@ -1111,23 +1186,22 @@ if is_service_enabled n-net q-dhcp; then clean_iptables - if is_service_enabled n-net; then - rm -rf ${NOVA_STATE_PATH}/networks - sudo mkdir -p ${NOVA_STATE_PATH}/networks - safe_chown -R ${STACK_USER} ${NOVA_STATE_PATH}/networks - fi - # Force IP forwarding on, just in case sudo sysctl -w net.ipv4.ip_forward=1 fi +# os-vif +# ------ +if is_service_enabled nova neutron; then + configure_os_vif +fi # Storage Service # --------------- if is_service_enabled swift; then echo_summary "Configuring Swift" - init_swift + async_runfunc init_swift fi @@ -1136,9 +1210,23 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" - init_cinder + async_runfunc init_cinder +fi + +# Placement Service +# --------------- + +if is_service_enabled placement; then + echo_summary "Configuring placement" + async_runfunc init_placement fi +# Wait for neutron and placement before starting nova +async_wait init_neutron +async_wait init_placement +async_wait init_glance +async_wait init_swift +async_wait init_cinder # Compute Service # --------------- @@ -1147,14 +1235,7 @@ if is_service_enabled nova; then echo_summary "Configuring Nova" init_nova - # Additional Nova configuration that is dependent on other services - if is_service_enabled neutron; then - configure_neutron_nova - elif is_service_enabled n-net; then - create_nova_conf_nova_network - fi - - init_nova_cells + async_runfunc configure_neutron_nova fi @@ -1184,40 +1265,23 @@ if is_service_enabled swift; then start_swift fi -# Launch the Glance services -if is_service_enabled glance; then - echo_summary "Starting Glance" - start_glance +# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack +# deployments. This ensures the keys match across nova and cinder across all +# hosts. +FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} +if is_service_enabled cinder; then + iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi +async_wait configure_neutron_nova -# Install Images -# ============== - -# Upload an image to Glance. -# -# The default image is CirrOS, a small testing image which lets you login as **root** -# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending -# scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` - -if is_service_enabled g-reg; then - - echo_summary "Uploading images" - - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi - - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url - done -fi - -# Create a randomized default value for the key manager's fixed_key +# NOTE(clarkb): This must come after async_wait configure_neutron_nova because +# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If +# we don't wait then these two ini updates race either other and can result +# in unexpected configs. 
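# (Sketch, hedged: iniset modifies the target file in place, which is why
# unsynchronized writers can clobber each other and the waits above are
# needed. The call shape, as used throughout this script, is:
#     iniset <config-file> <section> <option> <value>
# e.g. iniset "$EXAMPLE_CONF" DEFAULT debug True, with $EXAMPLE_CONF a
# placeholder path.)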
if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 32) + iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" + iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" fi # Launch the nova-api and wait for it to answer before continuing @@ -1226,43 +1290,44 @@ if is_service_enabled n-api; then start_nova_api fi -if is_service_enabled neutron-api; then - echo_summary "Starting Neutron" - start_neutron_api - # check_neutron_third_party_integration -elif is_service_enabled q-svc; then +if is_service_enabled ovn-controller ovn-controller-vtep; then + echo_summary "Starting OVN services" + start_ovn_services +fi + +if is_service_enabled q-svc neutron-api; then echo_summary "Starting Neutron" + configure_neutron_after_post_config start_neutron_service_and_check - check_neutron_third_party_integration -elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then - NM_CONF=${NOVA_CONF} - if is_service_enabled n-cell; then - NM_CONF=${NOVA_CELLS_CONF} - fi - - # Create a small network - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS - - # Create some floating ips - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME +fi - # Create a second pool - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL +# Start placement before any of the service that are likely to want +# to use it to manage resource providers. +if is_service_enabled placement; then + echo_summary "Starting Placement" + start_placement fi if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" - create_neutron_initial_network + # Here's where plugins can wire up their own networks instead + # of the code in lib/neutron_plugins/services/l3 + if type -p neutron_plugin_create_initial_networks > /dev/null; then + neutron_plugin_create_initial_networks + else + create_neutron_initial_network + fi + fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova - create_flavors + async_runfunc create_flavors fi if is_service_enabled cinder; then echo_summary "Starting Cinder" @@ -1270,19 +1335,42 @@ if is_service_enabled cinder; then create_volume_types fi -# Configure and launch Heat engine, api and metadata -if is_service_enabled heat; then - # Initialize heat - echo_summary "Configuring Heat" - init_heat - echo_summary "Starting Heat" - start_heat - if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then - echo_summary "Building Heat pip mirror" - build_heat_pip_mirror - fi +# This sleep is required for cinder volume service to become active and +# publish capabilities to cinder scheduler before creating the image-volume +if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + sleep 30 +fi + +# Launch the Glance services +# NOTE (abhishekk): We need to start glance api service only after cinder +# service has started as on glance startup glance-api queries cinder for +# validating volume_type configured for cinder store of glance. 
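# (Hedged aside on the flat 30-second sleep above: an alternative sketch is
# to poll until the scheduler reports cinder-volume as up. The command and
# columns are standard python-openstackclient; the retry budget is
# arbitrary, not a devstack default.)
for _ in $(seq 1 30); do
    openstack volume service list -f value -c Binary -c State \
        | grep -q '^cinder-volume up$' && break
    sleep 2
done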
+if is_service_enabled glance; then + echo_summary "Starting Glance" + start_glance +fi + +# Install Images +# ============== + +# Upload an image to Glance. +# +# The default image is CirrOS, a small testing image which lets you login as **root** +# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` + +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then + echo_summary "Uploading images" + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url + done fi +async_wait create_flavors + if is_service_enabled horizon; then echo_summary "Starting Horizon" init_horizon @@ -1298,16 +1386,12 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD" if [ -f $SSL_BUNDLE_FILE ]; then USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" fi - if [[ "$HEAT_STANDALONE" = "True" ]]; then - USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" - fi - $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS fi @@ -1342,6 +1426,41 @@ run_phase stack extra merge_config_group $TOP_DIR/local.conf post-extra +# Sanity checks +# ============= + +# Check that computes are all ready +# +# TODO(sdague): there should be some generic phase here. +if is_service_enabled n-cpu; then + is_nova_ready +fi + +# Check the status of running services +service_check + +# Configure nova cellsv2 +# ---------------------- + +# Do this late because it requires compute hosts to have started +if is_service_enabled n-api; then + if is_service_enabled n-cpu; then + $TOP_DIR/tools/discover_hosts.sh + else + # Some CI systems like Hyper-V build the control plane on + # Linux, and join in non Linux Computes after setup. This + # allows them to delay the processing until after their whole + # environment is up. + echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment." + fi + # Run the nova-status upgrade check command which can also be used + # to verify the base install. Note that this is good enough in a + # single node deployment, but in a multi-node setup it won't verify + # any subnodes - that would have to be driven from whatever tooling + # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job. + $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check +fi + # Run local script # ---------------- @@ -1351,22 +1470,14 @@ if [[ -x $TOP_DIR/local.sh ]]; then $TOP_DIR/local.sh fi -# Sanity checks -# ============= - -# Check the status of running services -service_check - -# ensure that all the libraries we think we installed from git, -# actually were. 
-check_libs_from_git - - # Bash completion # =============== # Prepare bash completion for OSC -openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null +# Note we use "command" to avoid the timing wrapper +# which isn't relevant here and floods logs +command openstack complete \ + | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null # If cinder is configured, set global_filter for PV devices if is_service_enabled cinder; then @@ -1384,6 +1495,9 @@ fi # Phase: test-config run_phase stack test-config +# Apply late configuration from ``local.conf`` if it exists for layer 2 services +# Phase: test-config +merge_config_group $TOP_DIR/local.conf test-config # Fin # === @@ -1399,8 +1513,25 @@ else exec 1>&3 fi +# Make sure we didn't leak any background tasks +async_cleanup + # Dump out the time totals time_totals +async_print_timing + +if is_service_enabled mysql; then + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi + # Using the cloud # =============== @@ -1428,12 +1559,25 @@ fi # Warn that a deprecated feature was used if [[ -n "$DEPRECATED_TEXT" ]]; then - echo_summary "WARNING: $DEPRECATED_TEXT" + echo + echo -e "WARNING: $DEPRECATED_TEXT" + echo fi +echo +echo "Services are running under systemd unit files." +echo "For more information see: " +echo "https://docs.openstack.org/devstack/latest/systemd.html" +echo + +# Useful info on current state +cat /etc/devstack-version +echo + # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." + # Restore/close logging file descriptors exec 1>&3 exec 2>&3 diff --git a/stackrc b/stackrc index acb7d3f650..93f8b1cd6d 100644 --- a/stackrc +++ b/stackrc @@ -5,14 +5,7 @@ # ensure we don't re-source this in the same environment [[ -z "$_DEVSTACK_STACKRC" ]] || return 0 -declare -r _DEVSTACK_STACKRC=1 - -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. -unset LANG -unset LANGUAGE -LC_ALL=C -export LC_ALL +declare -r -g _DEVSTACK_STACKRC=1 # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) @@ -20,6 +13,18 @@ RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Source required DevStack functions and globals source $RC_DIR/functions +# Set the target branch. This is used so that stable branching +# does not need to update each repo below. +TARGET_BRANCH=master + +# Cycle trailing projects need to branch later than the others. +TRAILING_TARGET_BRANCH=master + +# And some repos do not create stable branches, so this is used +# to make it explicit and avoid accidentally setting to a stable +# branch. +BRANCHLESS_TARGET_BRANCH=master + # Destination path for installation DEST=/opt/stack @@ -51,89 +56,84 @@ KEYSTONE_REGION_NAME=${KEYSTONE_REGION_NAME:-$REGION_NAME} # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``local.conf``. 
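# (Complementary sketch, hedged: dropping one of the default services from
# ``local.conf`` uses the same mechanism; the service name is an example.)
# [[local|localrc]]
# disable_service horizon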
-# For example, to enable Swift add this to ``local.conf``: -# enable_service s-proxy s-object s-container s-account -# In order to enable Neutron (a single node setup) add the following +# For example, to enable Swift as part of DevStack add the following # settings in ``local.conf``: # [[local|localrc]] -# disable_service n-net -# enable_service q-svc -# enable_service q-agt -# enable_service q-dhcp -# enable_service q-l3 -# enable_service q-meta -# # Optional, to enable tempest configuration as part of DevStack -# enable_service tempest - +# enable_service s-proxy s-object s-container s-account # This allows us to pass ``ENABLED_SERVICES`` if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-cauth + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta + # Placement service needed for Nova + ENABLED_SERVICES+=,placement-api,placement-client # Glance services needed for Nova - ENABLED_SERVICES+=,g-api,g-reg + ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server + # Neutron + ENABLED_SERVICES+=,q-svc,q-ovn-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services - ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat + ENABLED_SERVICES+=,rabbit,tempest,mysql,etcd3,dstat fi # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all -# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) +# services will rely on the local toggle variable. ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable NOVA_ENABLED_APIS=osapi_compute,metadata +# allow local overrides of env variables, including repo config +if [[ -f $RC_DIR/localrc ]]; then + # Old-style user-supplied config + source $RC_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + # New-style user-supplied config extracted from local.conf + source $RC_DIR/.localrc.auto +fi + +# CELLSV2_SETUP - how we should configure services with cells v2 +# +# - superconductor - this is one conductor for the api services, and +# one per cell managing the compute services. This is preferred +# - singleconductor - this is one conductor for the whole deployment, +# this is not recommended, and will be removed in the future. +CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} + # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" -# Whether to use 'dev mode' for screen windows. Dev mode works by -# stuffing text into the screen windows so that a developer can use -# ctrl-c, up-arrow, enter to restart the service. Starting services -# this way is slightly unreliable, and a bit slower, so this can -# be disabled for automated testing by setting this value to False. -USE_SCREEN=$(trueorfalse True USE_SCREEN) - -# When using screen, should we keep a log file on disk? You might -# want this False if you have a long-running setup where verbose logs -# can fill-up the host. -# XXX: Ideally screen itself would be configured to log but just not -# activate. This isn't possible with the screerc syntax. 
Temporary -# logging can still be used by a developer with: -# C-a : logfile foo -# C-a : log on -SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING) +# Whether to use user specific units for running services or global ones. +USER_UNITS=$(trueorfalse False USER_UNITS) +if [[ "$USER_UNITS" == "True" ]]; then + SYSTEMD_DIR="$HOME/.local/share/systemd/user" + SYSTEMCTL="systemctl --user" +else + SYSTEMD_DIR="/etc/systemd/system" + SYSTEMCTL="sudo systemctl" +fi # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used. -export USE_PYTHON3=${USE_PYTHON3:-False} - -# When Python 3 is supported by an application, adding the specific -# version of Python 3 to this variable will install the app using that -# version of the interpreter instead of 2.7. -export PYTHON3_VERSION=${PYTHON3_VERSION:-3.4} +# Adding the specific version of Python 3 to this variable will install +# the app using that version of the interpreter instead of just 3. +_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}} -# Just to be more explicit on the Python 2 version to use. -export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7} - -# allow local overrides of env variables, including repo config -if [[ -f $RC_DIR/localrc ]]; then - # Old-style user-supplied config - source $RC_DIR/localrc -elif [[ -f $RC_DIR/.localrc.auto ]]; then - # New-style user-supplied config extracted from local.conf - source $RC_DIR/.localrc.auto -fi +# Create a virtualenv with this +# Use the built-in venv to avoid more dependencies +export VIRTUALENV_CMD="python3 -m venv" # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, @@ -162,20 +162,25 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} - -# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack -# deployment will be deploying the Identity v2 pipelines. If this option is set -# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to -# skip Identity v2 specific tests; and iii) configure Horizon to use Identity -# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION -# will to be set to ``3`` in order to make DevStack register the Identity -# endpoint as v3. This flag is experimental and will be used as basis to -# identify the projects which still have issues to operate with Identity v3. -ENABLE_IDENTITY_V2=$(trueorfalse True ENABLE_IDENTITY_V2) -if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - IDENTITY_API_VERSION=3 +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. 
certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) fi # Enable use of Python virtual environments. Individual project use of @@ -185,13 +190,23 @@ fi USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any -# requirmenets files here, in a comma-separated list -ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} +# requirements files here, in a comma-separated list. +# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is # in the format for timeout(1); @@ -203,15 +218,23 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} +# How should we be handling WSGI deployments. By default we're going +# to allow for 2 modes, which is "uwsgi" which runs with an apache +# proxy uwsgi in front of it, or "mod_wsgi", which runs in +# apache. mod_wsgi is deprecated, don't use it. +WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi + # Repositories # ------------ # Base GIT Repo URL -# Another option is https://git.openstack.org -GIT_BASE=${GIT_BASE:-git://git.openstack.org} +GIT_BASE=${GIT_BASE:-https://opendev.org} # The location of REQUIREMENTS once cloned -REQUIREMENTS_DIR=$DEST/requirements +REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Which libraries should we install from git instead of using released # versions on pypi? @@ -229,6 +252,7 @@ REQUIREMENTS_DIR=$DEST/requirements # Setting the variable to 'ALL' will activate the download for all # libraries. 
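# (Usage sketch, hedged: the knob described above is LIBS_FROM_GIT, a
# comma-separated list keyed by the GITREPO entries defined below; the
# library names are examples only.)
# LIBS_FROM_GIT=oslo.policy,oslo.db
# LIBS_FROM_GIT=ALL    # install every supported library from source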
+DEVSTACK_SERIES="2026.1" ############## # @@ -238,43 +262,35 @@ REQUIREMENTS_DIR=$DEST/requirements # block storage service CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} -CINDER_BRANCH=${CINDER_BRANCH:-master} +CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH} # image catalog service GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} -GLANCE_BRANCH=${GLANCE_BRANCH:-master} - -# heat service -HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} -HEAT_BRANCH=${HEAT_BRANCH:-master} +GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH} # django powered web control panel for openstack HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} -HORIZON_BRANCH=${HORIZON_BRANCH:-master} +HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH} # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} -KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH} # neutron service NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} -NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} - -# neutron fwaas service -NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git} -NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master} - -# neutron lbaas service -NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git} -NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master} +NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH} # compute service NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} -NOVA_BRANCH=${NOVA_BRANCH:-master} +NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} # object storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} -SWIFT_BRANCH=${SWIFT_BRANCH:-master} +SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH} + +# placement service +PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git} +PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH} ############## # @@ -284,12 +300,16 @@ SWIFT_BRANCH=${SWIFT_BRANCH:-master} # consolidated openstack requirements REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} -REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} -TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images ############## # @@ -300,52 +320,57 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} # volume client GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} -GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} +GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH} # os-brick client for local volume attachement GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} -GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master} +GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH} + +# python barbican client library +GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git} 
+GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH} +GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient # python glance client library GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} -GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master} - -# python heat client library -GITREPO["python-heatclient"]=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} -GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master} +GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH} # ironic client GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} -GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master} +GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH} # ironic plugin is out of tree, but nova uses it. set GITDIR here. GITDIR["python-ironicclient"]=$DEST/python-ironicclient # the base authentication plugins that clients use to authenticate GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} -GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master} +GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH} # python keystone client library to nova that horizon uses GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} -GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master} +GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH} # neutron client GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} -GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-master} +GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH} # python client library to nova that horizon (and others) use GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} -GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master} +GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH} # python swift client library GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} -GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master} +GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH} # consolidated openstack python client GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} -GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master} +GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH} # this doesn't exist in a lib file, so set it here GITDIR["python-openstackclient"]=$DEST/python-openstackclient +# placement-api CLI +GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git} +GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH} + ################### # @@ -354,117 +379,129 @@ GITDIR["python-openstackclient"]=$DEST/python-openstackclient # ################### +# castellan key manager interface +GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git} +GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH} + # cliff command line framework GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} -GITBRANCH["cliff"]=${CLIFF_BRANCH:-master} +GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH} # async framework/helpers GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git} 
-GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master} +GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} # debtcollector deprecation framework/helpers GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} -GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master} +GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} + +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} -GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master} +GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} # oslo.cache GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git} -GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master} +GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH} # oslo.concurrency GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} -GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master} +GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH} # oslo.config GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} -GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-master} +GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH} # oslo.context GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git} -GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-master} +GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH} # oslo.db GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git} -GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-master} +GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} # oslo.i18n GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} -GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-master} +GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} + +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} # oslo.log GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} -GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-master} +GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} # oslo.messaging GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} -GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-master} +GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH} # oslo.middleware GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git} -GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master} +GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH} # oslo.policy GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} -GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master} +GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH} # oslo.privsep GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git} -GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master} +GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH} # oslo.reports GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} -GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master} +GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH} # oslo.rootwrap GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} 
-GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master} +GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH} # oslo.serialization GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git} -GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master} +GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH} # oslo.service GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git} -GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master} +GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH} # oslo.utils GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} -GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master} +GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH} # oslo.versionedobjects GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git} -GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master} +GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH} # oslo.vmware GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} -GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master} +GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH} # osprofiler GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git} -GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-master} +GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH} # pycadf auditing library GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} -GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master} +GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH} # stevedore plugin manager GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} -GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-master} +GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH} # taskflow plugin manager GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} -GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-master} +GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH} # tooz plugin manager GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git} -GITBRANCH["tooz"]=${TOOZ_BRANCH:-master} +GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} # pbr drives the setuptools configs -GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} -GITBRANCH["pbr"]=${PBR_BRANCH:-master} +GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git} +GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH} ################## @@ -473,80 +510,77 @@ GITBRANCH["pbr"]=${PBR_BRANCH:-master} # ################## +# cursive library +GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git} +GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH} + # glance store library GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} -GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master} - -# heat-cfntools server agent -HEAT_CFNTOOLS_REPO=${HEAT_CFNTOOLS_REPO:-${GIT_BASE}/openstack/heat-cfntools.git} -HEAT_CFNTOOLS_BRANCH=${HEAT_CFNTOOLS_BRANCH:-master} - -# heat example templates and elements -HEAT_TEMPLATES_REPO=${HEAT_TEMPLATES_REPO:-${GIT_BASE}/openstack/heat-templates.git} -HEAT_TEMPLATES_BRANCH=${HEAT_TEMPLATES_BRANCH:-master} - -# django openstack_auth library -GITREPO["django_openstack_auth"]=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git} -GITBRANCH["django_openstack_auth"]=${HORIZONAUTH_BRANCH:-master} 
+GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH} # keystone middleware GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} -GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master} - -# s3 support for swift -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git} -SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} +GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH} # ceilometer middleware GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} -GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} +GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware +# openstacksdk OpenStack Python SDK +GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git} +GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} + # os-brick library to manage local volume attaches GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} -GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} +GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH} + +# os-client-config to manage clouds.yaml and friends +GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git} +GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH} +GITDIR["os-client-config"]=$DEST/os-client-config # os-vif library to communicate between Neutron to Nova GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git} -GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master} +GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH} + +# osc-lib OpenStackClient common lib +GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} +GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH} # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} -GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} +GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH} # this doesn't exist in a lib file, so set it here GITDIR["ironic-lib"]=$DEST/ironic-lib # diskimage-builder tool GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} -GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-master} +GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH} GITDIR["diskimage-builder"]=$DEST/diskimage-builder # neutron-lib library containing neutron stable non-REST interfaces GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git} -GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master} +GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib -################## -# -# TripleO / Heat Agent Components -# -################## - -# run-parts script required by os-refresh-config -DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-master} +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} +GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} -# os-apply-config configuration template tool -OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-master} +# os-traits library for resource provider traits in the 
placement service +GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} +GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} -# os-collect-config configuration agent -OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-master} +# ovsdbapp used by neutron +GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} +GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} +GITDIR["ovsdbapp"]=$DEST/ovsdbapp -# os-refresh-config configuration run-parts tool -ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-master} +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken ################# @@ -559,49 +593,56 @@ ORC_BRANCH=${ORC_BRANCH:-master} # ironic python agent IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} -IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master} +IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-master} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} -SPICE_BRANCH=${SPICE_BRANCH:-master} +SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +# Global flag used to configure Tempest and potentially other services if +# volume multiattach is supported. In Queens, only the libvirt compute driver +# and lvm volume driver support multiattach, and qemu must be less than 2.10 +# or libvirt must be greater than or equal to 3.10. +ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core -# is installed, the default will be XenAPI +# also install an **LXC** or **OpenVZ** based system. DEFAULT_VIRT_DRIVER=libvirt -is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - if [[ "$os_VENDOR" =~ (Debian) ]]; then + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi ;; + lxd) + LXD_GROUP=${LXD_GROUP:-"lxd"} + ;; + docker|zun) + DOCKER_GROUP=${DOCKER_GROUP:-"docker"} + ;; fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; - xenserver) - # Xen config common to nova and neutron - XENAPI_USER=${XENAPI_USER:-"root"} - # This user will be used for dom0 - domU communication - # should be able to log in to dom0 without a password - # will be used to install the plugins - DOMZERO_USER=${DOMZERO_USER:-"domzero"} - ;; *) ;; esac - # Images # ------ @@ -611,21 +652,20 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. 
If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.4"} -CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of # which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and @@ -636,58 +676,73 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then IMAGE_URLS+="," fi case "$VIRT_DRIVER" in - openvz) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} - IMAGE_URLS+="http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz";; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz";; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + *) # otherwise, use the qcow image + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} - IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk";; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} - IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" - 
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; - ironic) - # Ironic can do both partition and full disk images, depending on the driver - if [[ -z "${IRONIC_DEPLOY_DRIVER%%agent*}" ]]; then - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk} - else - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} - fi - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz" - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";; - *) # Default to Cirros with kernel, ramdisk and disk image - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} + IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; + fake) + # Use the same as the default for libvirt + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi -# Staging area for new images. These images are cached by a run of -# ./tools/image_list.sh during CI image build (see -# project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos). -# -# To avoid CI failures grabbing the images, new images should be here -# for at least 24hrs (nodepool builds images at 14:00UTC) so the they -# are in the cache. -PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES) -if [[ "$PRECACHE_IMAGES" == "True" ]]; then - # required for trove devstack tests; see - # git.openstack.org/cgit/openstack/trove/tree/devstack/plugin.sh - IMAGE_URL="http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2" - if ! [[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then - IMAGE_URLS+=",$IMAGE_URL" - fi +# This is a comma separated list of extra URLS to be listed for +# download by the tools/image_list.sh script. CI environments can +# pre-download these URLS and place them in $FILES. Later scripts can +# then use "get_extra_file " which will print out the path to the +# file; it will either be downloaded on demand or acquired from the +# cache if there. 
+EXTRA_CACHE_URLS="" + +# etcd3 defaults +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} +# Make sure etcd3 downloads the correct architecture +if is_arch "x86_64"; then + ETCD_ARCH="amd64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} +elif is_arch "aarch64"; then + ETCD_ARCH="arm64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} +elif is_arch "ppc64le"; then + ETCD_ARCH="ppc64le" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} +elif is_arch "s390x"; then + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} +else + exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi +ETCD_PORT=${ETCD_PORT:-2379} +ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380} +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download} +ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH +ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz +ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE +# etcd is always required, so place it into list of pre-cached downloads +EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION" + +# Cache settings +CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"} +MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"} # Detect duplicate values in IMAGE_URLS for image_url in ${IMAGE_URLS//,/ }; do @@ -696,8 +751,8 @@ for image_url in ${IMAGE_URLS//,/ }; do fi done -# 10Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} # Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} @@ -710,8 +765,7 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} -# Set default screen name -SCREEN_NAME=${SCREEN_NAME:-stack} +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""} # Allow the use of an alternate protocol (such as https) for service endpoints SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} @@ -720,24 +774,19 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # the memory used where there are a large number of CPUs present # (the default number of workers for many services is the number of CPUs) # Also sets the minimum number of workers to 2. -if [[ "$VIRT_DRIVER" = 'fake' ]]; then - # we need more workers for the large ops job - API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))} -else - API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} -fi +API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Timeout for compute node registration in Nova +NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} + # Service graceful shutdown timeout SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} -# Support alternative yum -- in future Fedora 'dnf' will become the -# only supported installer, but for now 'yum' and 'dnf' are both -# available in parallel with compatible CLIs. 
Allow manual switching -# till we get to the point we need to handle this automatically -YUM=${YUM:-yum} +# Service worker timeout +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} # Common Configuration # -------------------- @@ -760,8 +809,8 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack # in an OpenStack cloud that uses either of these address ranges internally. FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} +IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22} +FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE} HOST_IP_IFACE=${HOST_IP_IFACE:-} HOST_IP=${HOST_IP:-} HOST_IPV6=${HOST_IPV6:-} @@ -773,6 +822,9 @@ fi HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6") +# Whether or not the port_security extension should be enabled for Neutron. +NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY) + # SERVICE IP version # This is the IP version that services should be listening on, as well # as using to register their endpoints with keystone. @@ -799,10 +851,10 @@ if [[ "$SERVICE_IP_VERSION" == 6 ]]; then DEF_SERVICE_HOST=[$HOST_IPV6] DEF_SERVICE_LOCAL_HOST=::1 - DEF_SERVICE_LISTEN_ADDRESS=:: + DEF_SERVICE_LISTEN_ADDRESS="[::]" fi -# This is either 0.0.0.0 for IPv4 or :: for IPv6 +# This is either 0.0.0.0 for IPv4 or [::] for IPv6 SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}} # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for @@ -811,7 +863,31 @@ SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} # This is either 127.0.0.1 for IPv4 or ::1 for IPv6 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} -REGION_NAME=${REGION_NAME:-RegionOne} +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) @@ -822,31 +898,12 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} # Set to 0 to disable shallow cloning GIT_DEPTH=${GIT_DEPTH:-0} -# Use native SSL for servers in ``SSL_ENABLED_SERVICES`` -USE_SSL=$(trueorfalse False USE_SSL) - -# ebtables is inherently racey. If you run it by two or more processes -# simultaneously it will collide, badly, in the kernel and produce -# failures or corruption of ebtables. The only way around it is for -# all tools running ebtables to only ever do so with the --concurrent -# flag. This requires libvirt >= 1.2.11.
-# -# If you don't have this then the following work around will replace -# ebtables with a wrapper script so that it is safe to run without -# that flag. -EBTABLES_RACE_FIX=$(trueorfalse False EBTABLES_RACE_FIX) +# We may not need to recreate the database when two Keystone services +# share the same database; this is useful for multinode Grenade tests. +RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) # Following entries need to be last items in file -# Compatibility bits required by other callers like Grenade - -# Old way was using SCREEN_LOGDIR to locate those logs and LOGFILE for the stack.sh trace log. -# LOGFILE SCREEN_LOGDIR output -# not set not set no log files -# set not set stack.sh log to LOGFILE -# not set set screen logs to SCREEN_LOGDIR -# set set stack.sh log to LOGFILE, screen logs to SCREEN_LOGDIR - # New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR # LOGFILE LOGDIR output # not set not set (new) set LOGDIR from default @@ -854,9 +911,6 @@ EBTABLES_RACE_FIX=$(trueorfalse False EBTABLES_RACE_FIX) # not set set screen logs to LOGDIR # set set stack.sh log to LOGFILE, screen logs to LOGDIR -# For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR -# symlinks to SCREEN_LOGDIR (compat) - # Set up new logging defaults if [[ -z "${LOGDIR:-}" ]]; then default_logdir=$DEST/logs @@ -871,18 +925,14 @@ if [[ -z "${LOGDIR:-}" ]]; then # LOGFILE had no path, set a default LOGDIR="$default_logdir" fi - - # Check for duplication - if [[ "${SCREEN_LOGDIR:-}" == "${LOGDIR}" ]]; then - # We don't need the symlinks since it's the same directory - unset SCREEN_LOGDIR - fi fi unset default_logdir logfile fi # ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs -# ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks + +# System-wide ulimit file descriptors override +ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048} # Local variables: # mode: shell-script diff --git a/tests/run-process.sh b/tests/run-process.sh deleted file mode 100755 index 301b9a032b..0000000000 --- a/tests/run-process.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash -# tests/exec.sh - Test DevStack run_process() and stop_process() -# -# exec.sh start|stop|status -# -# Set USE_SCREEN True|False to change use of screen. -# -# This script emulates the basic exec environment in ``stack.sh`` to test -# the process spawn and kill operations. - -if [[ -z $1 ]]; then - echo "$0 start|stop" - exit 1 -fi - -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -source $TOP_DIR/functions - -USE_SCREEN=${USE_SCREEN:-False} - -ENABLED_SERVICES=fake-service - -SERVICE_DIR=/tmp -SCREEN_NAME=test -SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME} - - -# Kill background processes on exit -trap clean EXIT -clean() { - local r=$? - jobs -p - kill >/dev/null 2>&1 $(jobs -p) - exit $r -} - - -# Exit on any errors so that errors don't compound -trap failed ERR -failed() { - local r=$? - jobs -p - kill >/dev/null 2>&1 $(jobs -p) - set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" - exit $r -} - -function status { - if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then - pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid) - fi - ps -ef | grep fake -} - -function setup_screen { -if [[ !
-d $SERVICE_DIR/$SCREEN_NAME ]]; then - rm -rf $SERVICE_DIR/$SCREEN_NAME - mkdir -p $SERVICE_DIR/$SCREEN_NAME -fi - -if [[ "$USE_SCREEN" == "True" ]]; then - # Create a new named screen to run processes in - screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash - sleep 1 - - # Set a reasonable status bar - if [ -z "$SCREEN_HARDSTATUS" ]; then - SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' - fi - screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" -fi - -# Clear screen rc file -SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc -if [[ -e $SCREENRC ]]; then - echo -n > $SCREENRC -fi -} - -# Mimic logging - # Set up output redirection without log files - # Copy stdout to fd 3 - exec 3>&1 - if [[ "$VERBOSE" != "True" ]]; then - # Throw away stdout and stderr - #exec 1>/dev/null 2>&1 - : - fi - # Always send summary fd to original stdout - exec 6>&3 - - -if [[ "$1" == "start" ]]; then - echo "Start service" - setup_screen - run_process fake-service "$TOP_DIR/tests/fake-service.sh" - sleep 1 - status -elif [[ "$1" == "stop" ]]; then - echo "Stop service" - stop_process fake-service - status -elif [[ "$1" == "status" ]]; then - status -else - echo "Unknown command" - exit 1 -fi diff --git a/tests/test_functions.sh b/tests/test_functions.sh index 8aae23dcb8..08143d2a68 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -224,7 +224,7 @@ fi # test against removed package...was a bug on Ubuntu if is_ubuntu; then - PKG=cowsay + PKG=cowsay-off if ! (dpkg -s $PKG >/dev/null 2>&1); then # it was never installed...set up the condition sudo apt-get install -y cowsay >/dev/null 2>&1 @@ -272,7 +272,7 @@ function test_export_proxy_variables { export_proxy_variables expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy") - results=$(env | egrep '(http(s)?|no)_proxy=') + results=$(env | egrep '(http(s)?|no)_proxy=' | sort) if [[ $expected = $results ]]; then passed "OK: Proxy variables are exported when proxy variables are set" else diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index a5e110736e..fd3896d6ba 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,15 @@ empty = multi = foo1 multi = foo2 +[fff] +ampersand = + +[ggg] +backslash = + +[key_with_spaces] +rgw special key = something + # inidelete(a) [del_separate_options] a=b @@ -82,8 +91,9 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee del_separate_options \ -del_same_option del_missing_option del_missing_option_multi del_no_options" +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff ggg key_with_spaces \ +del_separate_options del_same_option del_missing_option \ +del_missing_option_multi del_no_options" # Test with missing arguments BEFORE=$(cat ${TEST_INI}) @@ -120,15 +130,32 @@ iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" +# Test with ampersand in values +for i in `seq 3`; do + iniset ${TEST_INI} fff ampersand '&y' +done +VAL=$(iniget ${TEST_INI} fff ampersand) +assert_equal "$VAL" "&y" "iniset ampersands in option" + +# Test with backslash in value +iniset ${TEST_INI} ggg backslash 'foo\bar' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar' "iniset backslash in value" + +# Test with both ampersand and backslash +iniset ${TEST_INI} ggg backslash 'foo\bar&baz' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 
'foo\bar&baz' "iniset ampersand and backslash in value" + # test empty option -if ini_has_option ${TEST_INI} ddd empty; then +if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" else failed "ini_has_option failed: ddd.empty not found" fi # test non-empty option -if ini_has_option ${TEST_INI} bbb handlers; then +if ini_has_option ${SUDO_ARG} ${TEST_INI} bbb handlers; then passed "ini_has_option: bbb.handlers present" else failed "ini_has_option failed: bbb.handlers not found" @@ -209,6 +236,20 @@ iniset $SUDO_ARG ${INI_TMP_ETC_DIR}/test.new.ini test foo bar VAL=$(iniget ${INI_TMP_ETC_DIR}/test.new.ini test foo) assert_equal "$VAL" "bar" "iniset created file" +# test creation of keys with spaces +iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key" somethingelse +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key") +assert_equal "$VAL" "somethingelse" "iniset created a key with spaces" + +# test update of keys with spaces +iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw special key" newvalue +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw special key") +assert_equal "$VAL" "newvalue" "iniset updated a key with spaces" + +inidelete ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key" +VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key") +assert_empty VAL "inidelete removed a key with spaces" + $SUDO rm -rf ${INI_TMP_DIR} report_results diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index bb58088ef3..9552c93c4f 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -32,17 +32,20 @@ done ALL_LIBS="python-novaclient oslo.config pbr oslo.context" ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf" ALL_LIBS+=" python-glanceclient python-ironicclient" -ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" +ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" -ALL_LIBS+=" oslo.serialization django_openstack_auth" -ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" -ALL_LIBS+=" oslo.utils python-swiftclient" +ALL_LIBS+=" oslo.serialization" +ALL_LIBS+=" python-openstackclient osc-lib osc-placement" +ALL_LIBS+=" os-client-config oslo.rootwrap" +ALL_LIBS+=" oslo.i18n oslo.utils openstacksdk python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" -ALL_LIBS+=" oslo.cache oslo.reports osprofiler" +ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" +ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" +ALL_LIBS+=" oslo.limit etcd3gw" # Generate the above list with # echo ${!GITREPO[@]} diff --git a/tests/test_localconf.sh b/tests/test_localconf.sh new file mode 100755 index 0000000000..d8075df442 --- /dev/null +++ b/tests/test_localconf.sh @@ -0,0 +1,475 @@ +#!/usr/bin/env bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import config functions +source $TOP/inc/ini-config + +source $TOP/tests/unittest.sh + +echo "Testing INI local.conf functions" + +# test that can determine if file has section in specified meta-section + +function test_localconf_has_section { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_section $file_localconf post-config $file_conf1 conf1_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t2 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t3 + assert_equal $? 0 + localconf_has_section $file_localconf post-extra $file_conf2 conf2_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t4 + assert_equal $? 1 + localconf_has_section $file_localconf post-install $file_conf1 conf1_t1 + assert_equal $? 1 + localconf_has_section $file_localconf local localrc conf1_t2 + assert_equal $? 1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that can determine if file has option in specified meta-section and section +function test_localconf_has_option { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1 = conf1_t1_val1 +conf1_t1_opt2 = conf1_t1_val2 +conf1_t1_opt3 = conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR1 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR2 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR3 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 + assert_equal $? 
0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 + assert_equal $? 0 + localconf_has_option $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1_opt4 + assert_equal $? 1 + localconf_has_option $file_localconf post-install $file_conf1 conf1_t1_opt1 + assert_equal $? 1 + localconf_has_option $file_localconf local localrc conf1_t2 conf1_t2_opt1 + assert_equal $? 1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that update option in specified meta-section and section +function test_localconf_update_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2 +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2_update +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1_update +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2_update +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3_update + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3_update +EOF + + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 conf1_t1_val1_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 conf1_t2_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 conf1_t3_val3_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt3 conf2_t1_val3_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t3_opt1 conf1_t3_val1_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4_update + localconf_update_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt1 conf2_t1_val1_update + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add option in specified meta-section and section +function 
test_localconf_add_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt4 = conf1_t1_val4 +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt4 = conf1_t2_val4 +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt4 = conf1_t3_val4 +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR4 = localrc_val4 +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + + localconf_add_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt4 conf1_t1_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt4 conf1_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt4 conf1_t3_val4 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_option "$SUDO" $file_localconf local localrc.conf "" LOCALRC_VAR4 localrc_val4_update + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt4 conf2_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t2_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_add_section_and_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] 
+LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t2] +conf2_t2_opt1 = conf2_t2_val1 +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_add_section_and_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_section_and_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_section_and_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_set { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2_update +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 + +[[post-install|/etc/neutron/plugin/ml2/ml2_conf.ini]] +[ml2] +ml2_opt1 = ml2_val1 +EOF + + if [[ -n "$SUDO" ]]; then + SUDO_ARG="-sudo" + else + SUDO_ARG="" + fi + localconf_set $SUDO_ARG $file_localconf post-install /etc/neutron/plugin/ml2/ml2_conf.ini ml2 ml2_opt1 ml2_val1 + localconf_set $SUDO_ARG $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_set $SUDO_ARG 
$file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_set $SUDO_ARG $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + + +test_localconf_has_section +test_localconf_has_option +test_localconf_update_option +test_localconf_add_option +test_localconf_add_section_and_option +test_localconf_set diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 327fb56185..30479f245a 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -29,6 +29,10 @@ function die { exit -1 } +function warn { + return 0 +} + TEST_1C_ADD="[eee] type=new multi = foo2" @@ -92,7 +96,7 @@ $TEST_1C_ADD [[test3|test-space.conf]] [DEFAULT] attribute=value - + # the above line has a single space [[test4|\$TEST4_DIR/\$TEST4_FILE]] @@ -125,6 +129,17 @@ foo=bar [[test10|does-not-exist-dir/test.conf]] foo=bar +[[test11|test-same.conf]] +[DEFAULT] +foo=bar + +[[test11|test-same.conf]] +[some] +random=config + +[[test12|run_tests.sh/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -147,6 +162,9 @@ cfg_item1 = abcd cfg_item2 = efgh cfg_item2 = \${FOO_BAR_BAZ} +[[test11|test-same.conf]] +[another] +non = sense EOF echo -n "get_meta_section_files: test0 doesn't exist: " @@ -367,26 +385,52 @@ set -e echo -n "merge_config_group test9 undefined conf file: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed +# function is expected to trigger warn and continue (merge_config_group test.conf test9) VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e -echo -n "merge_config_group test10 not directory: " +echo -n "merge_config_group test10 create directory: " +set +e +STACK_USER=$(id -u -n) +merge_config_group test.conf test10 +VAL=$? +EXPECT_VAL=0 +check_result "$VAL" "$EXPECT_VAL" +set -e + +echo -n "merge_config_file test11 same section: " +rm -f test-same.conf +merge_config_group test.conf test11 +VAL=$(cat test-same.conf) +EXPECT_VAL=' +[DEFAULT] +foo = bar + +[some] +random = config + +[another] +non = sense' +check_result "$VAL" "$EXPECT_VAL" + +echo -n "merge_config_group test12 directory as file: " set +e # function is expected to fail and exit, running it # in a subprocess to let this script proceed -(merge_config_group test.conf test10) +(merge_config_group test.conf test12) VAL=$? EXPECT_VAL=255 check_result "$VAL" "$EXPECT_VAL" set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ - test-multi-sections.conf + test-multi-sections.conf test-same.conf rm -rf test-etc +rm -rf does-not-exist-dir + diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh index bfc2a1954f..f221c821a0 100755 --- a/tests/test_package_ordering.sh +++ b/tests/test_package_ordering.sh @@ -8,7 +8,7 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) source $TOP/tests/unittest.sh export LC_ALL=en_US.UTF-8 -PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f) TMPDIR=$(mktemp -d) diff --git a/tests/test_refs.sh b/tests/test_refs.sh index bccca5dff7..0f9aa4a5ca 100755 --- a/tests/test_refs.sh +++ b/tests/test_refs.sh @@ -15,10 +15,10 @@ echo "Ensuring we don't have crazy refs" -REFS=`grep BRANCH stackrc | grep -v -- '-master'` +REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'` rc=$? if [[ $rc -eq 0 ]]; then - echo "Branch defaults must be master. Found:" + echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:" echo $REFS exit 1 fi diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh index f407d407c0..919652536d 100755 --- a/tests/test_worlddump.sh +++ b/tests/test_worlddump.sh @@ -8,7 +8,7 @@ source $TOP/tests/unittest.sh OUT_DIR=$(mktemp -d) -$TOP/tools/worlddump.py -d $OUT_DIR +${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR if [[ $? -ne 0 ]]; then fail "worlddump failed" diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh new file mode 100755 index 0000000000..71d8d51614 --- /dev/null +++ b/tests/test_write_devstack_local_conf_role.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh index 3703ece91d..fced2abe65 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -17,6 +17,8 @@ ERROR=0 PASS=0 FAILED_FUNCS="" +export PYTHON=$(which python3 2>/dev/null) + # pass a test, printing out MSG # usage: passed message function passed { diff --git a/tools/build_venv.sh b/tools/build_venv.sh index cfa39a82e0..a439163b5d 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -38,7 +38,7 @@ if [[ -z "$TOP_DIR" ]]; then fi # Build new venv -virtualenv $VENV_DEST +python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST # Install modern pip PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt deleted file mode 100644 index c280267641..0000000000 --- a/tools/cap-pip.txt +++ /dev/null @@ -1 +0,0 @@ -pip!=8 diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index c0b7ac70aa..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -32,7 +32,7 @@ GetDistro source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` -is_package_installed sudo || install_package sudo +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo [[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." @@ -44,6 +44,15 @@ fi if ! 
getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index b6db5d11aa..f4a4edcbe2 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -152,7 +152,7 @@ if [ -z "$OS_USERNAME" ]; then fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v3/ fi if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then @@ -193,7 +193,6 @@ export OS_PROJECT_NAME="$project_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" export NOVA_CERT="$ACCOUNT_DIR/cacert.pem" -export OS_AUTH_TYPE=v2password EOF if [ -n "$ADDPASS" ]; then echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile" diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..86e5529c97 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,121 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. + """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. 
+ + Attempts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. + if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') + try: + with self.engine.begin() as conn: + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. + + This reads "hits" from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. + """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + last = time.time() + while time.time() - last < 60: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..12300bf619 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +py_modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/debug_function.sh b/tools/debug_function.sh new file mode 100755 index 0000000000..68bd85dc61 --- /dev/null +++ b/tools/debug_function.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# This is a small helper to speed development and debugging with devstack. +# It is intended to help you run a single function in a project module +# without having to re-stack. +# +# For example, to run just the start_glance function, do this: +# +# ./tools/debug_function.sh glance start_glance + +if [ !
-f "lib/$1" ]; then + echo "Usage: $0 [project] [function] [function...]" +fi + +source stackrc +source lib/$1 +shift +set -x +while [ "$1" ]; do + echo ==== Running $1 ==== + $1 + echo ==== Done with $1 ==== + shift +done diff --git a/tools/discover_hosts.sh b/tools/discover_hosts.sh new file mode 100755 index 0000000000..4ec6a40511 --- /dev/null +++ b/tools/discover_hosts.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# **discover_hosts.sh** + +# This is just a very simple script to run the +# "nova-manage cell_v2 discover_hosts" command +# which is needed to discover compute nodes and +# register them with a parent cell in Nova. +# This assumes that /etc/nova/nova.conf exists +# and has the following entries filled in: +# +# [api_database] +# connection = This is the URL to the nova_api database +# +# In other words this should be run on the primary +# (API) node in a multi-node setup. + +if [[ -x $(which nova-manage) ]]; then + nova-manage cell_v2 discover_hosts --verbose +fi diff --git a/tools/dstat.sh b/tools/dstat.sh index 3c0b3be089..e6cbb0f21c 100755 --- a/tools/dstat.sh +++ b/tools/dstat.sh @@ -9,14 +9,23 @@ # Assumes: # - dstat command is installed -# Retreive log directory as argument from calling script. +# Retrieve log directory as argument from calling script. LOGDIR=$1 +DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem" +if dstat --version | grep -q 'pcp-dstat' ; then + # dstat is unmaintained, and moving to a plugin of performance + # co-pilot. Fedora 29 for example has rolled this out. It's + # mostly compatible, except for a few options which are not + # implemented (yet?) + DSTAT_TOP_OPTS="" +fi + # Command line arguments for primary DStat process. -DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --swap" +DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp" # Command-line arguments for secondary background DStat process. -DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log" +DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log" # Execute and background the secondary dstat process and discard its output. dstat $DSTAT_CSV_OPTS >& /dev/null & diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 193a1f7aba..9e2818f2cc 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -5,16 +5,6 @@ # fixup_stuff.sh # # All distro and package specific hacks go in here -# -# - prettytable 0.7.2 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - httplib2 0.8 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - Fedora: -# - set selinux not enforcing -# - uninstall firewalld (f20 only) # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone @@ -36,75 +26,22 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi -# Keystone Port Reservation -# ------------------------- -# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from -# being used as ephemeral ports by the system. The default(s) are 35357 and -# 35358 which are in the Linux defined ephemeral port range (in disagreement -# with the IANA ephemeral port range). This is a workaround for bug #1253482 -# where Keystone will try and bind to the port and the port will already be -# in use as an ephemeral port by another process. This places an explicit -# exception into the Kernel for the Keystone AUTH ports. -keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - -# Only do the reserved ports when available, on some system (like containers) -# where it's not exposed we are almost pretty sure these ports would be -# exclusive for our DevStack. -if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} - else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. 
-    sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
-    fi
-else
-    echo_summary "WARNING: unable to reserve keystone ports"
-fi
-
-
 # Python Packages
 # ---------------

-# get_package_path python-package    # in import notation
-function get_package_path {
-    local package=$1
-    echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])")
-}
-
-
-# Pre-install affected packages so we can fix the permissions
-# These can go away once we are confident that pip 1.4.1+ is available everywhere
-
-# Fix prettytable 0.7.2 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install 'prettytable>=0.7'
-PACKAGE_DIR=$(get_package_path prettytable)
-# Only fix version 0.7.2
-dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
-if [[ -d $dir ]]; then
-    sudo chmod +r $dir/*
-fi
-
-# Fix httplib2 0.8 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install httplib2
-PACKAGE_DIR=$(get_package_path httplib2)
-# Only fix version 0.8
-dir=$(echo $PACKAGE_DIR-0.8*)
-if [[ -d $dir ]]; then
-    sudo chmod +r $dir/*
-fi
-
-if is_fedora; then
+function fixup_fedora {
+    if ! is_fedora; then
+        return
+    fi
     # Disable selinux to avoid configuring to allow Apache access
     # to Horizon files (LP#1175444)
     if selinuxenabled; then
+        # persist selinux config across reboots
+        cat << EOF | sudo tee /etc/selinux/config
+SELINUX=permissive
+SELINUXTYPE=targeted
+EOF
+        # then disable at runtime
         sudo setenforce 0
     fi

@@ -123,7 +60,7 @@ if is_fedora; then
     # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031
     # [2] https://bugs.launchpad.net/neutron/+bug/1455303
     # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp
-    # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html
+    # [4] https://docs.openstack.org/devstack/latest/guides/neutron.html
     if is_package_installed firewalld; then
         sudo systemctl disable firewalld
         # The iptables service files are no longer included by default,
@@ -135,34 +72,42 @@ if is_fedora; then
         fi
     fi

-    if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then
-        # requests ships vendored version of chardet/urllib3, but on
-        # fedora these are symlinked back to the primary versions to
-        # avoid duplication of code on disk.  This is fine when
-        # maintainers keep things in sync, but since devstack takes
-        # over and installs later versions via pip we can end up with
-        # incompatible versions.
-        #
-        # The rpm package is not removed to preserve the dependent
-        # packages like cloud-init; rather we remove the symlinks and
-        # force a re-install of requests so the vendored versions it
-        # wants are present.
-        #
-        # Realted issues:
-        # https://bugs.launchpad.net/glance/+bug/1476770
-        # https://bugzilla.redhat.com/show_bug.cgi?id=1253823
-
-        base_path=$(get_package_path requests)/packages
-        if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
-            sudo rm -f $base_path/{chardet,urllib3}
-            # install requests with the bundled urllib3 to avoid conflicts
-            pip_install --upgrade --force-reinstall requests
-        fi
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency.
+    sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
+
+    # After updating setuptools based on the requirements, the files from the
+    # python3-setuptools RPM are deleted, which breaks some tools such as
+    # semanage (used in diskimage-builder) that use the -s flag of the python
+    # interpreter, enforcing the use of the packages from /usr/lib.
+    # Importing setuptools in such an environment fails.
+    # Enforce the package re-installation to fix those applications.
+    if is_package_installed python3-setuptools; then
+        sudo dnf reinstall -y python3-setuptools
+    fi
+}
+
+function fixup_ubuntu {
+    if ! is_ubuntu; then
+        return
     fi
-fi

-# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
-# connection issues under proxy, hence uninstalling python-virtualenv package
-# and installing the latest version using pip.
-uninstall_package python-virtualenv
-pip_install -U virtualenv
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency.
+    sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
+    sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+    sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
+}
+
+function fixup_all {
+    fixup_ubuntu
+    fixup_fedora
+}
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index bbad1bf502..bc28515a26 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3

 # Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
 #
@@ -19,17 +19,21 @@
 #
 # In order to function correctly, the environment in which the
 # script runs must have
-#  * network access to the review.openstack.org Gerrit API
+#  * network access to the review.opendev.org Gerrit API
 #    working directory
-#  * network access to https://git.openstack.org/cgit
+#  * network access to https://opendev.org/

+import functools
 import logging
 import json

 import requests
+from requests.adapters import HTTPAdapter
+from requests.packages.urllib3.util.retry import Retry
+

 logging.basicConfig(level=logging.DEBUG)

-url = 'https://review.openstack.org/projects/'
+url = 'https://review.opendev.org/projects/'

 # This is what a project looks like
 '''
@@ -39,23 +43,44 @@
   },
 '''

-def is_in_openstack_namespace(proj):
-    # only interested in openstack namespace (e.g. not retired
-    # stackforge, etc)
-    return proj.startswith('openstack/')
+def is_in_wanted_namespace(proj):
+    # only interested in openstack or x namespace (e.g. not retired
+    # stackforge, etc).
+    #
+    # openstack/openstack is a "super-repo" of openstack projects as
+    # submodules; it can cause gitea to time out with a 500 and thus
+    # stop this script.  Skip it.
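+    #
+    # As an illustration (hypothetical repo names): "openstack/devstack"
+    # and "x/some-plugin" would pass this filter, while
+    # "stackforge/foo", "stackforge-attic/bar" and
+    # "openstack/openstack" would not.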
+    if proj.startswith('stackforge/') or \
+            proj.startswith('stackforge-attic/') or \
+            proj == "openstack/openstack":
+        return False
+    else:
+        return True

 # Check if this project has a plugin file
-def has_devstack_plugin(proj):
-    r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
+def has_devstack_plugin(session, proj):
+    # Don't link in the deb packaging repos
+    if "openstack/deb-" in proj:
+        return False
+    r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj)
     return r.status_code == 200

 logging.debug("Getting project list from %s" % url)
 r = requests.get(url)
-projects = sorted(filter(is_in_openstack_namespace, json.loads(r.text[4:])))
+projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))

 logging.debug("Found %d projects" % len(projects))

-found_plugins = filter(has_devstack_plugin, projects)
+s = requests.Session()
+# sometimes gitea gives us a 500 error; retry sanely
+# https://stackoverflow.com/a/35636367
+# We need to disable raise_on_status because if any repo ends up with a
+# 500 then the propose-updates job which runs this script will fail.
+retries = Retry(total=3, backoff_factor=1,
+                status_forcelist=[ 500 ],
+                raise_on_status=False)
+s.mount('https://', HTTPAdapter(max_retries=retries))
+
+found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)

 for project in found_plugins:
-    # strip of openstack/
-    print(project[10:])
+    print(project)
diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh
index 95f13318b8..3307943df9 100755
--- a/tools/generate-devstack-plugins-list.sh
+++ b/tools/generate-devstack-plugins-list.sh
@@ -28,9 +28,9 @@
 #  * the environment variable git_dir pointing to the location
 #  * of said git repositories
 # ) OR (
-#  * network access to the review.openstack.org Gerrit API
+#  * network access to the review.opendev.org Gerrit API
 #    working directory
-#  * network access to https://git.openstack.org/cgit
+#  * network access to https://opendev.org
 # ))
 #
 # If a file named data/devstack-plugins-registry.header or
@@ -50,13 +50,11 @@ function title_underline {
 }

 (
-declare -A plugins
-
 if [[ -r data/devstack-plugins-registry.header ]]; then
     cat data/devstack-plugins-registry.header
 fi

-sorted_plugins=$(python tools/generate-devstack-plugins-list.py)
+sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py)

 # find the length of the name column & pad
 name_col_len=$(echo "${sorted_plugins}" | wc -L)
@@ -65,7 +63,7 @@ name_col_len=$(( name_col_len + 2 ))

 # ====================== ===
 # Plugin Name            URL
 # ====================== ===
-# foobar `git://... `__
+# foobar `https://... `__
 # ...
printf "\n\n" @@ -74,8 +72,8 @@ printf "%-${name_col_len}s %s\n" "Plugin Name" "URL" title_underline ${name_col_len} for plugin in ${sorted_plugins}; do - giturl="git://git.openstack.org/openstack/${plugin}" - gitlink="https://git.openstack.org/cgit/openstack/${plugin}" + giturl="https://opendev.org/${plugin}" + gitlink="https://opendev.org/${plugin}" printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__" done diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..b958af61b2 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,220 @@ +#!/usr/bin/python3 + +import argparse +import csv +import datetime +import glob +import itertools +import json +import logging +import os +import re +import socket +import subprocess +import sys + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) + +LOG = logging.getLogger('perf') + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = tryint(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + +def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + + with db: + with db.cursor() as cur: + cur.execute('SELECT db,op,count FROM queries') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + 
return []
+
+        # Tempest's User-Agent is unchanged, but client libraries and
+        # inter-service API calls use proper strings.  So assume
+        # 'python-urllib' is tempest so we can tell it apart.
+        if 'python-urllib' in fields['agent'].lower():
+            agent = 'tempest'
+        else:
+            agent = fields['agent'].split(' ')[0]
+            if agent.startswith('python-'):
+                agent = agent.replace('python-', '')
+            if '/' in agent:
+                agent = agent.split('/')[0]
+
+        if agent in ignore_agents:
+            continue
+
+        try:
+            service, rest = url.strip('/').split('/', 1)
+        except ValueError:
+            # Root calls like "GET /identity"
+            service = url.strip('/')
+            rest = ''
+
+        if not service.isalpha():
+            ignored_services.add(service)
+            continue
+
+        method_key = '%s-%s' % (agent, method)
+        try:
+            length = int(fields['length'])
+        except ValueError:
+            LOG.warning('[%s] Failed to parse length %r from line %r' % (
+                logfile, fields['length'], line))
+            length = 0
+        stats.setdefault(service, {'largest': 0})
+        stats[service].setdefault(method_key, 0)
+        stats[service][method_key] += 1
+        stats[service]['largest'] = max(stats[service]['largest'],
+                                        length)
+
+    if ignored_services:
+        LOG.warning('Ignored services: %s' % ','.join(
+            sorted(ignored_services)))
+
+    # Flatten this for ES
+    return [{'service': service, 'log': os.path.basename(logfile),
+             **vals}
+            for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+    return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+                                              for log in logfiles))
+
+
+def get_report_info():
+    return {
+        'timestamp': datetime.datetime.now().isoformat(),
+        'hostname': socket.gethostname(),
+        'version': 2,
+    }
+
+
+if __name__ == '__main__':
+    process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--db-user', default='root',
+                        help=('MySQL user for collecting stats '
+                              '(default: "root")'))
+    parser.add_argument('--db-pass', default=None,
+                        help='MySQL password for db-user')
+    parser.add_argument('--db-host', default='localhost',
+                        help='MySQL hostname')
+    parser.add_argument('--apache-log', action='append', default=[],
+                        help='Collect API call stats from this apache log')
+    parser.add_argument('--process', action='append',
+                        default=process_defaults,
+                        help=('Include process stats for this cmdline regex '
+                              '(default is %s)' % ','.join(process_defaults)))
+    args = parser.parse_args()
+
+    logging.basicConfig(level=logging.WARNING)
+
+    data = {
+        'services': get_services_stats(),
+        'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+                                                        args.db_user,
+                                                        args.db_pass) or [],
+        'processes': psutil and get_processes_stats(args.process) or [],
+        'api': get_http_stats(args.apache_log),
+        'report': get_report_info(),
+    }
+
+    print(json.dumps(data, indent=2))
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 27b3d4612d..81231be9f3 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -1,5 +1,14 @@
 #!/bin/bash

+# Print out a list of image and other files to download for caching.
+# This is mostly used by the OpenStack infrastructure during daily
+# image builds to save the large images to /opt/cache/files (see [1])
+#
+# The two lists of URLs downloaded are the IMAGE_URLS and
+# EXTRA_CACHE_URLS, which are set up in stackrc
+#
+# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos
+
 # Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)

@@ -13,7 +22,7 @@ source $TOP_DIR/functions

 # Possible virt drivers, if we have more, add them here.  Always keep
 # dummy in the end position to trigger the fall through case.
-DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+DRIVERS="openvz ironic libvirt vsphere dummy"

 # Extra variables to trigger getting additional images.
 export ENABLED_SERVICES="h-api,tr-api"
@@ -31,12 +40,20 @@ for driver in $DRIVERS; do
     ALL_IMAGES+=$URLS
 done

-# Make a nice list
-echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
-
 # Sanity check - ensure we have a minimum number of images
 num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l)
-if [[ "$num" -lt 5 ]]; then
+if [[ "$num" -lt 4 ]]; then
     echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right."
     exit 1
 fi
+
+# These are extra non-image files that we want pre-cached.  This is kept
+# in a separate list because devstack loops over the IMAGE_LIST to
+# upload files to glance and these aren't images.  (This was a bit of
+# an after-thought, which is why the naming around this is very
+# image-centric)
+URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS)
+ALL_IMAGES+=$URLS
+
+# Make a nice combined list
+echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
diff --git a/tools/info.sh b/tools/info.sh
index c056fa73f8..282667f9d0 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -8,7 +8,7 @@
 # Output types are git,localrc,os,pip,pkg:
 #
 #   git|<project>|<branch>[<shas>]
-#   localtc|<var>=<value>
+#   localrc|<var>=<value>
 #   os|<var>=<value>
 #   pip|<package>|<version>
 #   pkg|<package>|<version>
diff --git a/tools/install_ebtables_workaround.sh b/tools/install_ebtables_workaround.sh
deleted file mode 100755
index 45ced87f13..0000000000
--- a/tools/install_ebtables_workaround.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash -eu
-#
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-#
-# This replaces the ebtables on your system with a wrapper script that
-# does implicit locking. This is needed if libvirt < 1.2.11 on your platform.
-
-EBTABLES=/sbin/ebtables
-EBTABLESREAL=/sbin/ebtables.real
-FILES=$TOP_DIR/files
-
-if [[ -f "$EBTABLES" ]]; then
-    if file $EBTABLES | grep ELF; then
-        sudo mv $EBTABLES $EBTABLESREAL
-        sudo install -m 0755 $FILES/ebtables.workaround $EBTABLES
-        echo "Replaced ebtables with locking workaround"
-    fi
-fi
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 12676998d2..027693fc0a 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -5,7 +5,7 @@
 # Update pip and friends to a known common version

 # Assumptions:
-# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed
+# - PYTHON3_VERSION refers to a version already installed

 set -o errexit

@@ -24,8 +24,20 @@ set -o xtrace

 FILES=$TOP_DIR/files

-PIP_GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
-LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
+# The URL from where the get-pip.py file gets downloaded. If a local
+# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that
+# mirror in local.conf to avoid download timeouts.
+# Example: +# PIP_GET_PIP_URL="http://local-server/get-pip.py" +# +# Note that if get-pip.py already exists in $FILES this script will +# not re-download or check for a new version. For example, this is +# done by openstack-infra diskimage-builder elements as part of image +# preparation [1]. This prevents any network access, which can be +# unreliable in CI situations. +# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip + +PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -33,7 +45,7 @@ echo "Distro: $DISTRO" function get_versions { # FIXME(dhellmann): Deal with multiple python versions here? This # is just used for reporting, so maybe not? - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" @@ -44,12 +56,15 @@ function get_versions { function install_get_pip { + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + # If get-pip.py isn't python, delete it. This was probably an # outage on the server. - if [[ -r $LOCAL_PIP ]]; then - if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then - echo "WARNING: Corrupt $LOCAL_PIP found removing" - rm $LOCAL_PIP + if [[ -r $_local_pip ]]; then + if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip fi fi @@ -63,23 +78,20 @@ function install_get_pip { # Thus we use curl's "-z" feature to always check the modified # since and only download if a new version is out -- but only if # it seems we downloaded the file originally. - if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then # only test freshness if LOCAL_PIP is actually there, # otherwise we generate a scary warning. local timecond="" - if [[ -r $LOCAL_PIP ]]; then - timecond="-z $LOCAL_PIP" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" fi curl -f --retry 6 --retry-delay 5 \ - $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + $timecond -o $_local_pip $_pip_url || \ die $LINENO "Download of get-pip.py failed" - touch $LOCAL_PIP.downloaded - fi - sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt - if python3_enabled; then - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt + touch $_local_pip.downloaded fi + sudo -H -E python${PYTHON3_VERSION} $_local_pip } @@ -101,36 +113,37 @@ function configure_pypi_alternative_url { } -# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time -# pkg_resources inspects the list of installed Python packages if there are -# non-compliant version numbers in the egg-info (for example, from distro -# system packaged Python libraries). This is off by default after 8.2 but can -# be enabled by uncommenting the lines below. -#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources -#export PYTHONWARNINGS - # Show starting versions get_versions -# Do pip - -# Eradicate any and all system packages - -# Python in fedora depends on the python-pip package so removing it -# results in a nonfunctional system. pip on fedora installs to /usr so pip -# can safely override the system pip for all versions of fedora -if ! 
is_fedora ; then - uninstall_package python-pip - uninstall_package python3-pip -fi - -install_get_pip - if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. + # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. This is included via rpms/general + : # Simply fall through +elif is_ubuntu; then + # pip on Ubuntu 20.04 and higher is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt +else + install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools +fi + set -x -pip_install -U setuptools + get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 8895e1e77c..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -74,16 +74,13 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi fi -if python3_enabled; then - install_python3 -fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/make_cert.sh b/tools/make_cert.sh index 2628b40524..0212d0033a 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -27,7 +27,7 @@ function usage { } CN=$1 -if [ -z "$CN" ]]; then +if [ -z "$CN" ]; then usage fi ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} @@ -45,11 +45,12 @@ DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem # Make sure the CA is set up configure_CA +fix_system_ca_bundle_path init_CA # Create the server cert make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME # Create a cert bundle -cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT - +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh new file mode 100755 index 0000000000..2f404c26fb --- /dev/null +++ b/tools/memory_tracker.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
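+
+# memory_tracker polls the amount of available memory (MemAvailable
+# when the kernel provides it, free+buffers+cache otherwise) and, each
+# time a new low point is seen, logs a snapshot of /proc/meminfo and a
+# process listing so that peak memory consumers can be identified
+# after a run.  Mlocked (unevictable) memory is reported separately
+# whenever the Unevictable count changes.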
+
+set -o errexit
+
+# TODO(frickler): make this use stackrc variables
+if [ -x /opt/stack/data/venv/bin/python ]; then
+    PYTHON=/opt/stack/data/venv/bin/python
+else
+    PYTHON=${PYTHON:-python3}
+fi
+
+# time to sleep between checks
+SLEEP_TIME=20
+
+# MemAvailable is the best estimation and has built-in heuristics
+# around reclaimable memory.  However, it is not available until 3.14
+# kernel (i.e. Ubuntu LTS Trusty misses it).  In that case, we fall
+# back to free+buffers+cache as the available memory.
+USE_MEM_AVAILABLE=0
+if grep -q '^MemAvailable:' /proc/meminfo; then
+    USE_MEM_AVAILABLE=1
+fi
+
+function get_mem_unevictable {
+    awk '/^Unevictable:/ {print $2}' /proc/meminfo
+}
+
+function get_mem_available {
+    if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then
+        awk '/^MemAvailable:/ {print $2}' /proc/meminfo
+    else
+        awk '/^MemFree:/ {free=$2}
+             /^Buffers:/ {buffers=$2}
+             /^Cached:/ {cached=$2}
+             END { print free+buffers+cached }' /proc/meminfo
+    fi
+}
+
+function tracker {
+    local low_point
+    local unevictable_point
+    low_point=$(get_mem_available)
+    # log mlocked memory at least on first iteration
+    unevictable_point=0
+    while [ 1 ]; do
+
+        local mem_available
+        mem_available=$(get_mem_available)
+
+        local unevictable
+        unevictable=$(get_mem_unevictable)
+
+        if [ $mem_available -lt $low_point -o $unevictable -ne $unevictable_point ]; then
+            echo "[[["
+            date
+
+            # whenever we see less memory available than last time, dump the
+            # snapshot of current usage; i.e. checking the latest entry in the file
+            # will give the peak-memory usage
+            if [[ $mem_available -lt $low_point ]]; then
+                low_point=$mem_available
+                echo "---"
+                # always available greppable output; given difference in
+                # meminfo output as described above...
+                echo "memory_tracker low_point: $mem_available"
+                echo "---"
+                cat /proc/meminfo
+                echo "---"
+                # would a hierarchical view be more useful (-H)?  output is
+                # not sorted by usage then, however, and the first
+                # question is "what's using up the memory"
+                #
+                # there are a lot of kernel threads, especially on an 8-cpu
+                # system.  do a best-effort removal to improve
+                # signal/noise ratio of output.
+                ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 |
+                    grep -v ']$'
+            fi
+            echo "---"
+
+            # list processes that lock memory from swap
+            if [[ $unevictable -ne $unevictable_point ]]; then
+                unevictable_point=$unevictable
+                ${PYTHON} $(dirname $0)/mlock_report.py
+            fi
+
+            echo "]]]"
+        fi
+        sleep $SLEEP_TIME
+    done
+}
+
+function usage {
+    echo "Usage: $0 [-x] [-s N]" 1>&2
+    exit 1
+}
+
+while getopts ":s:x" opt; do
+    case $opt in
+        s)
+            SLEEP_TIME=$OPTARG
+            ;;
+        x)
+            set -o xtrace
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+tracker
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
new file mode 100644
index 0000000000..8cbda15895
--- /dev/null
+++ b/tools/mlock_report.py
@@ -0,0 +1,53 @@
+# This tool lists processes that lock memory pages from swapping to disk.
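+#
+# It works by scanning /proc/<pid>/status for the VmLck field, e.g.:
+#
+#   VmLck:      1024 kB
+#
+# (value illustrative); any process with a non-zero locked total is
+# included in the report.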
+
+import re
+
+import psutil
+
+
+LCK_SUMMARY_REGEX = re.compile(
+    r"^VmLck:\s+(?P<locked>[\d]+)\s+kB", re.MULTILINE)
+
+
+def main():
+    try:
+        print(_get_report())
+    except Exception as e:
+        print("Failure listing processes locking memory: %s" % str(e))
+        raise
+
+
+def _get_report():
+    mlock_users = []
+    for proc in psutil.process_iter():
+        # sadly psutil does not expose locked pages info, that's why we
+        # iterate over the /proc/%pid/status files manually
+        try:
+            s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
+            with s:
+                for line in s:
+                    result = LCK_SUMMARY_REGEX.search(line)
+                    if result:
+                        locked = int(result.group('locked'))
+                        if locked:
+                            mlock_users.append({'name': proc.name(),
+                                                'pid': proc.pid,
+                                                'locked': locked})
+        except OSError:
+            # pids can disappear, we're ok with that
+            continue
+
+    # produce a single line log message with per process mlock stats
+    if mlock_users:
+        return "; ".join(
+            "[%(name)s (pid:%(pid)s)]=%(locked)dKB" % args
+            # log heavy users first
+            for args in sorted(mlock_users, key=lambda d: d['locked'],
+                               reverse=True)
+        )
+    else:
+        return "no locked memory"
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/outfilter.py b/tools/outfilter.py
old mode 100755
new mode 100644
index f82939be1d..3955d39794
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#
+#!/usr/bin/env python3
+
 # Copyright 2014 Hewlett-Packard Development Company, L.P.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,8 +26,8 @@
 import re
 import sys

-IGNORE_LINES = re.compile('(set \+o|xtrace)')
-HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|')
+IGNORE_LINES = re.compile(r'(set \+o|xtrace)')
+HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|')


 def get_options():
@@ -36,6 +36,13 @@ def get_options():
     parser.add_argument('-o', '--outfile',
                         help='Output file for content',
                         default=None)
+    # NOTE(ianw): This is intended for the case where your stdout is
+    # being captured by something like ansible which independently
+    # logs timestamps on the lines it receives.  Note that if using an
+    # output file, those log lines are still timestamped.
+    parser.add_argument('-b', '--no-timestamp', action='store_true',
+                        help='Do not prefix stdout with timestamp (bare)',
+                        default=False)
     parser.add_argument('-v', '--verbose', action='store_true',
                         default=False)
     return parser.parse_args()
@@ -50,33 +57,43 @@ def main():
     opts = get_options()
     outfile = None
     if opts.outfile:
-        outfile = open(opts.outfile, 'a', 0)
+        # note, binary mode so we can do unbuffered output.
+        outfile = open(opts.outfile, 'ab', 0)

     # Otherwise fileinput reprocess args as files
     sys.argv = []
-    while True:
-        line = sys.stdin.readline()
-        if not line:
-            return 0
+    for line in iter(sys.stdin.readline, ''):
         # put skip lines here
         if skip_line(line):
             continue

-        # This prevents us from nesting date lines, because
-        # we'd like to pull this in directly in Grenade and not double
-        # up on DevStack lines
+        # This prevents us from nesting date lines, because we'd like
+        # to pull this in directly in Grenade and not double up on
+        # DevStack lines.
+        # NOTE(ianw): we could actually strip the extra ts in "bare"
+        # mode (which came after this)? ... 
as we get more experience + # with zuulv3 native jobs and ansible capture it may become + # clearer what to do if HAS_DATE.search(line) is None: - now = datetime.datetime.utcnow() - line = ("%s | %s" % ( + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) + ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) + else: + ts_line = line if opts.verbose: - sys.stdout.write(line) + sys.stdout.write(line if opts.no_timestamp else ts_line) sys.stdout.flush() + if outfile: - outfile.write(line) + # We've opened outfile as a binary file to get the + # non-buffered behaviour. on python3, sys.stdin was + # opened with the system encoding and made the line into + # utf-8, so write the logfile out in utf-8 bytes. + outfile.write(ts_line.encode('utf-8')) outfile.flush() diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh deleted file mode 100755 index ecbd79a0bc..0000000000 --- a/tools/peakmem_tracker.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -set -o errexit - -# time to sleep between checks -SLEEP_TIME=20 - -# MemAvailable is the best estimation and has built-in heuristics -# around reclaimable memory. However, it is not available until 3.14 -# kernel (i.e. Ubuntu LTS Trusty misses it). In that case, we fall -# back to free+buffers+cache as the available memory. -USE_MEM_AVAILBLE=0 -if grep -q '^MemAvailable:' /proc/meminfo; then - USE_MEM_AVAILABLE=1 -fi - -function get_mem_available { - if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then - awk '/^MemAvailable:/ {print $2}' /proc/meminfo - else - awk '/^MemFree:/ {free=$2} - /^Buffers:/ {buffers=$2} - /^Cached:/ {cached=$2} - END { print free+buffers+cached }' /proc/meminfo - fi -} - -# whenever we see less memory available than last time, dump the -# snapshot of current usage; i.e. checking the latest entry in the -# file will give the peak-memory usage -function tracker { - local low_point - low_point=$(get_mem_available) - while [ 1 ]; do - - local mem_available - mem_available=$(get_mem_available) - - if [[ $mem_available -lt $low_point ]]; then - low_point=$mem_available - echo "[[[" - date - echo "---" - # always available greppable output; given difference in - # meminfo output as described above... - echo "peakmem_tracker low_point: $mem_available" - echo "---" - cat /proc/meminfo - echo "---" - # would hierarchial view be more useful (-H)? output is - # not sorted by usage then, however, and the first - # question is "what's using up the memory" - # - # there are a lot of kernel threads, especially on a 8-cpu - # system. do a best-effort removal to improve - # signal/noise ratio of output. 
- ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 | - grep -v ']$' - echo "]]]" - fi - - sleep $SLEEP_TIME - done -} - -function usage { - echo "Usage: $0 [-x] [-s N]" 1>&2 - exit 1 -} - -while getopts ":s:x" opt; do - case $opt in - s) - SLEEP_TIME=$OPTARG - ;; - x) - set -o xtrace - ;; - *) - usage - ;; - esac -done -shift $((OPTIND-1)) - -tracker diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index dba7502652..ab8e8dfca8 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -30,7 +30,8 @@ ping_neutron.sh [ping args] This provides a wrapper to ping neutron guests that are on isolated tenant networks that the caller can't normally reach. It does so by -creating a network namespace probe. +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. It takes arguments like ping, except the first arg must be the network name. @@ -44,6 +45,12 @@ EOF exit 1 } +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. +function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + NET_NAME=$1 if [[ -z "$NET_NAME" ]]; then @@ -53,12 +60,11 @@ fi REMAINING_ARGS="${@:2}" -# BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}') -PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) # This runs a command inside the specific netns -NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" +NET_NS_CMD="ip netns exec $NET_NS" PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" diff --git a/tools/uec/meta.py b/tools/uec/meta.py deleted file mode 100644 index 1d994a60d6..0000000000 --- a/tools/uec/meta.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import BaseHTTPServer -import SimpleHTTPServer -import sys - - -def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, - ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port.""" - - server_address = (host, port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print("Serving HTTP on", sa[0], "port", sa[1], "...") - httpd.serve_forever() - -if __name__ == '__main__': - if sys.argv[1:]: - address = sys.argv[1] - else: - address = '0.0.0.0' - if ':' in address: - host, port = address.split(':') - else: - host = address - port = 8080 - - main(host, int(port)) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index eb7265f76c..87312d9469 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,14 +14,14 @@ # Update the clouds.yaml file. - import argparse import os.path +import sys import yaml -class UpdateCloudsYaml(object): +class UpdateCloudsYaml: def __init__(self, args): if args.file: self._clouds_path = args.file @@ -35,18 +35,26 @@ def __init__(self, args): self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, - 'identity_api_version': args.os_identity_api_version, - 'volume_api_version': args.os_volume_api_version, 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, + 'user_domain_id': 'default', 'password': args.os_password, - 'project_name': args.os_project_name, }, } - if args.os_identity_api_version == '3': - self._cloud_data['auth']['user_domain_id'] = 'default' + + if args.os_project_name and args.os_system_scope: + print( + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." + ) + + if args.os_system_scope: # system-scoped + self._cloud_data['auth']['system_scope'] = args.os_system_scope + elif args.os_project_name: # project-scoped + self._cloud_data['auth']['project_name'] = args.os_project_name self._cloud_data['auth']['project_domain_id'] = 'default' + if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert @@ -58,7 +66,7 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._clouds = yaml.load(clouds_file) + self._clouds = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. 
print("The user clouds.yaml file didn't exist.") @@ -82,13 +90,12 @@ def main(): parser.add_argument('--file') parser.add_argument('--os-cloud', required=True) parser.add_argument('--os-region-name', default='RegionOne') - parser.add_argument('--os-identity-api-version', default='3') - parser.add_argument('--os-volume-api-version', default='2') parser.add_argument('--os-cacert') parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) parser.add_argument('--os-password', required=True) - parser.add_argument('--os-project-name', required=True) + parser.add_argument('--os-project-name') + parser.add_argument('--os-system-scope') args = parser.parse_args() diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py new file mode 100644 index 0000000000..dc18fa6d8a --- /dev/null +++ b/tools/verify-ipv6-address.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import sys + +def main(): + parser = argparse.ArgumentParser( + description="Check if a given string is a valid IPv6 address.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "address", + help=( + "The IPv6 address string to validate.\n" + "Examples:\n" + " 2001:0db8:85a3:0000:0000:8a2e:0370:7334\n" + " 2001:db8::1\n" + " ::1\n" + " fe80::1%eth0 (scope IDs are handled)" + ), + ) + args = parser.parse_args() + + try: + # try to create a IPv6Address: if we fail to parse or get an + # IPv4Address then die + ip_obj = ipaddress.ip_address(args.address.strip('[]')) + if isinstance(ip_obj, ipaddress.IPv6Address): + sys.exit(0) + else: + sys.exit(1) + except ValueError: + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred during validation: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh new file mode 100755 index 0000000000..a1acecbb3f --- /dev/null +++ b/tools/verify-ipv6-only-deployments.sh @@ -0,0 +1,95 @@ +#!/bin/bash +# +# +# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that +# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack +# plugins are missing the required setting to listen on IPv6 address. This is run as part of +# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6' +# can expand the IPv6 verification specific to project by defining the new post-run script which +# will run along with this base script. +# If there are more common verification for IPv6 then we can always extent this script. + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd) +source $TOP_DIR/stackrc +source $TOP_DIR/openrc admin admin + +function verify_devstack_ipv6_setting { + local _service_host='' + _service_host=$(echo $SERVICE_HOST | tr -d []) + local _host_ipv6='' + _host_ipv6=$(echo $HOST_IPV6 | tr -d []) + local _service_listen_address='' + _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) + local _service_local_host='' + _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) + if [[ "$SERVICE_IP_VERSION" != 6 ]]; then + echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." 
+        exit 1
+    fi
+    if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then
+        echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address."
+        exit 1
+    fi
+    if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then
+        echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
+        exit 1
+    fi
+    if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then
+        echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
+        exit 1
+    fi
+    if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then
+        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
+        exit 1
+    fi
+    if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then
+        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
+        exit 1
+    fi
+    if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then
+        echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address."
+        exit 1
+    fi
+    echo "Devstack is properly configured with IPv6"
+    echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP
+}
+
+function sanity_check_system_ipv6_enabled {
+    if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then
+        echo "IPv6 is disabled in system"
+        exit 1
+    fi
+    echo "IPv6 is enabled in system"
+}
+
+function verify_service_listen_address_is_ipv6 {
+    local endpoints_verified=False
+    local all_ipv6=True
+    endpoints=$(openstack endpoint list -f value -c URL)
+    for endpoint in ${endpoints}; do
+        local endpoint_address=''
+        endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
+        endpoint_address=$(echo $endpoint_address | tr -d '[]')
+        if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then
+            all_ipv6=False
+            echo $endpoint ": This is not an IPv6 endpoint, which means the corresponding service is not listening on an IPv6 address."
+            continue
+        fi
+        endpoints_verified=True
+    done
+    if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
+        exit 1
+    fi
+    echo "All services deployed by devstack are on IPv6 endpoints"
+    echo $endpoints
+}
+
+# First, verify that the system has IPv6 enabled
+sanity_check_system_ipv6_enabled
+# Verify that devstack is configured properly with the IPv6 settings
+verify_devstack_ipv6_setting
+# Get all endpoints registered by devstack in keystone and verify that each endpoint's address is IPv6
+verify_service_listen_address_is_ipv6
diff --git a/tools/worlddump.py b/tools/worlddump.py
index e1ef544a55..26ced3f653 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Copyright 2014 Hewlett-Packard Development Company, L.P.
# @@ -19,10 +19,10 @@ import argparse import datetime -from distutils import spawn import fnmatch +import io import os -import os.path +import shutil import subprocess import sys @@ -31,9 +31,9 @@ 'nova-compute', 'neutron-dhcp-agent', 'neutron-l3-agent', - 'neutron-linuxbridge-agent', 'neutron-metadata-agent', 'neutron-openvswitch-agent', + 'cinder-volume', ) @@ -50,7 +50,7 @@ def get_options(): def filename(dirname, name=""): - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc) fmt = "worlddump-%Y-%m-%d-%H%M%S" if name: fmt += "-" + name @@ -74,7 +74,7 @@ def _dump_cmd(cmd): def _find_cmd(cmd): - if not spawn.find_executable(cmd): + if not shutil.which(cmd): print("*** %s not found: skipping" % cmd) return False return True @@ -105,9 +105,10 @@ def _bridge_list(): # This method gets max version searching 'OpenFlow versions 0x1:0x'. # And return a version value converted to an integer type. def _get_ofp_version(): - process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE) + process = subprocess.Popen(['ovs-ofctl', '--version'], + stdout=subprocess.PIPE) stdout, _ = process.communicate() - find_str = 'OpenFlow versions 0x1:0x' + find_str = b'OpenFlow versions 0x1:0x' offset = stdout.find(find_str) return int(stdout[offset + len(find_str):-1]) - 1 @@ -131,7 +132,7 @@ def disk_space(): def ebtables_dump(): - tables = ['filter', 'nat', 'broute'] + tables = ['filter', 'nat'] _header("EB Tables Dump") if not _find_cmd('ebtables'): return @@ -150,20 +151,24 @@ def iptables_dump(): def _netns_list(): process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE) stdout, _ = process.communicate() - return stdout.split() + # NOTE(jlvillal): Sometimes 'ip netns list' can return output like: + # qrouter-0805fd7d-c493-4fa6-82ca-1c6c9b23cd9e (id: 1) + # qdhcp-bb2cc6ae-2ae8-474f-adda-a94059b872b5 (id: 0) + output = [x.split()[0] for x in stdout.splitlines()] + return output def network_dump(): _header("Network Dump") - _dump_cmd("brctl show") - _dump_cmd("arp -n") - ip_cmds = ["addr", "link", "route"] + _dump_cmd("bridge link") + _dump_cmd("ip link show type bridge") + ip_cmds = ["neigh", "addr", "route", "-6 route"] for cmd in ip_cmds + ['netns']: _dump_cmd("ip %s" % cmd) for netns_ in _netns_list(): for cmd in ip_cmds: - args = {'netns': netns_, 'cmd': cmd} + args = {'netns': bytes.decode(netns_), 'cmd': cmd} _dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args) @@ -184,7 +189,7 @@ def ovs_dump(): _dump_cmd("sudo ovs-vsctl show") for ofctl_cmd in ofctl_cmds: for bridge in bridges: - args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge} + args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)} _dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args) @@ -196,7 +201,7 @@ def process_list(): def compute_consoles(): _header("Compute consoles") - for root, dirnames, filenames in os.walk('/opt/stack'): + for root, _, filenames in os.walk('/opt/stack'): for filename in fnmatch.filter(filenames, 'console.log'): fullpath = os.path.join(root, filename) _dump_cmd("sudo cat %s" % fullpath) @@ -216,12 +221,30 @@ def guru_meditation_reports(): print("guru meditation report in %s log" % service) +def var_core(): + if os.path.exists('/var/core'): + _header("/var/core dumps") + # NOTE(ianw) : see DEBUG_LIBVIRT_COREDUMPS. We could think + # about getting backtraces out of these. There are other + # tools out there that can do that sort of thing though. 
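+        # ("ls -ltrah" sorts the listing by mtime with oldest first,
+        # so the most recent core files appear last.)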
+ _dump_cmd("ls -ltrah /var/core") + + +def disable_stdio_buffering(): + # re-open STDOUT as binary, then wrap it in a + # TextIOWrapper, and write through everything. + binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0) + sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True) + + def main(): opts = get_options() fname = filename(opts.dir, opts.name) print("World dumping... see %s for details" % fname) - sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) - with open(fname, 'w') as f: + + disable_stdio_buffering() + + with io.open(fname, 'w') as f: os.dup2(f.fileno(), sys.stdout.fileno()) disk_space() process_list() @@ -231,6 +254,15 @@ def main(): ebtables_dump() compute_consoles() guru_meditation_reports() + var_core() + # Singular name for ease of log retrieval + copyname = os.path.join(opts.dir, 'worlddump') + if opts.name: + copyname += '-' + opts.name + copyname += '-latest.txt' + # We make a full copy to deal with jobs that may or may not + # gzip logs breaking symlinks. + shutil.copyfile(fname, copyname) if __name__ == '__main__': diff --git a/tools/xen/README.md b/tools/xen/README.md deleted file mode 100644 index 7062ecb48c..0000000000 --- a/tools/xen/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# Getting Started With XenServer and Devstack - -The purpose of the code in this directory it to help developers bootstrap a -XenServer 6.2 (older versions may also work) + OpenStack development -environment. This file gives some pointers on how to get started. - -Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The -OpenStack services are configured to run within a virtual machine (called OS -domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with -the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). - -The provided localrc helps to build a basic environment. - -## Introduction - -### Requirements - - - An internet-enabled network with a DHCP server on it - - XenServer box plugged in to the same network -This network will be used as the OpenStack management network. The VM Network -and the Public Network will not be connected to any physical interfaces, only -new virtual networks will be created by the `install_os_domU.sh` script. - -### Steps to follow - - - Install XenServer - - Download Devstack to XenServer - - Customise `localrc` - - Start `install_os_domU.sh` script - -### Brief explanation - -The `install_os_domU.sh` script will: - - Setup XenAPI plugins - - Create the named networks, if they don't exist - - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse - it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network - interface: - - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to - `MGT_BRIDGE_OR_NET_NAME` - - After the Ubuntu install process finished, the network configuration is - modified to: - - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi - must be accessible through this network. - - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` - - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - - Start devstack inside the created OpenStack VM - -## Step 1: Install Xenserver -Install XenServer on a clean box. You can download the latest XenServer for -free from: http://www.xenserver.org/ - -The XenServer IP configuration depends on your local network setup. If you are -using dhcp, make a reservation for XenServer, so its IP address won't change -over time. 
Make a note of the XenServer's IP address, as it has to be specified -in `localrc`. The other option is to manually specify the IP setup for the -XenServer box. Please make sure, that a gateway and a nameserver is configured, -as `install_os_domU.sh` will connect to github.com to get source-code snapshots. - -## Step 2: Download devstack -On your XenServer host, run the following commands as root: - - wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master - unzip -o master -d ./devstack - cd devstack/*/ - -## Step 3: Configure your localrc inside the devstack directory -Devstack uses a localrc for user-specific configuration. Note that -the `XENAPI_PASSWORD` must be your dom0 root password. -Of course, use real passwords if this machine is exposed. - - cat > ./localrc <$STAGING_DIR/etc/init/devstack.conf << EOF -start on stopped rc RUNLEVEL=[2345] - -console output -task - -pre-start script - rm -f /opt/stack/runsh.succeeded -end script - -script - initctl stop hvc0 || true - - # Read any leftover characters from standard input - while read -n 1 -s -t 0.1 -r ignored; do - true - done - - clear - - chown -R $STACK_USER /opt/stack - - su -c "/opt/stack/run.sh" $STACK_USER - - # Update /etc/issue - { - echo "OpenStack VM - Installed by DevStack" - IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p') - echo " Management IP: \$IPADDR" - echo -n " Devstack run: " - if [ -e /opt/stack/runsh.succeeded ]; then - echo "SUCCEEDED" - else - echo "FAILED" - fi - echo "" - } > /etc/issue - initctl start hvc0 > /dev/null 2>&1 -end script -EOF - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -HOSTS_FILE_IP=$PUB_IP -if [ $MGT_IP != "dhcp" ]; then - HOSTS_FILE_IP=$MGT_IP -fi -cat <$STAGING_DIR/etc/hosts -$HOSTS_FILE_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -print_interfaces_config > $STAGING_DIR/etc/network/interfaces - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -set -eux -( - flock -n 9 || exit 1 - - [ -e /opt/stack/runsh.succeeded ] && rm /opt/stack/runsh.succeeded - echo \$\$ >> /opt/stack/run_sh.pid - - cd /opt/stack/devstack - ./unstack.sh || true - ./stack.sh - - # Got to the end - success - touch /opt/stack/runsh.succeeded - rm /opt/stack/run_sh.pid -) 9> /opt/stack/.runsh_lock -EOF -chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/devstackubuntu_latecommand.sh b/tools/xen/devstackubuntu_latecommand.sh deleted file mode 100644 index 2afbe2cdf3..0000000000 --- a/tools/xen/devstackubuntu_latecommand.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -eux - -# Need to set barrier=0 to avoid a Xen bug -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 -sed -i -e 's/errors=/barrier=0,errors=/' /etc/fstab - -# Allow root to login with a password -sed -i -e 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config - -# Install the XenServer tools so IP addresses are reported -wget --no-proxy 
@XS_TOOLS_URL@ -O /root/tools.deb -dpkg -i /root/tools.deb -rm /root/tools.deb diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg deleted file mode 100644 index 80f334ba8e..0000000000 --- a/tools/xen/devstackubuntupreseed.cfg +++ /dev/null @@ -1,471 +0,0 @@ -### Contents of the preconfiguration file (for squeeze) -### Localization -# Preseeding only locale sets language, country and locale. -d-i debian-installer/locale string en_US - -# The values can also be preseeded individually for greater flexibility. -#d-i debian-installer/language string en -#d-i debian-installer/country string NL -#d-i debian-installer/locale string en_GB.UTF-8 -# Optionally specify additional locales to be generated. -#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8 - -# Keyboard selection. -# Disable automatic (interactive) keymap detection. -d-i console-setup/ask_detect boolean false -#d-i keyboard-configuration/modelcode string pc105 -d-i keyboard-configuration/layoutcode string us -# To select a variant of the selected layout (if you leave this out, the -# basic form of the layout will be used): -#d-i keyboard-configuration/variantcode string dvorak - -### Network configuration -# Disable network configuration entirely. This is useful for cdrom -# installations on non-networked devices where the network questions, -# warning and long timeouts are a nuisance. -#d-i netcfg/enable boolean false - -# netcfg will choose an interface that has link if possible. This makes it -# skip displaying a list if there is more than one interface. -d-i netcfg/choose_interface select auto - -# To pick a particular interface instead: -#d-i netcfg/choose_interface select eth1 - -# If you have a slow dhcp server and the installer times out waiting for -# it, this might be useful. -d-i netcfg/dhcp_timeout string 120 - -# If you prefer to configure the network manually, uncomment this line and -# the static network configuration below. -#d-i netcfg/disable_autoconfig boolean true - -# If you want the preconfiguration file to work on systems both with and -# without a dhcp server, uncomment these lines and the static network -# configuration below. -#d-i netcfg/dhcp_failed note -#d-i netcfg/dhcp_options select Configure network manually - -# Static network configuration. -#d-i netcfg/get_nameservers string 192.168.1.1 -#d-i netcfg/get_ipaddress string 192.168.1.42 -#d-i netcfg/get_netmask string 255.255.255.0 -#d-i netcfg/get_gateway string 192.168.1.1 -#d-i netcfg/confirm_static boolean true - -# Any hostname and domain names assigned from dhcp take precedence over -# values set here. However, setting the values still prevents the questions -# from being shown, even if values come from dhcp. -d-i netcfg/get_hostname string stack -d-i netcfg/get_domain string stackpass - -# Disable that annoying WEP key dialog. -d-i netcfg/wireless_wep string -# The wacky dhcp hostname that some ISPs use as a password of sorts. -#d-i netcfg/dhcp_hostname string radish - -# If non-free firmware is needed for the network or other hardware, you can -# configure the installer to always try to load it, without prompting. Or -# change to false to disable asking. -#d-i hw-detect/load_firmware boolean true - -### Network console -# Use the following settings if you wish to make use of the network-console -# component for remote installation over SSH. This only makes sense if you -# intend to perform the remainder of the installation manually. 
-#d-i anna/choose_modules string network-console -#d-i network-console/password password r00tme -#d-i network-console/password-again password r00tme - -### Mirror settings -# If you select ftp, the mirror/country string does not need to be set. -#d-i mirror/protocol string ftp -d-i mirror/country string manual -d-i mirror/http/hostname string archive.ubuntu.com -d-i mirror/http/directory string /ubuntu -d-i mirror/http/proxy string - -# Alternatively: by default, the installer uses CC.archive.ubuntu.com where -# CC is the ISO-3166-2 code for the selected country. You can preseed this -# so that it does so without asking. -#d-i mirror/http/mirror select CC.archive.ubuntu.com - -# Suite to install. -#d-i mirror/suite string squeeze -# Suite to use for loading installer components (optional). -#d-i mirror/udeb/suite string squeeze -# Components to use for loading installer components (optional). -#d-i mirror/udeb/components multiselect main, restricted - -### Clock and time zone setup -# Controls whether or not the hardware clock is set to UTC. -d-i clock-setup/utc boolean true - -# You may set this to any valid setting for $TZ; see the contents of -# /usr/share/zoneinfo/ for valid values. -d-i time/zone string US/Pacific - -# Controls whether to use NTP to set the clock during the install -d-i clock-setup/ntp boolean true -# NTP server to use. The default is almost always fine here. -d-i clock-setup/ntp-server string 0.us.pool.ntp.org - -### Partitioning -## Partitioning example -# If the system has free space you can choose to only partition that space. -# This is only honoured if partman-auto/method (below) is not set. -# Alternatives: custom, some_device, some_device_crypto, some_device_lvm. -#d-i partman-auto/init_automatically_partition select biggest_free - -# Alternatively, you may specify a disk to partition. If the system has only -# one disk the installer will default to using that, but otherwise the device -# name must be given in traditional, non-devfs format (so e.g. /dev/hda or -# /dev/sda, and not e.g. /dev/discs/disc0/disc). -# For example, to use the first SCSI/SATA hard disk: -#d-i partman-auto/disk string /dev/sda -# In addition, you'll need to specify the method to use. -# The presently available methods are: -# - regular: use the usual partition types for your architecture -# - lvm: use LVM to partition the disk -# - crypto: use LVM within an encrypted partition -d-i partman-auto/method string regular - -# If one of the disks that are going to be automatically partitioned -# contains an old LVM configuration, the user will normally receive a -# warning. This can be preseeded away... -d-i partman-lvm/device_remove_lvm boolean true -# The same applies to pre-existing software RAID array: -d-i partman-md/device_remove_md boolean true -# And the same goes for the confirmation to write the lvm partitions. -d-i partman-lvm/confirm boolean true - -# For LVM partitioning, you can select how much of the volume group to use -# for logical volumes. -#d-i partman-auto-lvm/guided_size string max -#d-i partman-auto-lvm/guided_size string 10GB -#d-i partman-auto-lvm/guided_size string 50% - -# You can choose one of the three predefined partitioning recipes: -# - atomic: all files in one partition -# - home: separate /home partition -# - multi: separate /home, /usr, /var, and /tmp partitions -d-i partman-auto/choose_recipe select atomic - -# Or provide a recipe of your own... -# If you have a way to get a recipe file into the d-i environment, you can -# just point at it. 
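# (A quick key to the recipe syntax shown below, per partman-auto-recipe.txt:
# each partition entry reads "<minimal size> <priority> <maximal size> <fs>",
# sizes in megabytes; so in "40 50 100 ext3" the partition gets at least 40MB,
# at most 100MB, with priority 50 deciding how contested free space is shared.)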
-#d-i partman-auto/expert_recipe_file string /hd-media/recipe - -# If not, you can put an entire recipe into the preconfiguration file in one -# (logical) line. This example creates a small /boot partition, suitable -# swap, and uses the rest of the space for the root partition: -#d-i partman-auto/expert_recipe string \ -# boot-root :: \ -# 40 50 100 ext3 \ -# $primary{ } $bootable{ } \ -# method{ format } format{ } \ -# use_filesystem{ } filesystem{ ext3 } \ -# mountpoint{ /boot } \ -# . \ -# 500 10000 1000000000 ext3 \ -# method{ format } format{ } \ -# use_filesystem{ } filesystem{ ext3 } \ -# mountpoint{ / } \ -# . \ -# 64 512 300% linux-swap \ -# method{ swap } format{ } \ -# . - -# If you just want to change the default filesystem from ext3 to something -# else, you can do that without providing a full recipe. -d-i partman/default_filesystem string ext3 - -# The full recipe format is documented in the file partman-auto-recipe.txt -# included in the 'debian-installer' package or available from D-I source -# repository. This also documents how to specify settings such as file -# system labels, volume group names and which physical devices to include -# in a volume group. - -# This makes partman automatically partition without confirmation, provided -# that you told it what to do using one of the methods above. -d-i partman-partitioning/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -## Partitioning using RAID -# The method should be set to "raid". -#d-i partman-auto/method string raid -# Specify the disks to be partitioned. They will all get the same layout, -# so this will only work if the disks are the same size. -#d-i partman-auto/disk string /dev/sda /dev/sdb - -# Next you need to specify the physical partitions that will be used. -#d-i partman-auto/expert_recipe string \ -# multiraid :: \ -# 1000 5000 4000 raid \ -# $primary{ } method{ raid } \ -# . \ -# 64 512 300% raid \ -# method{ raid } \ -# . \ -# 500 10000 1000000000 raid \ -# method{ raid } \ -# . - -# Last you need to specify how the previously defined partitions will be -# used in the RAID setup. Remember to use the correct partition numbers -# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported; -# devices are separated using "#". -# Parameters are: -# \ -# - -#d-i partman-auto-raid/recipe string \ -# 1 2 0 ext3 / \ -# /dev/sda1#/dev/sdb1 \ -# . \ -# 1 2 0 swap - \ -# /dev/sda5#/dev/sdb5 \ -# . \ -# 0 2 0 ext3 /home \ -# /dev/sda6#/dev/sdb6 \ -# . - -# For additional information see the file partman-auto-raid-recipe.txt -# included in the 'debian-installer' package or available from D-I source -# repository. - -# This makes partman automatically partition without confirmation. -d-i partman-md/confirm boolean true -d-i partman-partitioning/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -## Controlling how partitions are mounted -# The default is to mount by UUID, but you can also choose "traditional" to -# use traditional device names, or "label" to try filesystem labels before -# falling back to UUIDs. -#d-i partman/mount_style select uuid - -### Base system installation -# Configure APT to not install recommended packages by default. Use of this -# option can result in an incomplete system and should only be used by very -# experienced users. 
-#d-i base-installer/install-recommends boolean false - -# The kernel image (meta) package to be installed; "none" can be used if no -# kernel is to be installed. -d-i base-installer/kernel/image string linux-virtual - -### Account setup -# Skip creation of a root account (normal user account will be able to -# use sudo). The default is false; preseed this to true if you want to set -# a root password. -d-i passwd/root-login boolean true -# Alternatively, to skip creation of a normal user account. -d-i passwd/make-user boolean false - -# Root password, either in clear text -d-i passwd/root-password password stackpass -d-i passwd/root-password-again password stackpass -# or encrypted using an MD5 hash. -#d-i passwd/root-password-crypted password [MD5 hash] - -# To create a normal user account. -#d-i passwd/user-fullname string Ubuntu User -#d-i passwd/username string ubuntu -# Normal user's password, either in clear text -#d-i passwd/user-password password insecure -#d-i passwd/user-password-again password insecure -# or encrypted using an MD5 hash. -#d-i passwd/user-password-crypted password [MD5 hash] -# Create the first user with the specified UID instead of the default. -#d-i passwd/user-uid string 1010 -# The installer will warn about weak passwords. If you are sure you know -# what you're doing and want to override it, uncomment this. -d-i user-setup/allow-password-weak boolean true - -# The user account will be added to some standard initial groups. To -# override that, use this. -#d-i passwd/user-default-groups string audio cdrom video - -# Set to true if you want to encrypt the first user's home directory. -d-i user-setup/encrypt-home boolean false - -### Apt setup -# You can choose to install restricted and universe software, or to install -# software from the backports repository. -d-i apt-setup/restricted boolean true -d-i apt-setup/universe boolean true -d-i apt-setup/backports boolean true -# Uncomment this if you don't want to use a network mirror. -#d-i apt-setup/use_mirror boolean false -# Select which update services to use; define the mirrors to be used. -# Values shown below are the normal defaults. -#d-i apt-setup/services-select multiselect security -#d-i apt-setup/security_host string security.ubuntu.com -#d-i apt-setup/security_path string /ubuntu - -# Additional repositories, local[0-9] available -#d-i apt-setup/local0/repository string \ -# http://local.server/ubuntu squeeze main -#d-i apt-setup/local0/comment string local server -# Enable deb-src lines -#d-i apt-setup/local0/source boolean true -# URL to the public key of the local repository; you must provide a key or -# apt will complain about the unauthenticated repository and so the -# sources.list line will be left commented out -#d-i apt-setup/local0/key string http://local.server/key - -# By default the installer requires that repositories be authenticated -# using a known gpg key. This setting can be used to disable that -# authentication. Warning: Insecure, not recommended. -#d-i debian-installer/allow_unauthenticated boolean true - -### Package selection -#tasksel tasksel/first multiselect ubuntu-desktop -#tasksel tasksel/first multiselect lamp-server, print-server -#tasksel tasksel/first multiselect kubuntu-desktop -tasksel tasksel/first multiselect openssh-server - -# Individual additional packages to install -d-i pkgsel/include string cracklib-runtime curl wget ssh openssh-server tcpdump ethtool git sudo python-netaddr coreutils - -# Whether to upgrade packages after debootstrap. 
-# Allowed values: none, safe-upgrade, full-upgrade -d-i pkgsel/upgrade select safe-upgrade - -# Language pack selection -#d-i pkgsel/language-packs multiselect de, en, zh - -# Policy for applying updates. May be "none" (no automatic updates), -# "unattended-upgrades" (install security updates automatically), or -# "landscape" (manage system with Landscape). -d-i pkgsel/update-policy select unattended-upgrades - -# Some versions of the installer can report back on what software you have -# installed, and what software you use. The default is not to report back, -# but sending reports helps the project determine what software is most -# popular and include it on CDs. -#popularity-contest popularity-contest/participate boolean false - -# By default, the system's locate database will be updated after the -# installer has finished installing most packages. This may take a while, so -# if you don't want it, you can set this to "false" to turn it off. -d-i pkgsel/updatedb boolean false - -### Boot loader installation -# Grub is the default boot loader (for x86). If you want lilo installed -# instead, uncomment this: -#d-i grub-installer/skip boolean true -# To also skip installing lilo, and install no bootloader, uncomment this -# too: -#d-i lilo-installer/skip boolean true - -# With a few exceptions for unusual partitioning setups, GRUB 2 is now the -# default. If you need GRUB Legacy for some particular reason, then -# uncomment this: -d-i grub-installer/grub2_instead_of_grub_legacy boolean false - -# This is fairly safe to set, it makes grub install automatically to the MBR -# if no other operating system is detected on the machine. -d-i grub-installer/only_debian boolean true - -# This one makes grub-installer install to the MBR if it also finds some other -# OS, which is less safe as it might not be able to boot that other OS. -d-i grub-installer/with_other_os boolean true - -# Alternatively, if you want to install to a location other than the mbr, -# uncomment and edit these lines: -#d-i grub-installer/only_debian boolean false -#d-i grub-installer/with_other_os boolean false -#d-i grub-installer/bootdev string (hd0,0) -# To install grub to multiple disks: -#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0) - -# Optional password for grub, either in clear text -#d-i grub-installer/password password r00tme -#d-i grub-installer/password-again password r00tme -# or encrypted using an MD5 hash, see grub-md5-crypt(8). -#d-i grub-installer/password-crypted password [MD5 hash] - -# Use the following option to add additional boot parameters for the -# installed system (if supported by the bootloader installer). -# Note: options passed to the installer will be added automatically. -#d-i debian-installer/add-kernel-opts string nousb - -### Finishing up the installation -# During installations from serial console, the regular virtual consoles -# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next -# line to prevent this. -d-i finish-install/keep-consoles boolean true - -# Avoid that last message about the install being complete. -d-i finish-install/reboot_in_progress note - -# This will prevent the installer from ejecting the CD during the reboot, -# which is useful in some situations. -#d-i cdrom-detect/eject boolean false - -# This is how to make the installer shutdown when finished, but not -# reboot into the installed system. -#d-i debian-installer/exit/halt boolean true -# This will power off the machine instead of just halting it. 
-#d-i debian-installer/exit/poweroff boolean true - -### X configuration -# X can detect the right driver for some cards, but if you're preseeding, -# you override whatever it chooses. Still, vesa will work most places. -#xserver-xorg xserver-xorg/config/device/driver select vesa - -# A caveat with mouse autodetection is that if it fails, X will retry it -# over and over. So if it's preseeded to be done, there is a possibility of -# an infinite loop if the mouse is not autodetected. -#xserver-xorg xserver-xorg/autodetect_mouse boolean true - -# Monitor autodetection is recommended. -xserver-xorg xserver-xorg/autodetect_monitor boolean true -# Uncomment if you have an LCD display. -#xserver-xorg xserver-xorg/config/monitor/lcd boolean true -# X has three configuration paths for the monitor. Here's how to preseed -# the "medium" path, which is always available. The "simple" path may not -# be available, and the "advanced" path asks too many questions. -xserver-xorg xserver-xorg/config/monitor/selection-method \ - select medium -xserver-xorg xserver-xorg/config/monitor/mode-list \ - select 1024x768 @ 60 Hz - -### Preseeding other packages -# Depending on what software you choose to install, or if things go wrong -# during the installation process, it's possible that other questions may -# be asked. You can preseed those too, of course. To get a list of every -# possible question that could be asked during an install, do an -# installation, and then run these commands: -# debconf-get-selections --installer > file -# debconf-get-selections >> file - - -#### Advanced options -### Running custom commands during the installation -# d-i preseeding is inherently not secure. Nothing in the installer checks -# for attempts at buffer overflows or other exploits of the values of a -# preconfiguration file like this one. Only use preconfiguration files from -# trusted locations! To drive that home, and because it's generally useful, -# here's a way to run any shell command you'd like inside the installer, -# automatically. - -# This first command is run as early as possible, just after -# preseeding is read. -#d-i preseed/early_command string anna-install some-udeb -# This command is run immediately before the partitioner starts. It may be -# useful to apply dynamic partitioner preseeding that depends on the state -# of the disks (which may not be visible when preseed/early_command runs). -#d-i partman/early_command \ -# string debconf-set partman-auto/disk "$(list-devices disk | head -n1)" -# This command is run just before the install finishes, but when there is -# still a usable /target directory. You can chroot to /target and use it -# directly, or use the apt-install and in-target commands to easily install -# packages and run commands in the target system. 
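# (Illustrative only; in this repo the empty late_command below is filled in
# by install_os_domU.sh with sed. A hand-written equivalent would look like:
#   d-i preseed/late_command string in-target mkdir -p /tmp; \
#       in-target wget --no-proxy <host-ip>/latecommand.sh -O /root/latecommand.sh; \
#       in-target bash /root/latecommand.sh)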
-d-i preseed/late_command string diff --git a/tools/xen/functions b/tools/xen/functions deleted file mode 100644 index cf145686b5..0000000000 --- a/tools/xen/functions +++ /dev/null @@ -1,307 +0,0 @@ -#!/bin/bash - -function die_with_error { - local err_msg - - err_msg="$1" - - echo "$err_msg" >&2 - exit 1 -} - -function xapi_plugin_location { - for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins" "/usr/lib64/xapi/plugins"; do - if [ -d $PLUGIN_DIR ]; then - echo $PLUGIN_DIR - return 0 - fi - done - return 1 -} - -function create_directory_for_kernels { - if [ -d "/boot/guest" ]; then - echo "INFO: /boot/guest directory already exists, using that" >&2 - else - local local_path - local_path="$(get_local_sr_path)/os-guest-kernels" - mkdir -p $local_path - ln -s $local_path /boot/guest - fi -} - -function create_directory_for_images { - if [ -d "/images" ]; then - echo "INFO: /images directory already exists, using that" >&2 - else - local local_path - local_path="$(get_local_sr_path)/os-images" - mkdir -p $local_path - ln -s $local_path /images - fi -} - -function get_local_sr { - xe pool-list params=default-SR minimal=true -} - -function get_local_sr_path { - pbd_path="/var/run/sr-mount/$(get_local_sr)" - pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "` - if [ -n "$pbd_device_config_path" ]; then - pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true` - pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""` - fi - echo $pbd_path -} - -function find_ip_by_name { - local guest_name="$1" - local interface="$2" - - local period=10 - local max_tries=10 - local i=0 - - while true; do - if [ $i -ge $max_tries ]; then - echo "Timeout: ip address for interface $interface of $guest_name" - exit 11 - fi - - ipaddress=$(xe vm-list --minimal \ - name-label=$guest_name \ - params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") - - if [ -z "$ipaddress" ]; then - sleep $period - i=$((i+1)) - else - echo $ipaddress - break - fi - done -} - -function _vm_uuid { - local vm_name_label - - vm_name_label="$1" - - xe vm-list name-label="$vm_name_label" --minimal -} - -function _create_new_network { - local name_label - name_label=$1 - - xe network-create name-label="$name_label" -} - -function _multiple_networks_with_name { - local name_label - name_label=$1 - - # A comma indicates multiple matches - xe network-list name-label="$name_label" --minimal | grep -q "," -} - -function _network_exists { - local name_label - name_label=$1 - - ! [ -z "$(xe network-list name-label="$name_label" --minimal)" ] -} - -function _bridge_exists { - local bridge - bridge=$1 - - ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ] -} - -function _network_uuid { - local bridge_or_net_name - bridge_or_net_name=$1 - - if _bridge_exists "$bridge_or_net_name"; then - xe network-list bridge="$bridge_or_net_name" --minimal - else - xe network-list name-label="$bridge_or_net_name" --minimal - fi -} - -function add_interface { - local vm_name_label - local bridge_or_network_name - - vm_name_label="$1" - bridge_or_network_name="$2" - device_number="$3" - - local vm - local net - - vm=$(_vm_uuid "$vm_name_label") - net=$(_network_uuid "$bridge_or_network_name") - xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number -} - -function setup_network { - local bridge_or_net_name - bridge_or_net_name=$1 - - if ! 
_bridge_exists "$bridge_or_net_name"; then - if _network_exists "$bridge_or_net_name"; then - if _multiple_networks_with_name "$bridge_or_net_name"; then - cat >&2 << EOF -ERROR: Multiple networks found matching name-label to "$bridge_or_net_name" -please review your XenServer network configuration / localrc file. -EOF - exit 1 - fi - else - _create_new_network "$bridge_or_net_name" - fi - fi -} - -function bridge_for { - local bridge_or_net_name - bridge_or_net_name=$1 - - if _bridge_exists "$bridge_or_net_name"; then - echo "$bridge_or_net_name" - else - xe network-list name-label="$bridge_or_net_name" params=bridge --minimal - fi -} - -function xenapi_ip_on { - local bridge_or_net_name - bridge_or_net_name=$1 - - ip -4 addr show $(bridge_for "$bridge_or_net_name") |\ - awk '/inet/{split($2, ip, "/"); print ip[1];}' -} - -function xenapi_is_listening_on { - local bridge_or_net_name - bridge_or_net_name=$1 - - ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ] -} - -function parameter_is_specified { - local parameter_name - parameter_name=$1 - - compgen -v | grep "$parameter_name" -} - -function append_kernel_cmdline { - local vm_name_label - local kernel_args - - vm_name_label="$1" - kernel_args="$2" - - local vm - local pv_args - - vm=$(_vm_uuid "$vm_name_label") - pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm) - xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm -} - -function destroy_all_vifs_of { - local vm_name_label - - vm_name_label="$1" - - local vm - - vm=$(_vm_uuid "$vm_name_label") - IFS=, - for vif in $(xe vif-list vm-uuid=$vm --minimal); do - xe vif-destroy uuid="$vif" - done - unset IFS -} - -function have_multiple_hosts { - xe host-list --minimal | grep -q "," -} - -function attach_network { - local bridge_or_net_name - - bridge_or_net_name="$1" - - local net - local host - - net=$(_network_uuid "$bridge_or_net_name") - host=$(xe host-list --minimal) - - xe network-attach uuid=$net host-uuid=$host -} - -function set_vm_memory { - local vm_name_label - local memory - - vm_name_label="$1" - memory="$2" - - local vm - - vm=$(_vm_uuid "$vm_name_label") - - xe vm-memory-limits-set \ - static-min=${memory}MiB \ - static-max=${memory}MiB \ - dynamic-min=${memory}MiB \ - dynamic-max=${memory}MiB \ - uuid=$vm -} - -function max_vcpus { - local vm_name_label - - vm_name_label="$1" - - local vm - local host - local cpu_count - - host=$(xe host-list --minimal) - vm=$(_vm_uuid "$vm_name_label") - - cpu_count=$(xe host-param-get \ - param-name=cpu_info \ - uuid=$host | - sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g') - - if [ -z "$cpu_count" ]; then - # get dom0's vcpu count - cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l) - fi - - # Assert cpu_count is not empty - [ -n "$cpu_count" ] - - # Assert it has a numeric nonzero value - expr "$cpu_count" + 0 - - xe vm-param-set uuid=$vm VCPUs-max=$cpu_count - xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count -} - -function get_domid { - local vm_name_label - - vm_name_label="$1" - - xe vm-list name-label="$vm_name_label" params=dom-id minimal=true -} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh deleted file mode 100755 index 3a61215b5b..0000000000 --- a/tools/xen/install_os_domU.sh +++ /dev/null @@ -1,432 +0,0 @@ -#!/bin/bash - -# This script must be run on a XenServer or XCP machine -# -# It creates a DomU VM that runs OpenStack services -# -# For more details see: README.md - -set -o errexit -set -o nounset -set -o xtrace - -export LC_ALL=C - -# This directory -THIS_DIR=$(cd $(dirname "$0") &&
pwd) - -# Include onexit commands -. $THIS_DIR/scripts/on_exit.sh - -# xapi functions -. $THIS_DIR/functions - -# -# Get Settings -# -TOP_DIR=$(cd $THIS_DIR/../../ && pwd) -source $TOP_DIR/inc/meta-config -rm -f $TOP_DIR/.localrc.auto -extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto - -# Source params - override xenrc params in your localrc to suit your taste -source $THIS_DIR/xenrc - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -# -# Prepare Dom0 -# including installing XenAPI plugins -# - -cd $THIS_DIR - -# Die if multiple hosts listed -if have_multiple_hosts; then - cat >&2 << EOF -ERROR: multiple hosts found. This might mean that the XenServer is a member -of a pool - Exiting. -EOF - exit 1 -fi - -# -# Configure Networking -# - -MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true` -MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true` - -setup_network "$VM_BRIDGE_OR_NET_NAME" -setup_network "$MGT_BRIDGE_OR_NET_NAME" -setup_network "$PUB_BRIDGE_OR_NET_NAME" - -# With neutron, one more network is required, which is internal to the -# hypervisor, and used by the VMs -setup_network "$XEN_INT_BRIDGE_OR_NET_NAME" - -if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then - if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then - cat >&2 << EOF -ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file, and either no network -found on XenServer by searching for networks by that value as name-label or -bridge name or the network found does not match the network specified by -VM_BRIDGE_OR_NET_NAME. Please check your localrc file. -EOF - exit 1 - fi -fi - -if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then - cat >&2 << EOF -ERROR: XenAPI does not have an assigned IP address on the management network. -please review your XenServer network configuration / localrc file. -EOF - exit 1 -fi - -HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") - -# Set up ip forwarding, but skip on xcp-xapi -if [ -a /etc/sysconfig/network ]; then - if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network - fi -fi -# Also, enable ip forwarding in rc.local, since the above trick isn't working -if ! 
grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then - echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local -fi -# Enable ip forwarding at runtime as well -echo 1 > /proc/sys/net/ipv4/ip_forward - - -# -# Shutdown previous runs -# - -DO_SHUTDOWN=${DO_SHUTDOWN:-1} -CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} -if [ "$DO_SHUTDOWN" = "1" ]; then - # Shutdown all domU's that created previously - clean_templates_arg="" - if $CLEAN_TEMPLATES; then - clean_templates_arg="--remove-templates" - fi - ./scripts/uninstall-os-vpx.sh $clean_templates_arg - - # Destroy any instances that were launched - for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do - echo "Shutting down nova instance $uuid" - xe vm-uninstall uuid=$uuid force=true - done - - # Destroy orphaned vdis - for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do - xe vdi-destroy uuid=$uuid - done -fi - - -# -# Create Ubuntu VM template -# and/or create VM from template -# - -GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -TNAME="jeos_template_for_devstack" -SNAME_TEMPLATE="jeos_snapshot_for_devstack" -SNAME_FIRST_BOOT="before_first_boot" - -function wait_for_VM_to_halt { - set +x - echo "Waiting for the VM to halt. Progress in-VM can be checked with XenCenter or xl console:" - mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') - domid=$(get_domid "$GUEST_NAME") - echo "ssh root@$mgmt_ip \"xl console $domid\"" - while true; do - state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) - if [ -n "$state" ]; then - break - else - echo -n "." - sleep 20 - fi - done - set -x -} - -templateuuid=$(xe template-list name-label="$TNAME") -if [ -z "$templateuuid" ]; then - # - # Install Ubuntu over network - # - UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} - - # always update the preseed file, incase we have a newer one - PRESEED_URL=${PRESEED_URL:-""} - if [ -z "$PRESEED_URL" ]; then - PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg" - - HTTP_SERVER_LOCATION="/opt/xensource/www" - if [ ! -e $HTTP_SERVER_LOCATION ]; then - HTTP_SERVER_LOCATION="/var/www/html" - mkdir -p $HTTP_SERVER_LOCATION - fi - - # Copy the tools DEB to the XS web server - XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb" - ISO_DIR="/opt/xensource/packages/iso" - if [ -e "$ISO_DIR" ]; then - TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1) - TMP_DIR=/tmp/temp.$RANDOM - mkdir -p $TMP_DIR - mount -o loop $TOOLS_ISO $TMP_DIR - # the target deb package maybe *amd64.deb or *all.deb, - # so use *amd64.deb by default. If it doesn't exist, - # then use *all.deb. 
- DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb) - cp $DEB_FILE $HTTP_SERVER_LOCATION - umount $TMP_DIR - rmdir $TMP_DIR - XS_TOOLS_URL=${HOST_IP}/$(basename $DEB_FILE) - fi - - cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION - cp -f $THIS_DIR/devstackubuntu_latecommand.sh $HTTP_SERVER_LOCATION/latecommand.sh - - sed \ - -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \ - -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \ - -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \ - -e "s,\(d-i passwd/root-password password\).*,\1 $GUEST_PASSWORD,g" \ - -e "s,\(d-i passwd/root-password-again password\).*,\1 $GUEST_PASSWORD,g" \ - -e "s,\(d-i preseed/late_command string\).*,\1 in-target mkdir -p /tmp; in-target wget --no-proxy ${HOST_IP}/latecommand.sh -O /root/latecommand.sh; in-target bash /root/latecommand.sh,g" \ - -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" - - sed \ - -e "s,@XS_TOOLS_URL@,$XS_TOOLS_URL,g" \ - -i "${HTTP_SERVER_LOCATION}/latecommand.sh" - fi - - # Update the template - $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL - - # create a new VM from the given template with eth0 attached to the given - # network - $THIS_DIR/scripts/install-os-vpx.sh \ - -t "$UBUNTU_INST_TEMPLATE_NAME" \ - -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ - -l "$GUEST_NAME" - - set_vm_memory "$GUEST_NAME" "1024" - - xe vm-start vm="$GUEST_NAME" - - # wait for install to finish - wait_for_VM_to_halt - - # set VM to restart after a reboot - vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") - xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" - - # Make template from VM - snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE") - xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" -else - # - # Template already installed, create VM from template - # - vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") -fi - -if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then - echo "User requested to quit after JEOS installation" - exit 0 -fi - -# -# Prepare VM for DevStack -# -xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" - -# Install XenServer tools, and other such things -$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" - -# Set virtual machine parameters -set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" - -# Max out VCPU count for better performance -max_vcpus "$GUEST_NAME" - -# Wipe out all network cards -destroy_all_vifs_of "$GUEST_NAME" - -# Add only one interface to prepare the guest template -add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0" - -# start the VM to run the prepare steps -xe vm-start vm="$GUEST_NAME" - -# Wait for prep script to finish and shutdown system -wait_for_VM_to_halt - -## Setup network cards -# Wipe out all -destroy_all_vifs_of "$GUEST_NAME" -# Tenant network -add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR" -# Management network -add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR" -# Public network -add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR" - -# -# Inject DevStack inside VM disk -# -$THIS_DIR/build_xva.sh "$GUEST_NAME" - -# Attach a network interface for the integration network (so that the bridge -# is created by XenServer). This is required for Neutron.
Also pass that as a -# kernel parameter for DomU -attach_network "$XEN_INT_BRIDGE_OR_NET_NAME" - -XEN_INTEGRATION_BRIDGE_DEFAULT=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") -append_kernel_cmdline \ - "$GUEST_NAME" \ - "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE_DEFAULT}" - -FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}" -append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" - -# Add a separate xvdb, if it was requested -if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then - vm=$(xe vm-list name-label="$GUEST_NAME" --minimal) - - # Add a new disk - localsr=$(get_local_sr) - extra_vdi=$(xe vdi-create \ - name-label=xvdb-added-by-devstack \ - virtual-size="${XEN_XVDB_SIZE_GB}GiB" \ - sr-uuid=$localsr type=user) - xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1 -fi - -# create a snapshot before the first boot -# to allow a quick re-run with the same settings -xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" - -# -# Run DevStack VM -# -xe vm-start vm="$GUEST_NAME" - -function ssh_no_check { - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" -} - -# Get hold of the Management IP of OpenStack VM -OS_VM_MANAGEMENT_ADDRESS=$MGT_IP -if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then - OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) -fi - -# Get hold of the Service IP of OpenStack VM -if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then - OS_VM_SERVICES_ADDRESS=$MGT_IP - if [ $MGT_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) - fi -else - OS_VM_SERVICES_ADDRESS=$PUB_IP - if [ $PUB_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR) - fi -fi - -# Create an ssh-keypair, and set it up for dom0 user -rm -f /root/dom0key /root/dom0key.pub -ssh-keygen -f /root/dom0key -P "" -C "dom0" -DOMID=$(get_domid "$GUEST_NAME") - -xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" -xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID - -function run_on_appliance { - ssh \ - -i /root/dom0key \ - -o UserKnownHostsFile=/dev/null \ - -o StrictHostKeyChecking=no \ - -o BatchMode=yes \ - "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@" -} - -# Wait until we can log in to the appliance -while ! run_on_appliance true; do - sleep 1 -done - -# Remove authenticated_keys updater cronjob -echo "" | run_on_appliance crontab - - -# Generate a passwordless ssh key for domzero user -echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance - -# Authenticate that user to dom0 -run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys - -# If we have copied our ssh credentials, use ssh to monitor while the installation runs -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -COPYENV=${COPYENV:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then - set +x - - echo "VM Launched - Waiting for run.sh" - while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e /opt/stack/run_sh.pid"; do - sleep 10 - done - echo -n "devstack service is running, waiting for stack.sh to start logging..." - - pid=`ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "cat /opt/stack/run_sh.pid"` - if [ -n "$SCREEN_LOGDIR" ]; then - while ! 
ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e ${SCREEN_LOGDIR}/stack.log"; do - sleep 10 - done - - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "tail --pid $pid -n +1 -f ${SCREEN_LOGDIR}/stack.log" - else - echo -n "SCREEN_LOGDIR not set; just waiting for process $pid to finish" - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "wait $pid" - fi - - set -x - # Fail if devstack did not succeed - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /opt/stack/runsh.succeeded' - - set +x - echo "################################################################################" - echo "" - echo "All Finished!" - echo "You can visit the OpenStack Dashboard" - echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." -else - set +x - echo "################################################################################" - echo "" - echo "All Finished!" - echo "Now, you can monitor the progress of the stack.sh installation by " - echo "looking at the console of your domU / checking the log files." - echo "" - echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" - echo "and then do: 'sudo service devstack status' to check if devstack is still running." - echo "Check that /opt/stack/runsh.succeeded exists" - echo "" - echo "When devstack completes, you can visit the OpenStack Dashboard" - echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." -fi diff --git a/tools/xen/mocks b/tools/xen/mocks deleted file mode 100644 index 3b9b05c747..0000000000 --- a/tools/xen/mocks +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -test ! -e "$LIST_OF_ACTIONS" && { - echo "Mocking is not set up properly." - echo "LIST_OF_ACTIONS should point to an existing file." - exit 1 -} - -test ! -e "$LIST_OF_DIRECTORIES" && { - echo "Mocking is not set up properly." - echo "LIST_OF_DIRECTORIES should point to an existing file." - exit 1 -} - -test ! -e "$XE_RESPONSE" && { - echo "Mocking is not set up properly." - echo "XE_RESPONSE should point to an existing file." - exit 1 -} - -test ! -e "$XE_CALLS" && { - echo "Mocking is not set up properly." - echo "XE_CALLS should point to an existing file." - exit 1 -} - -function mktemp { - if test "${1:-}" = "-d"; - then - echo "tempdir" - else - echo "tempfile" - fi -} - -function wget { - if [[ $@ =~ "failurl" ]]; then - return 1 - fi - echo "wget $@" >> $LIST_OF_ACTIONS -} - -function mkdir { - if test "${1:-}" = "-p"; - then - echo "$2" >> $LIST_OF_DIRECTORIES - fi -} - -function unzip { - echo "Random rubbish from unzip" - echo "unzip $@" >> $LIST_OF_ACTIONS -} - -function rm { - echo "rm $@" >> $LIST_OF_ACTIONS -} - -function ln { - echo "ln $@" >> $LIST_OF_ACTIONS -} - -function [ { - if test "${1:-}" = "-d"; - then - echo "[ $@" >> $LIST_OF_ACTIONS - for directory in $(cat $LIST_OF_DIRECTORIES) - do - if test "$directory" = "$2" - then - return 0 - fi - done - return 1 - fi - echo "Mock test does not implement the requested function: ${1:-}" - exit 1 -} - -function die_with_error { - echo "$1" >> $DEAD_MESSAGES -} - -function xe { - cat $XE_RESPONSE - { - for i in $(seq "$#") - do - eval "echo \"\$$i\"" - done - } >> $XE_CALLS -} diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh deleted file mode 100755 index 6de1afc199..0000000000 --- a/tools/xen/prepare_guest.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -# This script is run on an Ubuntu VM. 
-# This script is inserted into the VM by prepare_guest_template.sh -# and is run when that VM boots. -# It customizes a fresh Ubuntu install, so it is ready -# to run stack.sh -# -# This includes installing the XenServer tools, -# creating the user called "stack", -# and shuts down the VM to signal the script has completed - -set -o errexit -set -o nounset -set -o xtrace - -# Configurable nuggets -GUEST_PASSWORD="$1" -STACK_USER="$2" -DOMZERO_USER="$3" - - -function setup_domzero_user { - local username - - username="$1" - - local key_updater_script - local sudoers_file - key_updater_script="/home/$username/update_authorized_keys.sh" - sudoers_file="/etc/sudoers.d/allow_$username" - - # Create user - adduser --disabled-password --quiet "$username" --gecos "$username" - - # Give passwordless sudo - cat > $sudoers_file << EOF - $username ALL = NOPASSWD: ALL -EOF - chmod 0440 $sudoers_file - - # A script to populate this user's authenticated_keys from xenstore - cat > $key_updater_script << EOF -#!/bin/bash -set -eux - -DOMID=\$(sudo xenstore-read domid) -sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username -sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value -cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys -EOF - - # Give the key updater to the user - chown $username:$username $key_updater_script - chmod 0700 $key_updater_script - - # Setup the .ssh folder - mkdir -p /home/$username/.ssh - chown $username:$username /home/$username/.ssh - chmod 0700 /home/$username/.ssh - touch /home/$username/.ssh/authorized_keys - chown $username:$username /home/$username/.ssh/authorized_keys - chmod 0600 /home/$username/.ssh/authorized_keys - - # Setup the key updater as a cron job - crontab -u $username - << EOF -* * * * * $key_updater_script -EOF - -} - -# Make a small cracklib dictionary, so that passwd still works, but we don't -# have the big dictionary. -mkdir -p /usr/share/cracklib -echo a | cracklib-packer - -# Make /etc/shadow, and set the root password -pwconv -echo "root:$GUEST_PASSWORD" | chpasswd - -# Put the VPX into UTC. -rm -f /etc/localtime - -# Add stack user -groupadd libvirtd -useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd -echo $STACK_USER:$GUEST_PASSWORD | chpasswd -echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - -setup_domzero_user "$DOMZERO_USER" - -# Add an udev rule, so that new block devices could be written by stack user -cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF -KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" -EOF - -# Give ownership of /opt/stack to stack user -chown -R $STACK_USER /opt/stack - -function setup_vimrc { - if [ ! 
-e $1 ]; then - # Simple but usable vimrc - cat > $1 <$STAGING_DIR/etc/rc.local -#!/bin/sh -e -bash /opt/stack/prepare_guest.sh \\ - "$GUEST_PASSWORD" "$STACK_USER" "$DOMZERO_USER" \\ - > /opt/stack/prepare_guest.log 2>&1 -EOF - -# Update ubuntu repositories -cat > $STAGING_DIR/etc/apt/sources.list << EOF -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse -deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse -deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse - -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe -deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse -deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse -EOF - -rm -f $STAGING_DIR/etc/apt/apt.conf -if [ -n "$UBUNTU_INST_HTTP_PROXY" ]; then - cat > $STAGING_DIR/etc/apt/apt.conf << EOF -Acquire::http::Proxy "$UBUNTU_INST_HTTP_PROXY"; -EOF -fi diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh deleted file mode 100755 index 66f7ef4763..0000000000 --- a/tools/xen/scripts/install-os-vpx.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -set -eux - -BRIDGE= -NAME_LABEL= -TEMPLATE_NAME= - -usage() -{ -cat << EOF - - Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] - - Install a VM from a template - - OPTIONS: - - -h Shows this message. - -t template VM template to use - -l name Specifies the name label for the VM. - -n bridge The bridge/network to use for eth0. Defaults to xenbr0 -EOF -} - -get_params() -{ - while getopts "hbn:r:l:t:" OPTION; do - case $OPTION in - h) usage - exit 1 - ;; - n) - BRIDGE=$OPTARG - ;; - l) - NAME_LABEL=$OPTARG - ;; - t) - TEMPLATE_NAME=$OPTARG - ;; - ?) - usage - exit - ;; - esac - done - if [[ -z $BRIDGE ]]; then - BRIDGE=xenbr0 - fi - - if [[ -z $TEMPLATE_NAME ]]; then - echo "Please specify a template name" >&2 - exit 1 - fi - - if [[ -z $NAME_LABEL ]]; then - echo "Please specify a name-label for the new VM" >&2 - exit 1 - fi -} - - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - - -find_network() -{ - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ]; then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" -} - - -create_vif() -{ - local v="$1" - echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid - out_network_uuid=$(find_network "$BRIDGE") - xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" -} - - - -# Make the VM auto-start on server boot. -set_auto_start() -{ - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true -} - - -destroy_vifs() -{ - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v"); do - xe vif-destroy uuid="$vif" - done - unset IFS -} - - -get_params "$@" - -vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL") -destroy_vifs "$vm_uuid" -set_auto_start "$vm_uuid" -create_vif "$vm_uuid" -xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh deleted file mode 100755 index d80ed095e8..0000000000 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash -# -# This creates an Ubuntu Server 32bit or 64bit template -# on XenServer 5.6.x, 6.0.x and 6.1.x -# The template does a net install only -# -# Based on a script by: David Markey -# - -set -o errexit -set -o nounset -set -o xtrace - -# This directory -BASE_DIR=$(cd $(dirname "$0") && pwd) - -# For default settings see xenrc -source $BASE_DIR/../xenrc - -# Get the params -preseed_url=$1 - -# Delete template or skip template creation as required -previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \ - params=uuid --minimal) -if [ -n "$previous_template" ]; then - if $CLEAN_TEMPLATES; then - xe template-param-clear param-name=other-config uuid=$previous_template - xe template-uninstall template-uuid=$previous_template force=true - else - echo "Template $UBUNTU_INST_TEMPLATE_NAME already present" - exit 0 - fi -fi - -# Get built-in template -builtin_name="Debian Squeeze 6.0 (32-bit)" -builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal) -if [[ -z $builtin_uuid ]]; then - echo "Can't find the Debian Squeeze 32bit template on your XenServer."
- exit 1 -fi - -# Clone built-in template to create new template -new_uuid=$(xe vm-clone uuid=$builtin_uuid \ - new-name-label="$UBUNTU_INST_TEMPLATE_NAME") -disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) - -# Some of these settings can be found in example preseed files -# however these need to be answered before the netinstall -# is ready to fetch the preseed file, and as such must be here -# to get a fully automated install -pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ -console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ -keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ -netcfg/choose_interface=eth0 \ -netcfg/get_hostname=os netcfg/get_domain=os auto \ -url=${preseed_url}" - -if [ "$UBUNTU_INST_IP" != "dhcp" ]; then - netcfgargs="netcfg/disable_autoconfig=true \ -netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ -netcfg/get_ipaddress=${UBUNTU_INST_IP} \ -netcfg/get_netmask=${UBUNTU_INST_NETMASK} \ -netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \ -netcfg/confirm_static=true" - pvargs="${pvargs} ${netcfgargs}" -fi - -xe template-param-set uuid=$new_uuid \ - other-config:install-methods=http \ - other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \ - PV-args="$pvargs" \ - other-config:debian-release="$UBUNTU_INST_RELEASE" \ - other-config:default_template=true \ - other-config:disks='' \ - other-config:install-arch="$UBUNTU_INST_ARCH" - -if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then - xe template-param-set uuid=$new_uuid \ - other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY" -fi - -echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi deleted file mode 100755 index 909ce328b0..0000000000 --- a/tools/xen/scripts/manage-vdi +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -set -eux - -action="$1" -vm="$2" -device="${3-0}" -part="${4-}" - -function xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -function run_udev_settle() { - which_udev=$(which udevsettle) || true - if [ -n "$which_udev" ]; then - udevsettle - else - udevadm settle - fi -} - -vm_uuid=$(xe_min vm-list name-label="$vm") -vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ - userdevice="$device") - -dom0_uuid=$(xe_min vm-list is-control-domain=true) - -function get_mount_device() { - vbd_uuid=$1 - - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then - DEBIAN_FRONTEND=noninteractive \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ - install kpartx &> /dev/null || true - mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") - if [ -z "$mapping" ]; then - echo "Failed to find mapping" - exit -1 - fi - - local device="/dev/mapper/${mapping}" - for (( i = 0; i < 5; i++ )) ; do - if [ -b $device ] ; then - echo $device - return - fi - sleep 1 - done - echo "ERROR: timed out waiting for dev-mapper" - exit 1 - else - echo "/dev/$dev$part" - fi -} - -function clean_dev_mappings() { - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then - kpartx -dv "/dev/$dev" - fi -} - -function open_vdi() { - vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ - device=autodetect) - mp=$(mktemp -d) - xe vbd-plug uuid="$vbd_uuid" - - run_udev_settle - - mount_device=$(get_mount_device "$vbd_uuid") - mount "$mount_device" "$mp" - echo "Your vdi is mounted 
at $mp" -} - -function close_vdi() { - vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") - mount_device=$(get_mount_device "$vbd_uuid") - run_udev_settle - umount "$mount_device" - - clean_dev_mappings - - xe vbd-unplug uuid=$vbd_uuid - xe vbd-destroy uuid=$vbd_uuid -} - -if [ "$action" == "open" ]; then - open_vdi -elif [ "$action" == "close" ]; then - close_vdi -fi diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh deleted file mode 100755 index 2846dc42d0..0000000000 --- a/tools/xen/scripts/on_exit.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -if [ -z "${on_exit_hooks:-}" ]; then - on_exit_hooks=() -fi - -on_exit() -{ - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do - eval "${on_exit_hooks[$i]}" - done -} - -add_on_exit() -{ - local n=${#on_exit_hooks[*]} - on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]]; then - trap on_exit EXIT - fi -} diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh deleted file mode 100755 index 96dad7e852..0000000000 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -set -ex - -# By default, don't remove the templates -REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} -if [ "$1" = "--remove-templates" ]; then - REMOVE_TEMPLATES=true -fi - -xe_min() -{ - local cmd="$1" - shift - xe "$cmd" --minimal "$@" -} - -destroy_vdi() -{ - local vbd_uuid="$1" - local type - type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev - dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid - vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - - if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then - xe vdi-destroy uuid=$vdi_uuid - fi -} - -uninstall() -{ - local vm_uuid="$1" - local power_state - power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - - if [ "$power_state" != "halted" ]; then - xe vm-shutdown vm=$vm_uuid force=true - fi - - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done - - xe vm-uninstall vm=$vm_uuid force=true >/dev/null -} - -uninstall_template() -{ - local vm_uuid="$1" - - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done - - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null -} - -# remove the VMs and their disks -for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall "$u" -done - -# remove the templates -if [ "$REMOVE_TEMPLATES" == "true" ]; then - for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall_template "$u" - done -fi diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh deleted file mode 100755 index 324e6a1a1e..0000000000 --- a/tools/xen/test_functions.sh +++ /dev/null @@ -1,205 +0,0 @@ -#!/bin/bash - -# Tests for functions. -# -# The tests are sourcing the mocks file to mock out various functions. The -# mocking-out always happens in a sub-shell, thus it does not have impact on -# the functions defined here. - -# To run the tests, please run: -# -# ./test_functions.sh run_tests -# -# To only print out the discovered test functions, run: -# -# ./test_functions.sh - -. functions - -# Setup -function before_each_test { - LIST_OF_DIRECTORIES=$(mktemp) - truncate -s 0 $LIST_OF_DIRECTORIES - - LIST_OF_ACTIONS=$(mktemp) - truncate -s 0 $LIST_OF_ACTIONS - - XE_RESPONSE=$(mktemp) - truncate -s 0 $XE_RESPONSE - - XE_CALLS=$(mktemp) - truncate -s 0 $XE_CALLS - - DEAD_MESSAGES=$(mktemp) - truncate -s 0 $DEAD_MESSAGES -} - -# Teardown -function after_each_test { - rm -f $LIST_OF_DIRECTORIES - rm -f $LIST_OF_ACTIONS - rm -f $XE_RESPONSE - rm -f $XE_CALLS -} - -# Helpers -function setup_xe_response { - echo "$1" > $XE_RESPONSE -} - -function given_directory_exists { - echo "$1" >> $LIST_OF_DIRECTORIES -} - -function assert_directory_exists { - grep "$1" $LIST_OF_DIRECTORIES -} - -function assert_previous_command_failed { - [ "$?" != "0" ] || exit 1 -} - -function assert_xe_min { - grep -qe "^--minimal\$" $XE_CALLS -} - -function assert_xe_param { - grep -qe "^$1\$" $XE_CALLS -} - -function assert_died_with { - diff -u <(echo "$1") $DEAD_MESSAGES -} - -function mock_out { - local FNNAME="$1" - local OUTPUT="$2" - - . <(cat << EOF -function $FNNAME { - echo "$OUTPUT" -} -EOF -) -} - -function assert_symlink { - grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS -} - -# Tests -function test_plugin_directory_on_xenserver { - given_directory_exists "/etc/xapi.d/plugins/" - - PLUGDIR=$(. 
mocks && xapi_plugin_location) - - [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ] -} - -function test_plugin_directory_on_xcp { - given_directory_exists "/usr/lib/xcp/plugins/" - - PLUGDIR=$(. mocks && xapi_plugin_location) - - [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ] -} - -function test_no_plugin_directory_found { - set +e - - local IGNORE - IGNORE=$(. mocks && xapi_plugin_location) - - assert_previous_command_failed - - grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS - grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS -} - -function test_create_directory_for_kernels { - ( - . mocks - mock_out get_local_sr_path /var/run/sr-mount/uuid1 - create_directory_for_kernels - ) - - assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels" - assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels" -} - -function test_create_directory_for_kernels_existing_dir { - ( - . mocks - given_directory_exists "/boot/guest" - create_directory_for_kernels - ) - - diff -u $LIST_OF_ACTIONS - << EOF -[ -d /boot/guest ] -EOF -} - -function test_create_directory_for_images { - ( - . mocks - mock_out get_local_sr_path /var/run/sr-mount/uuid1 - create_directory_for_images - ) - - assert_directory_exists "/var/run/sr-mount/uuid1/os-images" - assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images" -} - -function test_create_directory_for_images_existing_dir { - ( - . mocks - given_directory_exists "/images" - create_directory_for_images - ) - - diff -u $LIST_OF_ACTIONS - << EOF -[ -d /images ] -EOF -} - -function test_get_local_sr { - setup_xe_response "uuid123" - - local RESULT - RESULT=$(. mocks && get_local_sr) - - [ "$RESULT" == "uuid123" ] - - assert_xe_param "pool-list" params=default-SR minimal=true -} - -function test_get_local_sr_path { - local RESULT - RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path) - - [ "/var/run/sr-mount/uuid1" == "$RESULT" ] -} - -# Test runner -[ "$1" = "" ] && { - grep -e "^function *test_" $0 | cut -d" " -f2 -} - -[ "$1" = "run_tests" ] && { - for testname in $($0); do - echo "$testname" - before_each_test - ( - set -eux - $testname - ) - if [ "$?" != "0" ]; then - echo "FAIL" - exit 1 - else - echo "PASS" - fi - - after_each_test - done -} diff --git a/tools/xen/xenrc b/tools/xen/xenrc deleted file mode 100644 index bb27454e30..0000000000 --- a/tools/xen/xenrc +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/bash - -# -# XenServer specific defaults for the /tools/xen/ scripts -# Similar to stackrc, you can override these in your localrc -# - -# Name of this guest -GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} - -# Template cleanup -CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} - -# Size of image -VDI_MB=${VDI_MB:-5000} - -# Devstack now contains many components. 4GB ram is not enough to prevent -# swapping and memory fragmentation - the latter of which can cause failures -# such as blkfront failing to plug a VBD and lead to random test fails. -# -# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs -OSDOMU_MEM_MB=6144 -OSDOMU_VDI_GB=8 - -# Network mapping. Specify bridge names or network names. Network names may -# differ across localised versions of XenServer. If a given bridge/network -# was not found, a new network will be created with the specified name. 
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
deleted file mode 100644
index bb27454e30..0000000000
--- a/tools/xen/xenrc
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-
-#
-# XenServer specific defaults for the /tools/xen/ scripts
-# Similar to stackrc, you can override these in your localrc
-#
-
-# Name of this guest
-GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
-
-# Template cleanup
-CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
-
-# Size of image
-VDI_MB=${VDI_MB:-5000}
-
-# Devstack now contains many components. 4GB ram is not enough to prevent
-# swapping and memory fragmentation - the latter of which can cause failures
-# such as blkfront failing to plug a VBD and lead to random test fails.
-#
-# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs
-OSDOMU_MEM_MB=6144
-OSDOMU_VDI_GB=8
-
-# Network mapping. Specify bridge names or network names. Network names may
-# differ across localised versions of XenServer. If a given bridge/network
-# was not found, a new network will be created with the specified name.
-
-# Get the management network from the XS installation
-VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
-PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
-XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"
-
-# VM Password
-GUEST_PASSWORD=${GUEST_PASSWORD:-secret}
-
-# Extracted variables for OpenStack VM network device numbers.
-# Make sure they form a continuous sequence starting from 0
-MGT_DEV_NR=0
-VM_DEV_NR=1
-PUB_DEV_NR=2
-
-# Host Interface, i.e. the interface on the nova vm you want to expose the
-# services on. Usually the device connected to the management network or the
-# one connected to the public network is used.
-HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"}
-
-#
-# Our nova host's network info
-#
-
-# Management network
-MGT_IP=${MGT_IP:-dhcp}
-MGT_NETMASK=${MGT_NETMASK:-ignored}
-
-# VM Network
-VM_IP=${VM_IP:-10.255.255.255}
-VM_NETMASK=${VM_NETMASK:-255.255.255.0}
-
-# Public network
-# Aligned with stack.sh - see FLOATING_RANGE
-PUB_IP=${PUB_IP:-172.24.4.10}
-PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
-
-# Ubuntu install settings
-UBUNTU_INST_RELEASE="trusty"
-UBUNTU_INST_TEMPLATE_NAME="Ubuntu 14.04 (64-bit) for DevStack"
-# For 12.04 use "precise" and update template name
-# However, for 12.04, you should be using
-# XenServer 6.1 and later or XCP 1.6 or later
-# 11.10 is only really supported with XenServer 6.0.2 and later
-UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
-UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
-UBUNTU_INST_HTTP_PROXY=""
-UBUNTU_INST_LOCALE="en_US"
-UBUNTU_INST_KEYBOARD="us"
-# network configuration for ubuntu netinstall
-UBUNTU_INST_IP="dhcp"
-UBUNTU_INST_NAMESERVERS=""
-UBUNTU_INST_NETMASK=""
-UBUNTU_INST_GATEWAY=""
-
-# Create a separate xvdb. This could be used as a backing device for cinder
-# volumes. Specify
-#   XEN_XVDB_SIZE_GB=10
-#   VOLUME_BACKING_DEVICE=/dev/xvdb
-# in your localrc to avoid kernel lockups:
-#   https://bugs.launchpad.net/cinder/+bug/1023755
-#
-# Set the size to 0 to avoid creation of additional disk.
-XEN_XVDB_SIZE_GB=0
-
-STACK_USER=stack
-DOMZERO_USER=domzero
-
-RC_DIR="../.."
-
-restore_nounset=$(set +o | grep nounset)
-set +u
-
-## Note that the lines below are coming from stackrc to support
-## new-style config files
-
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
-    # Old-style user-supplied config
-    source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
-    # New-style user-supplied config extracted from local.conf
-    source $RC_DIR/.localrc.auto
-fi
-
-$restore_nounset
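# xenrc sourced localrc (or .localrc.auto) at the end, so any of the above
# settings could be overridden without editing the file, as its header notes.
# A hypothetical localrc fragment in that spirit (all values illustrative;
# the xvdb pair comes straight from the file's own comment):
#
#   GUEST_NAME=MyDevStackVM
#   GUEST_PASSWORD=not-so-secret
#   VDI_MB=8000
#   XEN_XVDB_SIZE_GB=10
#   VOLUME_BACKING_DEVICE=/dev/xvdb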
diff --git a/tox.ini b/tox.ini
index 55a06d0cae..26cd68c031 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,19 +1,19 @@
 [tox]
-minversion = 1.6
+minversion = 3.18.0
 skipsdist = True
 envlist = bashate
 
 [testenv]
 usedevelop = False
-install_command = pip install {opts} {packages}
+basepython = python3
 
 [testenv:bashate]
 # if you want to test out some changes you have made to bashate
 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
 # modified bashate tree
 deps =
-   {env:BASHATE_INSTALL_PATH:bashate==0.5.1}
-whitelist_externals = bash
+   {env:BASHATE_INSTALL_PATH:bashate}
+allowlist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)   \
          -not \( -type d -name doc -prune \)    \
@@ -35,27 +35,22 @@ commands = bash -c "find {toxinidir}             \
 
 [testenv:docs]
 deps =
-   Pygments
-   docutils
-   sphinx>=1.1.2,<1.2
-   pbr>=0.6,!=0.7,<1.0
-   oslosphinx
-   nwdiag
-   blockdiag
-   sphinxcontrib-blockdiag
-   sphinxcontrib-nwdiag
-whitelist_externals = bash
+   -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+   -r{toxinidir}/doc/requirements.txt
+allowlist_externals = bash
 setenv =
   TOP_DIR={toxinidir}
 commands =
-  python setup.py build_sphinx
+  sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
+
+[testenv:pdf-docs]
+deps = {[testenv:docs]deps}
+allowlist_externals =
+  make
+commands =
+  sphinx-build -W -b latex doc/source doc/build/pdf
+  make -C doc/build/pdf
 
 [testenv:venv]
-deps =
-   pbr>=0.6,!=0.7,<1.0
-   sphinx>=1.1.2,<1.2
-   oslosphinx
-   blockdiag
-   sphinxcontrib-blockdiag
-   sphinxcontrib-nwdiag
+deps = -r{toxinidir}/doc/requirements.txt
 commands = {posargs}
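# As the comment in [testenv:bashate] explains, the bashate dependency can be
# swapped for a local checkout by exporting BASHATE_INSTALL_PATH before
# running tox; unset, the {env:BASHATE_INSTALL_PATH:bashate} substitution
# falls back to installing bashate from PyPI. A hypothetical invocation (the
# checkout path is illustrative):
#
#   BASHATE_INSTALL_PATH=/home/user/src/bashate tox -e bashate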
diff --git a/unstack.sh b/unstack.sh
index a69b2187ce..29c80718f8 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -45,6 +45,10 @@ fi
 # Configure Projects
 # ==================
 
+# Determine what system we are running on. This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` and ``DISTRO``
+GetDistro
+
 # Plugin Phase 0: override_defaults - allow plugins to override
 # defaults before other services are run
 run_phase override_defaults
@@ -63,14 +67,14 @@ source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
-source $TOP_DIR/lib/dlm
+source $TOP_DIR/lib/atop
+source $TOP_DIR/lib/etcd3
 
 # Extras Source
 # --------------
@@ -84,10 +88,6 @@ fi
 
 load_plugin_settings
 
-# Determine what system we are running on. This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
-GetOSVersion
-
 set -o xtrace
 
 # Run extras
@@ -96,19 +96,15 @@ set -o xtrace
 
 # Phase: unstack
 run_phase unstack
 
-if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-    source $TOP_DIR/openrc
-    teardown_neutron_debug
-fi
-
 # Call service stop
-if is_service_enabled heat; then
-    stop_heat
-fi
-
 if is_service_enabled nova; then
     stop_nova
+    cleanup_nova
+fi
+
+if is_service_enabled placement; then
+    stop_placement
 fi
 
 if is_service_enabled glance; then
@@ -135,9 +131,6 @@ if is_service_enabled tls-proxy; then
     stop_tls_proxy
     cleanup_CA
 fi
-if [ "$USE_SSL" == "True" ]; then
-    cleanup_CA
-fi
 
 SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
 
@@ -168,28 +161,35 @@ fi
 
 if is_service_enabled neutron; then
     stop_neutron
-    stop_neutron_third_party
     cleanup_neutron
 fi
 
-if is_service_enabled dstat; then
-    stop_dstat
+if is_service_enabled etcd3; then
+    stop_etcd3
+    cleanup_etcd3
 fi
 
-# Clean up the remainder of the screen processes
-SCREEN=$(which screen)
-if [[ -n "$SCREEN" ]]; then
-    SESSION=$(screen -ls | awk "/[0-9]+.${SCREEN_NAME}/"'{ print $1 }')
-    if [[ -n "$SESSION" ]]; then
-        screen -X -S $SESSION quit
-    fi
+if is_service_enabled openstack-cli-server; then
+    stop_service devstack@openstack-cli-server
+fi
+
+stop_dstat
+
+if is_service_enabled atop; then
+    stop_atop
 fi
 
-# BUG: maybe it doesn't exist? We should isolate this further down.
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
-# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove,
-# /etc/lvm/lvm.conf, etc.) is here.
-if is_service_enabled cinder; then
-    clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
+# enabled backends. So if Cinder is enabled, and installed successfully we are
+# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
+if is_service_enabled cinder && is_package_installed lvm2; then
     clean_lvm_filter
 fi
+
+clean_pyc_files
+rm -Rf $DEST/async
+
+# Clean any safe.directory items we wrote into the global
+# gitconfig. We can identify the relevant ones by checking that they
+# point to somewhere in our $DEST directory.
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig
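# A note on the final cleanup line above: sed accepts a custom address
# delimiter via "\<char>regex<char>", so "\+directory = ${DEST}+ d" deletes
# every safe.directory entry under $DEST without having to escape the slashes
# in the path. A standalone sketch (the sample gitconfig content is
# illustrative):
#
#   DEST=/opt/stack
#   printf '[safe]\n\tdirectory = /opt/stack/devstack\n' \
#       | sed "\+directory = ${DEST}+ d"
#   # prints only "[safe]"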