diff --git a/.gitignore b/.gitignore
index 1840352636..ad153f4a07 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,23 +1,42 @@
-proto
 *~
 .*.sw?
 *.log
-*.log.[1-9]
-src
-localrc
-local.sh
+*-log
+*.log.*
+*-log.*
+*.pem
+*.pyc
+.localrc.auto
+.localrc.password
+.prereqs
+.tox
+.stackenv
+accrc
+doc/files
+doc/build
 files/*.gz
+files/*.vmdk
+files/*.rpm
+files/*.rpm.*
+files/*.deb
+files/*.deb.*
 files/*.qcow2
+files/*.img
 files/images
 files/pip-*
-files/get-pip.py
+files/get-pip.py*
+files/ir-deploy*
+files/ironic-inspector*
+files/etcd*
+/local.conf
+local.sh
+localrc
+proto
+shocco
+src
 stack-screenrc
-*.pem
-accrc
-.stackenv
-.prereqs
-devstack-docs-*
-docs/
-docs-files
-.localrc.auto
-local.conf
+userrc_early
+AUTHORS
+ChangeLog
+tools/dbcounter/build/
+tools/dbcounter/dbcounter.egg-info/
diff --git a/.gitreview b/.gitreview
index 570d31a987..e1bf63ba7a 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=review.openstack.org
+host=review.opendev.org
 port=29418
-project=openstack-dev/devstack.git
+project=openstack/devstack.git
diff --git a/.mailmap b/.mailmap
index 29be995ef8..43e4e6ec46 100644
--- a/.mailmap
+++ b/.mailmap
@@ -4,3 +4,4 @@
 Jiajun Liu
 Jian Wen
 Joe Gordon
+Sean Dague
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000000..2227f185dd
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,1140 @@
+- nodeset:
+    name: openstack-single-node-jammy
+    nodes:
+      - name: controller
+        label: ubuntu-jammy
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-noble
+    nodes:
+      - name: controller
+        label: ubuntu-noble
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-almalinux-10
+    nodes:
+      - name: controller
+        label: almalinux-10-8GB
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-centos-9-stream
+    nodes:
+      - name: controller
+        label: centos-9-stream
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-centos-10-stream
+    nodes:
+      - name: controller
+        label: centos-10-stream-8GB
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-debian-trixie
+    nodes:
+      - name: controller
+        label: debian-trixie-8GB
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-debian-bookworm
+    nodes:
+      - name: controller
+        label: debian-bookworm
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+# TODO(frickler): drop this dummy nodeset once all references have been removed
+- nodeset:
+    name: devstack-single-node-opensuse-15
+    nodes: []
+
+- nodeset:
+    name: devstack-single-node-rockylinux-9
+    nodes:
+      - name: controller
+        label: rockylinux-9
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-rockylinux-10
+    nodes:
+      - name: controller
+        label: rockylinux-10-8GB
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-two-node-centos-10-stream
+    nodes:
+      - name: controller
+        label: centos-10-stream-8GB
+      - name: compute1
+        label: centos-10-stream-8GB
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-centos-9-stream
+    nodes:
+      - name: controller
+        label: centos-9-stream
+      - name: compute1
+        label: centos-9-stream
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-jammy
+    nodes:
+      - name: controller
+        label: ubuntu-jammy
+      - name: compute1
+        label: ubuntu-jammy
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-noble
+    nodes:
+      - name: controller
+        label: ubuntu-noble
+      - name: compute1
+        label: ubuntu-noble
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+      - name: compute1
+        label: ubuntu-focal
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-three-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+      - name: compute1
+        label: ubuntu-focal
+      - name: compute2
+        label: ubuntu-focal
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+          - compute2
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+          - compute2
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+          - compute2
+
+- nodeset:
+    name: openstack-three-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+      - name: compute2
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+          - compute2
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+          - compute2
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+          - compute2
+
+- nodeset:
+    name: devstack-two-node-debian-bookworm
+    nodes:
+      - name: controller
+        label: debian-bookworm
+      - name: compute1
+        label: debian-bookworm
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: devstack-two-node-debian-trixie
+    nodes:
+      - name: controller
+        label: debian-trixie-8GB
+      - name: compute1
+        label: debian-trixie-8GB
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- job:
+    name: devstack-base
+    parent: openstack-multinode-fips
+    abstract: true
+    description: |
+      Base abstract Devstack job.
+
+      Defines plays and base variables, but it does not include any project
+      and it does not run any service by default. This is a common base for
+      all Devstack jobs, single or multinode.
+      Variables are defined in job.vars, which is what is then used by single
+      node jobs and by multi node jobs for the controller, as well as in
+      job.group-vars.peers, which is what is used by multi node jobs for subnode
+      nodes (everything but the controller).
+    required-projects:
+      - opendev.org/openstack/devstack
+      # this is a workaround for a packaging bug in ubuntu
+      # remove when https://bugs.launchpad.net/nova/+bug/2109592
+      # is resolved and oslo.config is not a dep of the novnc deb
+      # via the defunct python3-novnc package.
+      - novnc/novnc
+
+    roles:
+      - zuul: opendev.org/openstack/openstack-zuul-jobs
+    vars:
+      devstack_localrc:
+        DATABASE_PASSWORD: secretdatabase
+        RABBIT_PASSWORD: secretrabbit
+        ADMIN_PASSWORD: secretadmin
+        SERVICE_PASSWORD: secretservice
+        NETWORK_GATEWAY: 10.1.0.1
+        FIXED_RANGE: 10.1.0.0/20
+        IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+        FLOATING_RANGE: 172.24.5.0/24
+        PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+        LOGFILE: /opt/stack/logs/devstacklog.txt
+        LOG_COLOR: false
+        VERBOSE: true
+        VERBOSE_NO_TIMESTAMP: true
+        ERROR_ON_CLONE: true
+        # Gate jobs can't deal with nested virt. Disable it by default.
+        LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
+      devstack_services:
+        # Ignore any default set by devstack. Emit a "disable_all_services".
+        base: false
+      zuul_copy_output:
+        '{{ devstack_conf_dir }}/local.conf': logs
+        '{{ devstack_conf_dir }}/localrc': logs
+        '{{ devstack_conf_dir }}/.localrc.auto': logs
+        '{{ devstack_conf_dir }}/.stackenv': logs
+        '{{ devstack_log_dir }}/dstat-csv.log': logs
+        '{{ devstack_log_dir }}/atop': logs
+        '{{ devstack_log_dir }}/devstacklog.txt': logs
+        '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
+        '{{ devstack_log_dir }}/tcpdump.pcap': logs
+        '{{ devstack_log_dir }}/worlddump-latest.txt': logs
+        '{{ devstack_log_dir }}/qemu.coredump': logs
+        '{{ devstack_full_log}}': logs
+        '{{ stage_dir }}/verify_tempest_conf.log': logs
+        '{{ stage_dir }}/performance.json': logs
+        '{{ stage_dir }}/apache': logs
+        '{{ stage_dir }}/apache_config': logs
+        '{{ stage_dir }}/etc': logs
+        /var/log/rabbitmq: logs
+        /var/log/postgresql: logs
+        /var/log/mysql: logs
+        /var/log/libvirt: logs
+        /etc/libvirt: logs
+        /etc/lvm: logs
+        /etc/sudoers: logs
+        /etc/sudoers.d: logs
+        '{{ stage_dir }}/iptables.txt': logs
+        '{{ stage_dir }}/df.txt': logs
+        '{{ stage_dir }}/mount.txt': logs
+        '{{ stage_dir }}/pip2-freeze.txt': logs
+        '{{ stage_dir }}/pip3-freeze.txt': logs
+        '{{ stage_dir }}/dpkg-l.txt': logs
+        '{{ stage_dir }}/rpm-qa.txt': logs
+        '{{ stage_dir }}/core': logs
+        '{{ stage_dir }}/listen53.txt': logs
+        '{{ stage_dir }}/services.txt': logs
+        '{{ stage_dir }}/deprecations.log': logs
+        '{{ stage_dir }}/audit.log': logs
+        /etc/ceph: logs
+        /var/log/ceph: logs
+        /var/log/openvswitch: logs
+        /var/log/glusterfs: logs
+        /etc/glusterfs/glusterd.vol: logs
+        /etc/resolv.conf: logs
+        /var/log/unbound.log: logs
+      extensions_to_txt:
+        conf: true
+        log: true
+        localrc: true
+        stackenv: true
+        auto: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          DATABASE_PASSWORD: secretdatabase
+          RABBIT_PASSWORD: secretrabbit
+          ADMIN_PASSWORD: secretadmin
+          SERVICE_PASSWORD: secretservice
+          NETWORK_GATEWAY: 10.1.0.1
+          FIXED_RANGE: 10.1.0.0/20
+          IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+          FLOATING_RANGE: 172.24.5.0/24
+          PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+          LOGFILE: /opt/stack/logs/devstacklog.txt
+          LOG_COLOR: false
+          VERBOSE: true
+          VERBOSE_NO_TIMESTAMP: true
+          ERROR_ON_CLONE: true
+          LIBVIRT_TYPE: qemu
+        devstack_services:
+          base: false
+    pre-run: playbooks/pre.yaml
+    run: playbooks/devstack.yaml
+    post-run: playbooks/post.yaml
+    irrelevant-files: &common-irrelevant-files
+      # Documentation related
+      - ^.*\.rst$
+      - ^api-ref/.*$
+      - ^doc/.*$
+      - ^releasenotes/.*$
+      # Translations
+      - ^.*/locale/.*po$
+      # pre-commit config
+      - ^.pre-commit-config.yaml$
+      # gitreview config
+      - ^.gitreview$
+
+- job:
+    name: devstack-minimal
+    parent: devstack-base
+    description: |
+      Minimal devstack base job, intended for use by jobs that need
+      less than the normal minimum set of required-projects.
+    nodeset: openstack-single-node-noble
+    required-projects:
+      - opendev.org/openstack/requirements
+    vars:
+      devstack_localrc:
+        # Multinode specific settings
+        SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+        HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+        PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
+      devstack_services:
+        # Shared services
+        dstat: false
+        etcd3: true
+        memory_tracker: true
+        file_tracker: true
+        mysql: true
+        rabbit: true
+        openstack-cli-server: true
+    group-vars:
+      subnode:
+        devstack_services:
+          # Shared services
+          dstat: false
+          memory_tracker: true
+          file_tracker: true
+          openstack-cli-server: true
+        devstack_localrc:
+          # Multinode specific settings
+          HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
+          SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
+          # Subnode specific settings
+          DATABASE_TYPE: mysql
+          RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+
+
+- job:
+    name: devstack
+    parent: devstack-minimal
+    description: |
+      Base devstack job for integration gate.
+
+      This base job can be used for single node and multinode devstack jobs.
+
+      With a single node nodeset, this job sets up an "all-in-one" (aio)
+      devstack with the seven OpenStack services included in the devstack tree:
+      keystone, glance, cinder, neutron, nova, placement, and swift.
+
+      With a two node nodeset, this job sets up an aio + compute node.
+      The controller can be customised using host-vars.controller, the
+      sub-nodes can be customised using group-vars.subnode.
+
+      Descendant jobs can enable / disable services, add devstack configuration
+      options, enable devstack plugins, configure log files or directories to be
+      transferred to the log server.
+
+      The job assumes that there is only one controller node. The number of
+      subnodes can be scaled up seamlessly by setting a custom nodeset in
+      job.nodeset.
+
+      The run playbook consists of a single role, so it can be easily rewritten
+      and extended.
+    required-projects:
+      - opendev.org/openstack/cinder
+      - opendev.org/openstack/glance
+      - opendev.org/openstack/keystone
+      - opendev.org/openstack/neutron
+      - opendev.org/openstack/nova
+      - opendev.org/openstack/placement
+      - opendev.org/openstack/swift
+      - opendev.org/openstack/os-test-images
+    timeout: 7200
+    vars:
+      # based on observation of the integrated gate
+      # tempest-integrated-compute was only using ~1.7GB of swap
+      # when zswap and the host tuning are enabled that increases
+      # slightly to ~2GB. we are setting the swap size to 8GB to
+      # be safe and account for more complex scenarios.
+      # we should revisit this value after some time to see if we
+      # can reduce it.
+      configure_swap_size: 8192
+      devstack_localrc:
+        # Common OpenStack services settings
+        SWIFT_REPLICAS: 1
+        SWIFT_START_ALL_SERVICES: false
+        SWIFT_HASH: 1234123412341234
+        DEBUG_LIBVIRT_COREDUMPS: true
+        NOVA_VNC_ENABLED: true
+        OVN_DBS_LOG_LEVEL: dbg
+        # tune the host to optimize memory usage and hide io latency
+        # these settings will configure the kernel to treat the host page
+        # cache and swap with equal priority, and prefer deferring writes
+        # by changing the default swappiness, dirty_ratio and
+        # the vfs_cache_pressure
+        ENABLE_SYSCTL_MEM_TUNING: true
+        # the net tuning optimizes ipv4 tcp fast open and configures the
+        # default qdisc policy to pfifo_fast, which effectively disables
+        # all qos. this minimizes the cpu load of the host network stack
+        ENABLE_SYSCTL_NET_TUNING: true
+        # zswap allows the kernel to compress pages in memory before swapping
+        # them to disk. this can reduce the amount of swap used and improve
+        # performance. effectively this trades a small amount of cpu for an
+        # increase in swap performance by reducing the amount of data
+        # written to disk. the overall speedup is proportional to the
+        # compression ratio and the speed of the swap device.
+        # NOTE: this option is ignored when not using nova with the libvirt
+        # virt driver.
+        NOVA_LIBVIRT_TB_CACHE_SIZE: 128
+        ENABLE_ZSWAP: true
+      devstack_local_conf:
+        post-config:
+          $NEUTRON_CONF:
+            DEFAULT:
+              global_physnet_mtu: '{{ external_bridge_mtu }}'
+      devstack_services:
+        # Core services enabled for this branch.
+        # This list replaces the test-matrix.
+        # Shared services
+        dstat: false
+        etcd3: true
+        memory_tracker: true
+        file_tracker: true
+        mysql: true
+        rabbit: true
+        tls-proxy: true
+        # Keystone services
+        key: true
+        # Glance services
+        g-api: true
+        # Nova services
+        n-api: true
+        n-api-meta: true
+        n-cond: true
+        n-cpu: true
+        n-novnc: true
+        n-sch: true
+        # Placement service
+        placement-api: true
+        # OVN services
+        ovn-controller: true
+        ovn-northd: true
+        ovs-vswitchd: true
+        ovsdb-server: true
+        # Neutron services
+        q-svc: true
+        q-ovn-agent: true
+        # Swift services
+        s-account: true
+        s-container: true
+        s-object: true
+        s-proxy: true
+        # Cinder services
+        c-api: true
+        c-bak: true
+        c-sch: true
+        c-vol: true
+        # Services we don't need.
+        # This section is not really needed, it's for readability.
+        horizon: false
+        tempest: false
+        # Test matrix emits ceilometer but ceilometer is not installed in the
+        # integrated gate, so specifying the services has no effect.
+        # ceilometer-*: false
+    group-vars:
+      subnode:
+        devstack_services:
+          # Core services enabled for this branch.
+          # This list replaces the test-matrix.
+          # Shared services
+          dstat: false
+          memory_tracker: true
+          file_tracker: true
+          tls-proxy: true
+          # Nova services
+          n-cpu: true
+          # Placement services
+          placement-client: true
+          # OVN services
+          ovn-controller: true
+          ovs-vswitchd: true
+          ovsdb-server: true
+          # Neutron services
+          q-ovn-agent: true
+          # Cinder services
+          c-bak: true
+          c-vol: true
+          # Services we don't run at all on subnode.
+          # This section is not really needed, it's for readability.
+          # keystone: false
+          # s-*: false
+          horizon: false
+          tempest: false
+          # Test matrix emits ceilometer but ceilometer is not installed in the
+          # integrated gate, so specifying the services has no effect.
+          # ceilometer-*: false
+        devstack_localrc:
+          # Subnode specific settings
+          GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
+          Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          NOVA_VNC_ENABLED: true
+          ENABLE_CHASSIS_AS_GW: false
+          # tune the host to optimize memory usage and hide io latency
+          # these settings will configure the kernel to treat the host page
+          # cache and swap with equal priority, and prefer deferring writes
+          # by changing the default swappiness, dirty_ratio and
+          # the vfs_cache_pressure
+          ENABLE_SYSCTL_MEM_TUNING: true
+          # the net tuning optimizes ipv4 tcp fast open and configures the
+          # default qdisc policy to pfifo_fast, which effectively disables
+          # all qos. this minimizes the cpu load of the host network stack
+          ENABLE_SYSCTL_NET_TUNING: true
+          # zswap allows the kernel to compress pages in memory before swapping
+          # them to disk. this can reduce the amount of swap used and improve
+          # performance. effectively this trades a small amount of cpu for an
+          # increase in swap performance by reducing the amount of data
+          # written to disk. the overall speedup is proportional to the
+          # compression ratio and the speed of the swap device.
+          ENABLE_ZSWAP: true
+          # NOTE: this option is ignored when not using nova with the libvirt
+          # virt driver.
+          NOVA_LIBVIRT_TB_CACHE_SIZE: 128
+
+- job:
+    name: devstack-ipv6
+    parent: devstack
+    description: |
+      Devstack single node job for integration gate with IPv6,
+      all services and tunnels using IPv6 addresses.
+    vars:
+      devstack_localrc:
+        SERVICE_IP_VERSION: 6
+        SERVICE_HOST: ""
+        TUNNEL_IP_VERSION: 6
+
+- job:
+    name: devstack-enforce-scope
+    parent: devstack
+    description: |
+      This job runs the devstack with scope checks enabled.
+    vars:
+      devstack_localrc:
+        ENFORCE_SCOPE: true
+
+- job:
+    name: devstack-multinode
+    parent: devstack
+    nodeset: openstack-two-node-noble
+    description: |
+      Simple multinode test to verify multinode functionality on devstack side.
+      This is not meant to be used as a parent job.
+
+# NOTE(ianw) Platform tests have traditionally been non-voting because
+# we often have to rush things through devstack to stabilise the gate,
+# and these platforms don't have the round-the-clock support to avoid
+# becoming blockers in that situation.
+- job:
+    name: devstack-platform-almalinux-purple-lion-ovn-source
+    parent: tempest-full-py3
+    description: AlmaLinux 10 platform test
+    nodeset: devstack-single-node-almalinux-10
+    timeout: 9000
+    voting: false
+    vars:
+      configure_swap_size: 4096
+      devstack_localrc:
+        OVN_BUILD_FROM_SOURCE: True
+        OVN_BRANCH: "branch-24.03"
+        OVS_BRANCH: "branch-3.3"
+        OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+    name: devstack-platform-centos-10-stream
+    parent: tempest-full-py3
+    description: CentOS 10 Stream platform test
+    nodeset: devstack-single-node-centos-10-stream
+    timeout: 9000
+    voting: false
+
+- job:
+    name: devstack-platform-centos-9-stream
+    parent: tempest-full-py3
+    description: CentOS 9 Stream platform test
+    nodeset: devstack-single-node-centos-9-stream
+    vars:
+      devstack_localrc:
+        # TODO(ykarel) Remove this when moving to 10-stream
+        PYTHON3_VERSION: 3.11
+    timeout: 9000
+    voting: false
+
+- job:
+    name: devstack-platform-debian-trixie
+    parent: tempest-full-py3
+    description: Debian Trixie platform test
+    nodeset: devstack-single-node-debian-trixie
+    timeout: 9000
+    vars:
+      configure_swap_size: 4096
+
+- job:
+    name: devstack-platform-debian-bookworm
+    parent: tempest-full-py3
+    description: Debian Bookworm platform test
+    nodeset: devstack-single-node-debian-bookworm
+    timeout: 9000
+    vars:
+      configure_swap_size: 4096
+
+- job:
+    name: devstack-platform-rocky-blue-onyx
+    parent: tempest-full-py3
+    description: Rocky Linux 9 Blue Onyx platform test
+    nodeset: devstack-single-node-rockylinux-9
+    timeout: 9000
+    # NOTE(danms): This has been failing lately with some repository metadata
+    # errors. We're marking this as non-voting until it appears to have
+    # stabilized:
+    # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0
+    voting: false
+    vars:
+      configure_swap_size: 4096
+      devstack_localrc:
+        # TODO(ykarel) Remove this when moving to rocky10
+        PYTHON3_VERSION: 3.11
+
+- job:
+    name: devstack-platform-rocky-red-quartz
+    parent: tempest-full-py3
+    description: Rocky Linux Red Quartz platform test
+    nodeset: devstack-single-node-rockylinux-10
+    timeout: 9000
+    voting: false
+    vars:
+      configure_swap_size: 4096
+
+- job:
+    name: devstack-platform-ubuntu-jammy
+    parent: tempest-full-py3
+    description: Ubuntu 22.04 LTS (Jammy) platform test
+    nodeset: openstack-single-node-jammy
+    timeout: 9000
+    vars:
+      configure_swap_size: 8192
+
+- job:
+    name: devstack-platform-ubuntu-noble-ovn-source
+    parent: devstack-platform-ubuntu-noble
+    description: Ubuntu 24.04 LTS (noble) platform test (OVN from source)
+    voting: false
+    vars:
+      devstack_localrc:
+        OVN_BUILD_FROM_SOURCE: True
+        OVN_BRANCH: "branch-24.03"
+        OVS_BRANCH: "branch-3.3"
+        OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+    name: devstack-platform-ubuntu-noble-ovs
+    parent: tempest-full-py3
+    description: Ubuntu 24.04 LTS (noble) platform test (OVS)
+    nodeset: openstack-single-node-noble
+    voting: false
+    timeout: 9000
+    vars:
+      configure_swap_size: 8192
+      devstack_localrc:
+        Q_AGENT: openvswitch
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+      devstack_services:
+        # Disable OVN services
+        ovn-northd: false
+        ovn-controller: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        # Disable Neutron ML2/OVN services
+        q-ovn-metadata-agent: false
+        # Enable Neutron ML2/OVS services
+        q-agt: true
+        q-dhcp: true
+        q-l3: true
+        q-meta: true
+        q-metering: true
+    group-vars:
+      subnode:
+        devstack_services:
+          # Disable OVN services
+          ovn-controller: false
+          ovs-vswitchd: false
+          ovsdb-server: false
+          # Disable Neutron ML2/OVN services
+          q-ovn-metadata-agent: false
+          # Enable Neutron ML2/OVS services
+          q-agt: true
+
+- job:
+    name: devstack-no-tls-proxy
+    parent: tempest-full-py3
+    description: |
+      Tempest job with tls-proxy off.
+
+      Some gates run devstack like this and it follows different code paths.
+    vars:
+      devstack_services:
+        tls-proxy: false
+
+- job:
+    name: devstack-tox-base
+    parent: devstack
+    description: |
+      Base job for devstack-based functional tests that use tox.
+
+      This job is not intended to be run directly. It's just here
+      for organizational purposes for devstack-tox-functional and
+      devstack-tox-functional-consumer.
+    post-run: playbooks/tox/post.yaml
+    vars:
+      tox_envlist: functional
+      tox_install_siblings: false
+
+- job:
+    name: devstack-tox-functional
+    parent: devstack-tox-base
+    description: |
+      Base job for devstack-based functional tests that use tox.
+
+      Runs devstack, then runs the tox ``functional`` environment,
+      then collects tox/testr build output like normal tox jobs.
+
+      Turns off tox sibling installation. Projects may be involved
+      in the devstack deployment and so may be in the required-projects
+      list, but may not want to test against master of the other
+      projects in their tox env. Child jobs can set tox_install_siblings
+      to True to re-enable sibling processing.
+    run: playbooks/tox/run-both.yaml
+
+- job:
+    name: devstack-tox-functional-consumer
+    parent: devstack
+    description: |
+      Base job for devstack-based functional tests for projects that
+      consume the devstack cloud.
+
+      This base job should only be used by projects that are not involved
+      in the devstack deployment step, but are instead projects that are using
+      devstack to get a cloud against which they can test things.
+
+      Runs devstack in pre-run, then runs the tox ``functional`` environment,
+      then collects tox/testr build output like normal tox jobs.
+
+      Turns off tox sibling installation. Projects may be involved
+      in the devstack deployment and so may be in the required-projects
+      list, but may not want to test against master of the other
+      projects in their tox env. Child jobs can set tox_install_siblings
+      to True to re-enable sibling processing.
+    pre-run:
+      - playbooks/devstack.yaml
+      - playbooks/tox/pre.yaml
+    run: playbooks/tox/run.yaml
+
+- job:
+    name: devstack-unit-tests
+    nodeset: ubuntu-noble
+    description: |
+      Runs unit tests on devstack project.
+
+      It runs ``run_tests.sh``.
+    pre-run: playbooks/unit-tests/pre.yaml
+    run: playbooks/unit-tests/run.yaml
+
+- project:
+    templates:
+      - integrated-gate-py3
+      - publish-openstack-docs-pti
+    check:
+      jobs:
+        - devstack
+        - devstack-ipv6
+        - devstack-enforce-scope
+        - devstack-platform-almalinux-purple-lion-ovn-source
+        - devstack-platform-centos-10-stream
+        - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bookworm
+        - devstack-platform-debian-trixie
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-rocky-red-quartz
+        - devstack-platform-ubuntu-noble-ovn-source
+        - devstack-platform-ubuntu-noble-ovs
+        - devstack-platform-ubuntu-jammy
+        - devstack-multinode
+        - devstack-unit-tests
+        - openstack-tox-bashate
+        - ironic-tempest-bios-ipmi-direct
+        - swift-dsvm-functional
+        - grenade:
+            irrelevant-files: *common-irrelevant-files
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files: *common-irrelevant-files
+        - neutron-ovn-tempest-ovs-release:
+            voting: false
+            irrelevant-files: *common-irrelevant-files
+        - tempest-multinode-full-py3:
+            voting: false
+            irrelevant-files: *common-irrelevant-files
+        - openstacksdk-functional-devstack:
+            irrelevant-files: *common-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *common-irrelevant-files
+        - nova-ceph-multistore:
+            irrelevant-files: *common-irrelevant-files
+    gate:
+      jobs:
+        - devstack
+        - devstack-ipv6
+        - devstack-platform-debian-bookworm
+        - devstack-platform-debian-trixie
+        - devstack-platform-ubuntu-noble
+        # NOTE(danms): Disabled due to instability, see comment in the job
+        # definition above.
+        # - devstack-platform-rocky-blue-onyx
+        - devstack-enforce-scope
+        - devstack-multinode
+        - devstack-unit-tests
+        - openstack-tox-bashate
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files: *common-irrelevant-files
+        - ironic-tempest-bios-ipmi-direct
+        - swift-dsvm-functional
+        - grenade:
+            irrelevant-files: *common-irrelevant-files
+        - openstacksdk-functional-devstack:
+            irrelevant-files: *common-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *common-irrelevant-files
+        - nova-ceph-multistore:
+            irrelevant-files: *common-irrelevant-files
+    # Please add a note on each job and conditions for the job not
+    # being experimental any more, so we can keep this list somewhat
+    # pruned.
+    #
+    # * nova-next: maintained by nova for unreleased/undefaulted
+    #   things, this job is not experimental but often is used to test
+    #   things that are not yet production ready or to test what will be
+    #   the new default after a deprecation period has ended.
+    # * nova-multi-cell: maintained by nova and now is voting in the
+    #   check queue for nova changes but relies on devstack configuration
+
+    experimental:
+      jobs:
+        - nova-multi-cell
+        - nova-next
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *common-irrelevant-files
+        - neutron-ovs-tempest-dvr:
+            irrelevant-files: *common-irrelevant-files
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
+            irrelevant-files: *common-irrelevant-files
+        - cinder-tempest-lvm-multibackend:
+            irrelevant-files: *common-irrelevant-files
+        - tempest-pg-full:
+            irrelevant-files: *common-irrelevant-files
+        - devstack-no-tls-proxy
+    periodic:
+      jobs:
+        - devstack-no-tls-proxy
+    periodic-weekly:
+      jobs:
+        - devstack-platform-almalinux-purple-lion-ovn-source
+        - devstack-platform-centos-10-stream
+        - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bookworm
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-rocky-red-quartz
+        - devstack-platform-ubuntu-noble-ovn-source
+        - devstack-platform-ubuntu-noble-ovs
+        - devstack-platform-ubuntu-jammy
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index c6b40d8203..0000000000
--- a/AUTHORS
+++ /dev/null
@@ -1,50 +0,0 @@
-Aaron Lee
-Aaron Rosen
-Adam Gandelman
-Akihiro MOTOKI
-Andrew Laski
-Andy Smith
-Anthony Young
-Armando Migliaccio
-Brad Hall
-Chmouel Boudjnah
-Dan Prince
-Dean Troyer
-Devin Carlen
-Doug hellmann
-Eddie Hebert
-Edgar Magana
-Eoghan Glynn
-Eric Windisch
-Gabriel Hurley
-Gary Kotton
-Hengqing Hu
-Hua ZHANG
-Isaku Yamahata
-Jake Dahn
-James E. Blair
-Jason Cannavale
-Jay Pipes
-Jesse Andrews
-Jian Wen
-Joe Gordon
-Johannes Erdfelt
-John Postlethwait
-Josh Kearney
-Justin Shepherd
-Ken Pepple
-Kiall Mac Innes
-Matt Joyce
-Osamu Habuka
-Russell Bryant
-Scott Moser
-Sean Dague
-Sumit Naiksatam
-Thierry Carrez
-Todd Willey
-Tres Henry
-Vincent Untz
-Vishvananda Ishaya
-Yun Mao
-Yong Sheng Gong
-Zhongyue Luo
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000..bb511656f1
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,19 @@
+The source repository for this project can be found at:
+
+   https://opendev.org/openstack/devstack
+
+Pull requests submitted through GitHub are not monitored.
+
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
+
+   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
+
+Bugs should be filed on Launchpad:
+
+   https://bugs.launchpad.net/devstack
+
+For more specific information about contributing to this repository, see the
+Devstack contributor guide:
+
+   https://docs.openstack.org/devstack/latest/contributor/contributing.html
diff --git a/FUTURE.rst b/FUTURE.rst
new file mode 100644
index 0000000000..11bea30f0b
--- /dev/null
+++ b/FUTURE.rst
@@ -0,0 +1,113 @@
+=============
+ Quo Vadimus
+=============
+
+Where are we going?
+
+This is a document in Devstack to outline where we are headed in the
+future. The future might be near or far, but this is where we'd like
+to be.
+
+This is intended to help people contribute, because it will be a
+little clearer if a contribution takes us closer to or further away from
+our end game.
+
+==================
+ Default Services
+==================
+
+Devstack is designed as a development environment first. There are a
+lot of ways to compose the OpenStack services, but we do need one
+default.
+
+That should be the Compute Layer (currently Glance + Nova + Cinder +
+Neutron Core (not advanced services) + Keystone). It should be the
+base building block going forward, and the introduction point of
+people to OpenStack via Devstack.
+
+================
+ Service Howtos
+================
+
+Starting from the base building block all services included in
+OpenStack should have an overview page in the Devstack
+documentation. That should include the following:
+
+- A helpful high level overview of that service
+- What it depends on (both other OpenStack services and other system
+  components)
+- What new daemons are needed to be started, including where they
+  should live
+
+This provides a map for people doing multinode testing to understand
+what portions are control plane, which should live on worker nodes.
+
+Service how to pages will start with an ugly "This team has provided
+no information about this service" until someone does.
+
+===================
+ Included Services
+===================
+
+Devstack doesn't need to eat the world. Given the existence of the
+external devstack plugin architecture, the future direction is to move
+the bulk of the support code out of devstack itself and into external
+plugins.
+
+This will also promote a more clean separation between services.
+
+=============================
+ Included Backends / Drivers
+=============================
+
+Upstream Devstack should only include Open Source backends / drivers;
+its intent is for Open Source development of OpenStack. Proprietary
+drivers should be supported via external plugins.
+
+Just being Open Source doesn't mean it should be in upstream Devstack
+if it's not required for base development of OpenStack
+components. When in doubt, external plugins should be used.
+
+========================================
+ OpenStack Services vs. System Services
+========================================
+
+ENABLED_SERVICES is currently entirely too overloaded. We should have
+a separation of actual OpenStack services that you have to run (n-cpu,
+g-api) and required backends like mysql and rabbitmq.
+
+===========================
+ Splitting up of Functions
+===========================
+
+The functions-common file has grown over time, and needs to be split
+up into smaller libraries that handle specific domains.
+
+======================
+ Testing of Functions
+======================
+
+Every function in a functions file should get tests. The devstack
+testing framework is young, but we do have some unit tests for the
+tree, and those should be enhanced.
+
+==============================
+ Not Co-Gating with the World
+==============================
+
+As projects spin up functional test jobs, Devstack should not be
+co-gated with every single one of those. The Devstack team has one of
+the fastest turnarounds for blocking bugs of any OpenStack
+project.
+
+Basic service validation should be included as part of Devstack
+installation to mitigate this.
+
+============================
+ Documenting all the things
+============================
+
+Devstack started off as an explanation as much as an install
+script. We would love contributions that further enhance the
+comments and explanations about what is happening, even if it seems a
+little pedantic at times.
diff --git a/HACKING.rst b/HACKING.rst
index 83455e3638..6a91e0a6a8 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -6,42 +6,76 @@
 General
 -------
 
 DevStack is written in UNIX shell script. It uses a number of bash-isms
-and so is limited to Bash (version 3 and up) and compatible shells.
+and so is limited to Bash (version 4 and up) and compatible shells.
 Shell script was chosen because it best illustrates the steps used to
 set up and interact with OpenStack components.
 
-DevStack's official repository is located on GitHub at
-https://github.com/openstack-dev/devstack.git. Besides the master branch that
+DevStack's official repository is located on opendev.org at
+https://opendev.org/openstack/devstack. Besides the master branch that
 tracks the OpenStack trunk branches a separate branch is maintained for all
 OpenStack releases starting with Diablo (stable/diablo).
 
 Contributing code to DevStack follows the usual OpenStack process as described
 in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__
-contains the usual links for blueprints, bugs, tec.
+contains the usual links for blueprints, bugs, etc.
 
 __ contribute_
-.. _contribute: http://wiki.openstack.org/HowToContribute
+.. _contribute: https://docs.openstack.org/infra/manual/developers.html
 
 __ lp_
-.. _lp: https://launchpad.net/~devstack
+.. _lp: https://launchpad.net/devstack
+
+The `Gerrit review
+queue <https://review.opendev.org/#/q/project:openstack/devstack>`__
+is used for all commits.
 
 The primary script in DevStack is ``stack.sh``, which performs the bulk of the
 work for DevStack's use cases. There is a subscript ``functions`` that contains
 generally useful shell functions and is used by a number of the scripts in
 DevStack.
 
-The ``lib`` directory contains sub-scripts for projects or packages that ``stack.sh``
-sources to perform much of the work related to those projects. These sub-scripts
-contain configuration defaults and functions to configure, start and stop the project
-or package. These variables and functions are also used by related projects,
-such as Grenade, to manage a DevStack installation.
-
 A number of additional scripts can be found in the ``tools`` directory that may
 be useful in supporting DevStack installations. Of particular note are ``info.sh``
 to collect and report information about the installed system, and
 ``install_prereqs.sh`` that handles installation of the prerequisite packages for
 DevStack. It is suitable, for example, to pre-load a system for making a snapshot.
 
+Repo Layout
+-----------
+
+The DevStack repo generally keeps all of the primary scripts at the root
+level.
+
+``doc`` - Contains the Sphinx source for the documentation.
+A complete doc build can be run with ``tox -edocs``.
+
+``extras.d`` - Contains the dispatch scripts called by the hooks in
+``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins
+docs <plugins>` for more information.
+
+``files`` - Contains a variety of otherwise lost files used in
+configuring and operating DevStack. This includes templates for
+configuration files and the system dependency information. This is also
+where image files are downloaded and expanded if necessary.
+
+``lib`` - Contains the sub-scripts specific to each project. This is
+where the work of managing a project's services is located. Each
+top-level project (Keystone, Nova, etc) has a file here. Additionally
+there are some for system services and project plugins. These
+variables and functions are also used by related projects, such as
+Grenade, to manage a DevStack installation.
+
+``samples`` - Contains a sample of the local files not included in the
+DevStack repo.
+
+``tests`` - The DevStack test suite is rather sparse, mostly consisting
+of tests of specific fragile functions in the ``functions`` and
+``functions-common`` files.
+
+``tools`` - Contains a collection of stand-alone scripts. While these
+may reference the top-level DevStack configuration they can generally be
+run alone.
+
 Scripts
 -------
@@ -110,8 +144,8 @@ follows:
 * Global configuration that may be referenced in ``local.conf``, i.e.
   ``DEST``, ``DATA_DIR``
 * Global service configuration like ``ENABLED_SERVICES``
 * Variables used by multiple services that do not have a clear owner, i.e.
-  ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME``
-  (nova-network and neutron)
+  ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or
+  ``PUBLIC_NETWORK_NAME`` (only neutron but formerly nova-network too)
 * Variables that can not be cleanly declared in a project file due to
   dependency ordering, i.e. the order of sourcing the project files can not
   be changed for other reasons but the earlier file needs to dereference a
@@ -126,14 +160,9 @@ and can stay in the project file.
 Documentation
 -------------
 
-The official DevStack repo on GitHub does not include a gh-pages branch that
-GitHub uses to create static web sites. That branch is maintained in the
-`CloudBuilders DevStack repo`__ mirror that supports the
-http://devstack.org site. This is the primary DevStack
-documentation along with the DevStack scripts themselves.
-
-__ repo_
-.. _repo: https://github.com/cloudbuilders/devstack
+The DevStack repo now contains all of the static pages of devstack.org in
+the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every
+commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/).
 
 All of the scripts are processed with shocco_ to render them with the comments
 as text describing the script below. For this reason we tend to be a little
@@ -144,89 +173,8 @@ uses Markdown headers to divide the script into logical sections.
 
 .. _shocco: https://github.com/dtroyer/shocco/tree/rst_support
 
 The script used to drive shocco is tools/build_docs.sh.
-
-
-Exercises
----------
-
-The scripts in the exercises directory are meant to 1) perform basic operational
-checks on certain aspects of OpenStack; and b) document the use of the
-OpenStack command-line clients.
-
-In addition to the guidelines above, exercise scripts MUST follow the structure
-outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines.
-These scripts are executed serially by ``exercise.sh`` in testing situations.
-
-* Begin and end with a banner that stands out in a sea of script logs to aid
-  in debugging failures, particularly in automated testing situations. If the
-  end banner is not displayed, the script ended prematurely and can be assumed
-  to have failed.
-
-  ::
-
-    echo "**************************************************"
-    echo "Begin DevStack Exercise: $0"
-    echo "**************************************************"
-    ...
-    set +o xtrace
-    echo "**************************************************"
-    echo "End DevStack Exercise: $0"
-    echo "**************************************************"
-
-* The scripts will generally have the shell ``xtrace`` attribute set to display
-  the actual commands being executed, and the ``errexit`` attribute set to exit
-  the script on non-zero exit codes::
-
-    # This script exits on an error so that errors don't compound and you see
-    # only the first error that occurred.
-    set -o errexit
-
-    # Print the commands being run so that we can see the command that triggers
-    # an error. It is also useful for following allowing as the install occurs.
-    set -o xtrace
-
-* Settings and configuration are stored in ``exerciserc``, which must be
-  sourced after ``openrc`` or ``stackrc``::
-
-    # Import exercise configuration
-    source $TOP_DIR/exerciserc
-
-* There are a couple of helper functions in the common ``functions`` sub-script
-  that will check for non-zero exit codes and unset environment variables and
-  print a message and exit the script. These should be called after most client
-  commands that are not otherwise checked to short-circuit long timeouts
-  (instance boot failure, for example)::
-
-    swift post $CONTAINER
-    die_if_error "Failure creating container $CONTAINER"
-
-    FLOATING_IP=`euca-allocate-address | cut -f2`
-    die_if_not_set FLOATING_IP "Failure allocating floating IP"
-
-* If you want an exercise to be skipped when for example a service wasn't
-  enabled for the exercise to be run, you can exit your exercise with the
-  special exitcode 55 and it will be detected as skipped.
-
-* The exercise scripts should only use the various OpenStack client binaries to
-  interact with OpenStack. This specifically excludes any ``*-manage`` tools
-  as those assume direct access to configuration and databases, as well as direct
-  database access from the exercise itself.
-
-* If specific configuration needs to be present for the exercise to complete,
-  it should be staged in ``stack.sh``, or called from ``stack.sh`` (see
-  ``files/keystone_data.sh`` for an example of this).
-
-* The ``OS_*`` environment variables should be the only ones used for all
-  authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
-
-.. _CLIAuth: http://wiki.openstack.org/CLIAuth
-
-* The exercise MUST clean up after itself if successful. If it is not successful,
-  it is assumed that state will be left behind; this allows a chance for developers
-  to look around and attempt to debug the problem. The exercise SHOULD clean up
-  or graciously handle possible artifacts left over from previous runs if executed
-  again. It is acceptable to require a reboot or even a re-install of DevStack
-  to restore a clean test environment.
+The complete docs build is also handled with tox -edocs per the
+OpenStack project standard.
 
@@ -235,8 +183,12 @@ Bash Style Guidelines
 =====================
 DevStack defines a bash set of best practices for maintaining large
 collections of bash scripts. These should be considered as part of the
 review process.
 
-We have a preliminary enforcing script for this called bash8 (only a
-small number of these rules are enforced).
+DevStack uses the bashate_ style checker
+to enforce basic guidelines, similar to pep8 and flake8 tools for Python. The
+list below is not complete for what bashate checks, nor is it all checked
+by bashate. So many lines of code, so little time.
+
+.. _bashate: https://pypi.org/project/bashate/
 
 Whitespace Rules
 ----------------
@@ -248,6 +200,7 @@ Whitespace Rules
 
 Control Structure Rules
 -----------------------
+
 - then should be on the same line as the if
 - do should be on the same line as the for
 
@@ -269,6 +222,7 @@ Example::
 
 Variables and Functions
 -----------------------
+
 - functions should be used whenever possible for clarity
 - functions should use ``local`` variables as much as possible to
   ensure they are isolated from the rest of the environment
@@ -277,3 +231,68 @@
 - function names should_have_underscores, NotCamelCase.
 - functions should be declared as per the regex ^function foo {$
   with code starting on the next line
+
+
+Review Criteria
+---------------
+
+There are some broad criteria that will be followed when reviewing
+your change:
+
+* **Is it passing tests** -- your change will not be reviewed
+  thoroughly unless the official CI has run successfully against it.
+
+* **Does this belong in DevStack** -- DevStack reviewers have a
+  default position of "no" but are ready to be convinced by your
+  change.
+
+  For very large changes, you should consider :doc:`the plugins system
+  <plugins>` to see if your code is better abstracted from the main
+  repository.
+
+  For smaller changes, you should always consider if the change can be
+  encapsulated by per-user settings in ``local.conf``. A common example
+  is adding a simple config-option to an ``ini`` file. Specific flags
+  are not usually required for this, although adding documentation
+  about how to achieve a larger goal (which might include turning on
+  various settings, etc) is always welcome.
+
+* **Work-arounds** -- often things get broken and DevStack can be in a
+  position to fix them. Work-arounds are fine, but should be
+  presented in the context of fixing the root-cause of the problem.
+  This means it is well-commented in the code and the change-log and
+  most likely includes links to changes or bugs that fix the
+  underlying problem.
+
+* **Should this be upstream** -- DevStack generally does not override
+  default choices provided by projects and attempts to not
+  unexpectedly modify behavior.
+
+* **Context in commit messages** -- DevStack touches many different
+  areas and reviewers need context around changes to make good
+  decisions. We also always want it to be clear to someone -- perhaps
+  even years from now -- why we were motivated to make a change at the
+  time.
+
+
+Making Changes, Testing, and CI
+-------------------------------
+
+Changes to Devstack are tested by automated continuous integration jobs
+that run on a variety of Linux Distros using a handful of common
+configurations. What this means is that every change to Devstack is
+self-testing. One major benefit of this is that developers do not
+typically need to add new non-voting test jobs to add features to
+Devstack. Instead the features can be added, then if testing passes
+with the feature enabled the change is ready to merge (pending code
+review).
+
+A concrete example of this was the switch from screen based service
+management to systemd based service management. No new jobs were
+created for this. Instead the features were added to devstack, tested
+locally and in CI using a change that enabled the feature, then once
+the enabling change was passing and the new behavior communicated and
+documented it was merged.
+
+Using this process has been proven to be effective and leads to
+quicker implementation of desired features.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..970d8009eb
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,100 @@
+# DevStack Makefile of Sanity
+
+# Interesting targets:
+# ds-remote - Create a Git remote for use by ds-push and ds-pull targets
+#             DS_REMOTE_URL must be set on the command line
+#
+# ds-push - Merge a list of branches taken from .ds-test and push them
+#           to the ds-remote repo in ds-test branch
+#
+# ds-pull - Pull the remote ds-test branch into a fresh local branch
+#
+# refresh - Performs a sequence of unstack, ds-pull and stack
+
+# Duplicated from stackrc for now
+DEST=/opt/stack
+
+all:
+	@echo "This just saved you from a terrible mistake!"
+
+# Do Some Work
+stack:
+	./stack.sh
+
+unstack:
+	./unstack.sh
+
+docs:
+	tox -edocs
+
+# Just run the shocco source formatting build
+docs-build:
+	INSTALL_SHOCCO=True tools/build_docs.sh
+
+# Just run the Sphinx docs build
+docs-rst:
+	python setup.py build_sphinx
+
+# Run the bashate test
+bashate:
+	tox -ebashate
+
+# Run the function tests
+test:
+	tests/test_ini_config.sh
+	tests/test_meta_config.sh
+	tests/test_ip.sh
+	tests/test_refs.sh
+
+# Spiff up the place a bit
+clean:
+	./clean.sh
+	rm -rf accrc doc/build test*-e *.egg-info
+
+# Clean out the cache too
+realclean: clean
+	rm -rf files/cirros*.tar.gz files/Fedora*.qcow2
+
+# Repo stuffs
+
+pull:
+	git pull
+
+
+# These repo targets are used to maintain a branch in a remote repo that
+# consists of one or more local branches merged and pushed to the remote.
+# This is most useful for iterative testing on multiple or remote servers
+# while keeping the working repo local.
+#
+# It requires:
+# * a remote pointing to a remote repo, often GitHub is used for this
+# * a branch name to be used on the remote
+# * a local file containing the list of local branches to be merged into
+#   the remote branch
+
+GIT_REMOTE_NAME=ds-test
+GIT_REMOTE_BRANCH=ds-test
+
+# Push the current branch to a remote named ds-test
+ds-push:
+	git checkout master
+	git branch -D $(GIT_REMOTE_BRANCH) || true
+	git checkout -b $(GIT_REMOTE_BRANCH)
+	for i in $(shell cat .$(GIT_REMOTE_BRANCH) | grep -v "^#" | grep "[^ ]"); do \
+		git merge --no-edit $$i; \
+	done
+	git push -f $(GIT_REMOTE_NAME) HEAD:$(GIT_REMOTE_BRANCH)
+
+# Pull the ds-test branch
+ds-pull:
+	git checkout master
+	git branch -D $(GIT_REMOTE_BRANCH) || true
+	git pull $(GIT_REMOTE_NAME) $(GIT_REMOTE_BRANCH)
+	git checkout $(GIT_REMOTE_BRANCH)
+
+# Add the remote - set DS_REMOTE_URL=https://example.com/ on the command line
+ds-remote:
+	git remote add $(GIT_REMOTE_NAME) $(DS_REMOTE_URL)
+
+# Refresh the current DevStack checkout and re-initialize
+refresh: unstack ds-pull stack
diff --git a/README.md b/README.md
deleted file mode 100644
index 37b960ef3d..0000000000
--- a/README.md
+++ /dev/null
@@ -1,403 +0,0 @@
-DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
-
-# Goals
-
-* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora
-  environment
-* To describe working configurations of OpenStack (which code branches
-  work together? what do config files look like for those branches?)
-* To make it easier for developers to dive into OpenStack so that they can
-  productively contribute without having to understand every part of the
-  system at once
-* To make it easy to prototype cross-project features
-* To provide an environment for the OpenStack CI testing on every commit
-  to the projects
-
-Read more at http://devstack.org.
-
-IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you
-execute before you run them, as they install software and will alter your
-networking configuration. We strongly recommend that you run `stack.sh`
-in a clean and disposable vm when you are first getting started.
-
-# Versions
-
-The DevStack master branch generally points to trunk versions of OpenStack
-components. For older, stable versions, look for branches named
-stable/[release] in the DevStack repo. For example, you can do the
-following to create a grizzly OpenStack cloud:
-
-    git checkout stable/grizzly
-    ./stack.sh
-
-You can also pick specific OpenStack project releases by setting the appropriate
-`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in
-`stackrc` for the default set). Usually just before a release there will be
-milestone-proposed branches that need to be tested::
-
-    GLANCE_REPO=git://git.openstack.org/openstack/glance.git
-    GLANCE_BRANCH=milestone-proposed
-
-# Start A Dev Cloud
-
-Installing in a dedicated disposable VM is safer than installing on your
-dev machine! Plus you can pick one of the supported Linux distros for
-your VM. To start a dev cloud run the following NOT AS ROOT (see
-**DevStack Execution Environment** below for more on user accounts):
-
-    ./stack.sh
-
-When the script finishes executing, you should be able to access OpenStack
-endpoints, like so:
-
-* Horizon: http://myhost/
-* Keystone: http://myhost:5000/v2.0/
-
-We also provide an environment file that you can use to interact with your
-cloud via CLI:
-
-    # source openrc file to load your environment with OpenStack CLI creds
-    . openrc
-    # list instances
-    nova list
-
-If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools:
-
-    # source eucarc to generate EC2 credentials and set up the environment
-    . eucarc
-    # list instances using ec2 api
-    euca-describe-instances
-
-# DevStack Execution Environment
-
-DevStack runs rampant over the system it runs on, installing things and
-uninstalling other things. Running this on a system you care about is a recipe
-for disappointment, or worse. Alas, we're all in the virtualization business
-here, so run it in a VM. And take advantage of the snapshot capabilities
-of your hypervisor of choice to reduce testing cycle times. You might even save
-enough time to write one more feature before the next feature freeze...
-
-``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo``
-for all of those tasks. However, it needs to be not-root for most of its
-work and for all of the OpenStack services. ``stack.sh`` specifically
-does not run if started as root.
-
-This is a recent change (Oct 2013) from the previous behaviour of
-automatically creating a ``stack`` user. Automatically creating
-user accounts is not the right response to running as root, so
-that bit is now an explicit step using ``tools/create-stack-user.sh``.
-Run that (as root!) or just check it out to see what DevStack's
-expectations are for the account it runs under. Many people simply
-use their usual login (the default 'ubuntu' login on a UEC image
-for example).
-
-# Customizing
-
-You can override environment variables used in `stack.sh` by creating file
-name `local.conf` with a ``localrc`` section as shown below. It is likely
-that you will need to do this to tweak your networking configuration should
-you need to access your cloud from a different host.
-
-    [[local|localrc]]
-    VARIABLE=value
-
-See the **Local Configuration** section below for more details.
-
-# Database Backend
-
-Multiple database backends are available. The available databases are defined
-in the lib/databases directory.
-`mysql` is the default database, choose a different one by putting the
-following in the `localrc` section:
-
-    disable_service mysql
-    enable_service postgresql
-
-`mysql` is the default database.
-
-# RPC Backend
-
-Multiple RPC backends are available. Currently, this
-includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
-choice may be selected via the `localrc` section.
-
-Note that selecting more than one RPC backend will result in a failure.
-
-Example (ZeroMQ):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
-
-Example (Qpid):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
-
-# Apache Frontend
-
-Apache web server is enabled for wsgi services by setting
-`APACHE_ENABLED_SERVICES` in your ``localrc`` section. Remember to
-enable these services at first as above.
-
-    APACHE_ENABLED_SERVICES+=key,swift
-
-# Swift
-
-Swift is disabled by default. When enabled, it is configured with
-only one replica to avoid being IO/memory intensive on a small
-vm. When running with only one replica the account, container and
-object services will run directly in screen. The others services like
-replicator, updaters or auditor runs in background.
-
-If you would like to enable Swift you can add this to your `localrc` section:
-
-    enable_service s-proxy s-object s-container s-account
-
-If you want a minimal Swift install with only Swift and Keystone you
-can have this instead in your `localrc` section:
-
-    disable_all_services
-    enable_service key mysql s-proxy s-object s-container s-account
-
-If you only want to do some testing of a real normal swift cluster
-with multiple replicas you can do so by customizing the variable
-`SWIFT_REPLICAS` in your `localrc` section (usually to 3).
-
-# Swift S3
-
-If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will
-install the swift3 middleware emulation. Swift will be configured to
-act as a S3 endpoint for Keystone so effectively replacing the
-`nova-objectstore`.
-
-Only Swift proxy server is launched in the screen session all other
-services are started in background and managed by `swift-init` tool.
-
-# Neutron
-
-Basic Setup
-
-In order to enable Neutron a single node setup, you'll need the
-following settings in your `local.conf`:
-
-    disable_service n-net
-    enable_service q-svc
-    enable_service q-agt
-    enable_service q-dhcp
-    enable_service q-l3
-    enable_service q-meta
-    enable_service q-metering
-    # Optional, to enable tempest configuration as part of DevStack
-    enable_service tempest
-
-Then run `stack.sh` as normal.
-
-DevStack supports setting specific Neutron configuration flags to the
-service, Open vSwitch plugin and LinuxBridge plugin configuration files.
-To make use of this feature, the settings can be added to ``local.conf``.
-The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed
-in the near future.
-The ``local.conf`` headers for the replacements are:
-
-* ``Q_SRV_EXTRA_OPTS``:
-
-    [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [linuxbridge]  # or [ovs]
-
-* ``Q_AGENT_EXTRA_AGENT_OPTS``:
-
-    [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [agent]
-
-* ``Q_AGENT_EXTRA_SRV_OPTS``:
-
-    [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [linuxbridge]  # or [ovs]
-
-* ``Q_SRV_EXTRA_DEFAULT_OPTS``:
-
-    [[post-config|$NEUTRON_CONF]]
-    [DEFAULT]
-
-Example extra config in `local.conf`:
-
-    [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [agent]
-    tunnel_type=vxlan
-    vxlan_udp_port=8472
-
-    [[post-config|$NEUTRON_CONF]]
-    [DEFAULT]
-    tenant_network_type=vxlan
-
-DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin
-can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This
-is a simple way to configure the ml2 plugin:
-
-    # VLAN configuration
-    Q_PLUGIN=ml2
-    ENABLE_TENANT_VLANS=True
-
-    # GRE tunnel configuration
-    Q_PLUGIN=ml2
-    ENABLE_TENANT_TUNNELS=True
-
-    # VXLAN tunnel configuration
-    Q_PLUGIN=ml2
-    Q_ML2_TENANT_NETWORK_TYPE=vxlan
-
-The above will default in DevStack to using the OVS on each compute host.
-To change this, set the `Q_AGENT` variable to the agent you want to run
-(e.g. linuxbridge).
-
-    Variable Name                    Notes
-    ----------------------------------------------------------------------------
-    Q_AGENT                          This specifies which agent to run with the
-                                     ML2 Plugin (either `openvswitch` or `linuxbridge`).
-    Q_ML2_PLUGIN_MECHANISM_DRIVERS   The ML2 MechanismDrivers to load. The default
-                                     is none. Note, ML2 will work with the OVS
-                                     and LinuxBridge agents by default.
-    Q_ML2_PLUGIN_TYPE_DRIVERS        The ML2 TypeDrivers to load. Defaults to
-                                     all available TypeDrivers.
-    Q_ML2_PLUGIN_GRE_TYPE_OPTIONS    GRE TypeDriver options. Defaults to none.
-    Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS  VXLAN TypeDriver options. Defaults to none.
-    Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS   VLAN TypeDriver options. Defaults to none.
-
-# Heat
-
-Heat is disabled by default. To enable it you'll need the following settings
-in your `localrc` section:
-
-    enable_service heat h-api h-api-cfn h-api-cw h-eng
-
-Heat can also run in standalone mode, and be configured to orchestrate
-on an external OpenStack cloud. To launch only Heat in standalone mode
-you'll need the following settings in your `localrc` section:
-
-    disable_all_services
-    enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng
-    HEAT_STANDALONE=True
-    KEYSTONE_SERVICE_HOST=...
-    KEYSTONE_AUTH_HOST=...
-
-# Tempest
-
-If tempest has been successfully configured, a basic set of smoke
-tests can be run as follows:
-
-    $ cd /opt/stack/tempest
-    $ nosetests tempest/scenario/test_network_basic_ops.py
-
-# DevStack on Xenserver
-
-If you would like to use Xenserver as the hypervisor, please refer
-to the instructions in `./tools/xen/README.md`.
-
-# Additional Projects
-
-DevStack has a hook mechanism to call out to a dispatch script at specific
-points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This
-allows upper-layer projects, especially those that the lower layer projects
-have no dependency on, to be added to DevStack without modifying the core
-scripts. Tempest is built this way as an example of how to structure the
-dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md`
-for more information.
-
-# Multi-Node Setup
-
-A more interesting setup involves running multiple compute nodes, with Neutron
-networks connecting VMs on different compute nodes.
-You should run at least one "controller node", which should have a `stackrc`
-that includes at least:
-
-    disable_service n-net
-    enable_service q-svc
-    enable_service q-agt
-    enable_service q-dhcp
-    enable_service q-l3
-    enable_service q-meta
-    enable_service neutron
-
-You likely want to change your `localrc` section to run a scheduler that
-will balance VMs across hosts:
-
-    SCHEDULER=nova.scheduler.simple.SimpleScheduler
-
-You can then run many compute nodes, each of which should have a `stackrc`
-which includes the following, with the IP address of the above controller node:
-
-    ENABLED_SERVICES=n-cpu,rabbit,g-api,neutron,q-agt
-    SERVICE_HOST=[IP of controller node]
-    MYSQL_HOST=$SERVICE_HOST
-    RABBIT_HOST=$SERVICE_HOST
-    Q_HOST=$SERVICE_HOST
-    MATCHMAKER_REDIS_HOST=$SERVICE_HOST
-
-# Cells
-
-Cells is a new scaling option with a full spec at:
-http://wiki.openstack.org/blueprint-nova-compute-cells.
-
-To setup a cells environment add the following to your `localrc` section:
-
-    enable_service n-cell
-
-Be aware that there are some features currently missing in cells, one notable
-one being security groups. The exercises have been patched to disable
-functionality not supported by cells.
-
-
-# Local Configuration
-
-Historically DevStack has used ``localrc`` to contain all local configuration
-and customizations. More and more of the configuration variables available for
-DevStack are passed-through to the individual project configuration files.
-The old mechanism for this required specific code for each file and did not
-scale well. This is handled now by a master local configuration file.
-
-# local.conf
-
-The new config file ``local.conf`` is an extended-INI format that introduces
-a new meta-section header that provides some additional information such
-as a phase name and destination config filename:
-
-    [[ <phase> | <config-file-name> ]]
-
-where ``<phase>`` is one of a set of phase names defined by ``stack.sh``
-and ``<config-file-name>`` is the configuration filename. The filename is
-eval'ed in the ``stack.sh`` context so all environment variables are
-available and may be used. Using the project config file variables in
-the header is strongly suggested (see the ``NOVA_CONF`` example below).
-If the path of the config file does not exist it is skipped.
-
-The defined phases are:
-
-* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced
-* **post-config** - runs after the layer 2 services are configured
-  and before they are started
-* **extra** - runs after services are started and before any files
-  in ``extra.d`` are executed
-* **post-extra** - runs after files in ``extra.d`` are executed
-
-The file is processed strictly in sequence; meta-sections may be specified more
-than once but if any settings are duplicated the last to appear in the file
-will be used.
-
-    [[post-config|$NOVA_CONF]]
-    [DEFAULT]
-    use_syslog = True
-
-    [osapi_v3]
-    enabled = False
-
-A specific meta-section ``local|localrc`` is used to provide a default
-``localrc`` file (actually ``.localrc.auto``). This allows all custom
-settings for DevStack to be contained in a single file. If ``localrc``
-exists it will be used instead to preserve backward-compatibility.
-
-    [[local|localrc]]
-    FIXED_RANGE=10.254.1.0/24
-    ADMIN_PASSWORD=speciale
-    LOGFILE=$DEST/logs/stack.sh.log
-
-Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT*
-start with a ``/`` (slash) character.
A slash will need to be added: - - [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..86b85da956 --- /dev/null +++ b/README.rst @@ -0,0 +1,97 @@ +DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud +from git source trees. + +Goals +===== + +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux + environment +* To describe working configurations of OpenStack (which code branches + work together? what do config files look like for those branches?) +* To make it easier for developers to dive into OpenStack so that they can + productively contribute without having to understand every part of the + system at once +* To make it easy to prototype cross-project features +* To provide an environment for the OpenStack CI testing on every commit + to the projects + +Read more at https://docs.openstack.org/devstack/latest + +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you +execute before you run them, as they install software and will alter your +networking configuration. We strongly recommend that you run `stack.sh` +in a clean and disposable vm when you are first getting started. + +Versions +======== + +The DevStack master branch generally points to trunk versions of OpenStack +components. For older, stable versions, look for branches named +stable/[release] in the DevStack repo. For example, you can do the +following to create a Zed OpenStack cloud:: + + git checkout stable/zed + ./stack.sh + +You can also pick specific OpenStack project releases by setting the appropriate +`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in +`stackrc` for the default set). Usually just before a release there will be +milestone-proposed branches that need to be tested:: + + GLANCE_REPO=https://opendev.org/openstack/glance.git + GLANCE_BRANCH=milestone-proposed + +Start A Dev Cloud +================= + +Installing in a dedicated disposable VM is safer than installing on your +dev machine! Plus you can pick one of the supported Linux distros for +your VM. To start a dev cloud run the following NOT AS ROOT (see +**DevStack Execution Environment** below for more on user accounts): + + ./stack.sh + +When the script finishes executing, you should be able to access OpenStack +endpoints, like so: + +* Horizon: http://myhost/ +* Keystone: http://myhost/identity/v3/ + +We also provide an environment file that you can use to interact with your +cloud via CLI:: + + # source openrc file to load your environment with OpenStack CLI creds + . openrc + # list instances + openstack server list + +DevStack Execution Environment +============================== + +DevStack runs rampant over the system it runs on, installing things and +uninstalling other things. Running this on a system you care about is a recipe +for disappointment, or worse. Alas, we're all in the virtualization business +here, so run it in a VM. And take advantage of the snapshot capabilities +of your hypervisor of choice to reduce testing cycle times. You might even save +enough time to write one more feature before the next feature freeze... + +``stack.sh`` needs to have root access for a lot of tasks, but uses +``sudo`` for all of those tasks. However, it needs to be not-root for +most of its work and for all of the OpenStack services. ``stack.sh`` +specifically does not run if started as root. 
+
+DevStack will not automatically create the user, but provides a helper
+script in ``tools/create-stack-user.sh``. Run that (as root!) or just
+check it out to see what DevStack's expectations are for the account
+it runs under. Many people simply use their usual login (the default
+'ubuntu' login on a UEC image for example).
+
+Customizing
+===========
+
+DevStack can be extensively configured via the configuration file
+`local.conf`. It is likely that you will need to provide and modify
+this file if you want anything other than the most basic setup. Start
+by reading the `configuration guide
+<https://docs.openstack.org/devstack/latest/configuration.html>`_
+for details of the configuration file and the many available options.
diff --git a/clean.sh b/clean.sh
index 7851da3227..092f557a88 100755
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 
 # **clean.sh**
 
@@ -18,7 +18,7 @@ source $TOP_DIR/functions
 FILES=$TOP_DIR/files
 
 # Load local configuration
 source $TOP_DIR/stackrc
 
 # Get the variables that are set in stack.sh
 if [[ -r $TOP_DIR/.stackenv ]]; then
@@ -26,7 +26,7 @@ if [[ -r $TOP_DIR/.stackenv ]]; then
 fi
 
 # Determine what system we are running on. This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
 # and ``DISTRO``
 GetDistro
 
@@ -40,20 +40,18 @@ source $TOP_DIR/lib/rpc_backend
 source $TOP_DIR/lib/tls
-source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/libraries
+source $TOP_DIR/lib/lvm
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/ceilometer
-source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/baremetal
-source $TOP_DIR/lib/ironic
-source $TOP_DIR/lib/trove
+set -o xtrace
 
 # Extras Source
 # --------------
@@ -65,18 +63,16 @@ if [[ -d $TOP_DIR/extras.d ]]; then
     done
 fi
 
-# See if there is anything running...
-# need to adapt when run_service is merged
-SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
-if [[ -n "$SESSION" ]]; then
-    # Let unstack.sh do its thing first
-    $TOP_DIR/unstack.sh --all
-fi
+# Let unstack.sh do its thing first
+$TOP_DIR/unstack.sh --all
 
 # Run extras
 # ==========
 
 # Phase: clean
+load_plugin_settings
+run_phase clean
+
 if [[ -d $TOP_DIR/extras.d ]]; then
     for i in $TOP_DIR/extras.d/*.sh; do
        [[ -r $i ]] && source $i clean
@@ -84,13 +80,17 @@ fi
 
 # Clean projects
-cleanup_oslo
-cleanup_cinder
+
+# BUG: cinder tgt doesn't exit cleanly if it's not running.
+cleanup_cinder || /bin/true + cleanup_glance cleanup_keystone cleanup_nova +cleanup_placement cleanup_neutron cleanup_swift +cleanup_horizon if is_service_enabled ldap; then cleanup_ldap @@ -102,7 +102,7 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th fi # Clean out /etc -sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron +sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/ # Clean out tgt sudo rm -f /etc/tgt/conf.d/* @@ -111,18 +111,38 @@ sudo rm -f /etc/tgt/conf.d/* cleanup_rpc_backend cleanup_database -# Clean out data, logs and status -LOGDIR=$(dirname "$LOGFILE") -sudo rm -rf $DATA_DIR $LOGDIR $DEST/status -if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then - sudo rm -rf $SCREEN_LOGDIR +# Clean out data and status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async + +# Clean out the log file and log directories +if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then + sudo rm -f $LOGFILE fi +if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then + sudo rm -rf $LOGDIR +fi + +# Clean out the systemd unit files. +sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete +# Make systemd aware of the deletion. +$SYSTEMCTL daemon-reload + +# Clean up venvs +DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" +rm -rf $DIRS_TO_CLEAN # Clean up files -FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" +FILES_TO_CLEAN=".localrc.auto .localrc.password " +FILES_TO_CLEAN+="docs/files docs/html shocco/ " +FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* " FILES_TO_CLEAN+=".stackenv .prereqs" for file in $FILES_TO_CLEAN; do - rm -f $TOP_DIR/$file + rm -rf $TOP_DIR/$file done + +rm -rf ~/.config/openstack + +# Clear any fstab entries made +sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header new file mode 100644 index 0000000000..576dbbd35a --- /dev/null +++ b/data/devstack-plugins-registry.header @@ -0,0 +1,21 @@ +.. Note to patch submitters: + + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # + + ** Plugins are found automatically and added to this list ** + + This file is created by a periodic proposal job. You should not + edit this file. + + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. + +========================== + DevStack Plugin Registry +========================== + +The following list is an automatically-generated collection of +available DevStack plugins. This includes, but is not limited to, +official OpenStack projects. 
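+Plugins from this registry are typically enabled in the ``localrc``
+section of ``local.conf`` with the ``enable_plugin`` directive, which
+takes the plugin name, its git URL, and an optional git ref. A minimal
+sketch (the heat plugin is used here purely as an illustration;
+substitute the plugin you actually want):
+
+::
+
+    [[local|localrc]]
+    enable_plugin heat https://opendev.org/openstack/heat master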
diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..7980b93ed7 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,7 @@ +pbr>=2.0.0,!=2.1.0 + +Pygments +docutils +sphinx>=2.0.0,!=2.1.0 # BSD +openstackdocstheme>=2.2.1 # Apache-2.0 +zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/devstack.png b/doc/source/assets/images/devstack.png new file mode 100644 index 0000000000..ca6297e127 Binary files /dev/null and b/doc/source/assets/images/devstack.png differ diff --git a/doc/source/assets/images/logo-blue.png b/doc/source/assets/images/logo-blue.png new file mode 100644 index 0000000000..6b363afeee Binary files /dev/null and b/doc/source/assets/images/logo-blue.png differ diff --git a/doc/source/assets/images/logo-blue.xcf b/doc/source/assets/images/logo-blue.xcf new file mode 100644 index 0000000000..fff75ee21d Binary files /dev/null and b/doc/source/assets/images/logo-blue.xcf differ diff --git a/doc/source/assets/images/logo.png b/doc/source/assets/images/logo.png new file mode 100644 index 0000000000..9c2087e44f Binary files /dev/null and b/doc/source/assets/images/logo.png differ diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 index 0000000000..7730ca93f1 Binary files /dev/null and b/doc/source/assets/images/neutron-network-1.png differ diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000..919935119d Binary files /dev/null and b/doc/source/assets/images/neutron-network-2.png differ diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000..34f03ed5c9 Binary files /dev/null and b/doc/source/assets/images/neutron-network-3.png differ diff --git a/doc/source/assets/images/screen_session_1.png b/doc/source/assets/images/screen_session_1.png new file mode 100644 index 0000000000..6ad6752bb1 Binary files /dev/null and b/doc/source/assets/images/screen_session_1.png differ diff --git a/doc/source/assets/images/small_logo.png b/doc/source/assets/images/small_logo.png new file mode 100644 index 0000000000..181459fe45 Binary files /dev/null and b/doc/source/assets/images/small_logo.png differ diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf new file mode 120000 index 0000000000..cfc2a4e9d8 --- /dev/null +++ b/doc/source/assets/local.conf @@ -0,0 +1 @@ +../../../samples/local.conf \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000000..bb0357286a --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# +# Tempest documentation build configuration file, created by +# sphinx-quickstart on Tue May 21 17:43:32 2013. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', +] + +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/devstack' +openstackdocs_pdf_link = True +openstackdocs_bug_project = 'devstack' +openstackdocs_bug_tag = '' +openstackdocs_auto_name = False +# This repo is not tagged, so don't set versions +openstackdocs_auto_version = False +version = '' +release = '' + +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'DevStack' +copyright = u'2014, OpenStack Foundation' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['DevStack-doc.'] + +# -- Options for man page output ---------------------------------------------- +man_pages = [] + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+#html_favicon = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +html_domain_indices = False + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'DevStack-doc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'doc-devstack.tex', u'DevStack Docs', + u'OpenStack DevStack Team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'DevStack-doc', u'DevStack Docs', + u'OpenStack DevStack Team', 'DevStack-doc', 'DevStack documentation', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + + +# -- Options for Epub output --------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = u'DevStack Documentation' +epub_author = u'OpenStack DevStack Team' +epub_publisher = u'OpenStack DevStack Team' +epub_copyright = u'2014, OpenStack DevStack Team' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. 
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
new file mode 100644
index 0000000000..3cfba716ca
--- /dev/null
+++ b/doc/source/configuration.rst
@@ -0,0 +1,806 @@
+=============
+Configuration
+=============
+
+.. contents::
+   :local:
+   :depth: 1
+
+local.conf
+==========
+
+DevStack configuration is modified via the file ``local.conf``. It is
+a modified INI format file that introduces a meta-section header to
+carry additional information regarding the configuration files to be
+changed.
+
+A sample is provided in ``devstack/samples``.
+
+The new header is similar to a normal INI section header but with double
+brackets (``[[ ... ]]``) and two internal fields separated by a pipe
+(``|``). Note that there are no spaces between the double brackets and the
+internal fields. Likewise, there are no spaces between the pipe and the
+internal fields:
+::
+
+    '[[' <phase> '|' <config-file-name> ']]'
+
+where ``<phase>`` is one of a set of phase names defined by ``stack.sh``
+and ``<config-file-name>`` is the configuration filename. The filename
+is eval'ed in the ``stack.sh`` context so all environment variables are
+available and may be used. Using the project config file variables in
+the header is strongly suggested (see the ``NOVA_CONF`` example below).
+If the path of the config file does not exist it is skipped.
+
+The defined phases are:
+
+- **local** - extracts ``localrc`` from ``local.conf`` before
+  ``stackrc`` is sourced
+- **post-config** - runs after the layer 2 services are configured and
+  before they are started
+- **extra** - runs after services are started and before any files in
+  ``extra.d`` are executed
+- **post-extra** - runs after files in ``extra.d`` are executed
+- **test-config** - runs after tempest (and plugins) are configured
+
+The file is processed strictly in sequence; meta-sections may be
+specified more than once but if any settings are duplicated the last to
+appear in the file will be used.
+
+::
+
+    [[post-config|$NOVA_CONF]]
+    [DEFAULT]
+    use_syslog = True
+
+    [osapi_v3]
+    enabled = False
+
+A specific meta-section ``local|localrc`` is used to provide a default
+``localrc`` file (actually ``.localrc.auto``). This allows all custom
+settings for DevStack to be contained in a single file. If ``localrc``
+exists it will be used instead to preserve backward-compatibility.
+
+::
+
+    [[local|localrc]]
+    IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24
+    ADMIN_PASSWORD=speciale
+    LOGFILE=$DEST/logs/stack.sh.log
+
+Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to
+*NOT* start with a ``/`` (slash) character. A slash will need to be
+added:
+
+::
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+
+Also note that the ``localrc`` section is sourced as a shell script
+fragment and MUST conform to the shell requirements, specifically no
+whitespace around ``=`` (equals).
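+Putting these pieces together, a single ``local.conf`` can carry the
+``localrc`` defaults and any number of ``post-config`` fragments. A
+minimal sketch that combines the examples above (all values are
+illustrative only):
+
+::
+
+    [[local|localrc]]
+    ADMIN_PASSWORD=speciale
+    LOGFILE=$DEST/logs/stack.sh.log
+
+    [[post-config|$NOVA_CONF]]
+    [DEFAULT]
+    use_syslog = True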
+
+openrc
+======
+
+``openrc`` configures login credentials suitable for use with the
+OpenStack command-line tools. ``openrc`` sources ``stackrc`` at the
+beginning (which in turn sources the ``localrc`` section of
+``local.conf``) in order to pick up ``HOST_IP`` and/or ``SERVICE_HOST``
+to use in the endpoints. The values shown below are the default values.
+
+OS\_PROJECT\_NAME (OS\_TENANT\_NAME)
+    Keystone has standardized the term *project* as the entity that owns
+    resources. In some places references still exist to the previous term
+    *tenant* for this use. Also, *project\_name* is preferred to
+    *project\_id*. OS\_TENANT\_NAME remains supported for compatibility
+    with older tools.
+
+    ::
+
+        OS_PROJECT_NAME=demo
+
+OS\_USERNAME
+    In addition to the owning entity (project), OpenStack calls the entity
+    performing the action *user*.
+
+    ::
+
+        OS_USERNAME=demo
+
+OS\_PASSWORD
+    Keystone's default authentication requires a password be provided.
+    The usual cautions about putting passwords in environment variables
+    apply; for most DevStack uses this may be an acceptable tradeoff.
+
+    ::
+
+        OS_PASSWORD=secret
+
+HOST\_IP, SERVICE\_HOST
+    Set API endpoint host using ``HOST_IP``. ``SERVICE_HOST`` may also
+    be used to specify the endpoint, which is convenient for some
+    ``local.conf`` configurations. Typically, ``HOST_IP`` is set in the
+    ``localrc`` section.
+
+    ::
+
+        HOST_IP=127.0.0.1
+        SERVICE_HOST=$HOST_IP
+
+OS\_AUTH\_URL
+    Authenticating against an OpenStack cloud using Keystone returns a
+    *Token* and *Service Catalog*. The catalog contains the endpoints
+    for all services the user/tenant has access to - including Nova,
+    Glance, Keystone and Swift.
+
+    ::
+
+        OS_AUTH_URL=http://$SERVICE_HOST:5000/v3
+
+KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG
+    Set command-line client log level to ``DEBUG``. These are commented
+    out by default.
+
+    ::
+
+        # export KEYSTONECLIENT_DEBUG=1
+        # export NOVACLIENT_DEBUG=1
+
+
+
+.. _minimal-configuration:
+
+Minimal Configuration
+=====================
+
+While ``stack.sh`` is happy to run without a ``localrc`` section in
+``local.conf``, devlife is better when there are a few minimal variables
+set. This is an example of a minimal configuration that touches the
+values that most often need to be set.
+
+- no logging
+- pre-set the passwords to prevent interactive prompts
+- move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE``
+  and ``FLOATING_RANGE``, commented out below)
+- set the host IP if detection is unreliable (``HOST_IP``, commented
+  out below)
+
+::
+
+    [[local|localrc]]
+    ADMIN_PASSWORD=secret
+    DATABASE_PASSWORD=$ADMIN_PASSWORD
+    RABBIT_PASSWORD=$ADMIN_PASSWORD
+    SERVICE_PASSWORD=$ADMIN_PASSWORD
+    #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24
+    #FLOATING_RANGE=192.168.20.0/25
+    #HOST_IP=10.3.4.5
+
+If the ``*_PASSWORD`` variables are not set here you will be prompted to
+enter values for them by ``stack.sh``.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
+The network ranges must not overlap with any networks in use on the
+host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly
+used for both the local networking and Nova's fixed and floating ranges.
+
+``HOST_IP`` is normally detected on the first run of ``stack.sh`` but
+often is indeterminate on later runs due to the IP being moved from an
+Ethernet interface to a bridge on the host. Setting it here also makes it
+available for ``openrc`` to set ``OS_AUTH_URL``.
``HOST_IP`` is not set
+by default.
+
+``HOST_IPV6`` is normally detected on the first run of ``stack.sh`` but
+will not be set if there is no IPv6 address on the default Ethernet interface.
+Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``.
+``HOST_IPV6`` is not set by default.
+
+For architecture-specific configurations that differ from the x86 default
+here, see `arch-configuration`_.
+
+Historical Notes
+================
+
+Historically DevStack obtained all local configuration and
+customizations from a ``localrc`` file. In Oct 2013 the
+``local.conf`` configuration method was introduced (in `review 46768
+<https://review.openstack.org/#/c/46768/>`__) to simplify this
+process.
+
+Configuration Notes
+===================
+
+.. contents::
+   :local:
+
+Service Repos
+-------------
+
+The Git repositories used to check out the source for each service are
+controlled by a pair of variables set for each service. ``*_REPO``
+points to the repository and ``*_BRANCH`` selects which branch to
+check out. These may be overridden in ``local.conf`` to pull source
+from a different repo for testing, such as a Gerrit branch
+proposal. ``GIT_BASE`` points to the primary repository server.
+
+::
+
+    NOVA_REPO=$GIT_BASE/openstack/nova.git
+    NOVA_BRANCH=master
+
+To pull a branch directly from Gerrit, get the repo and branch from
+the Gerrit review page::
+
+    git fetch https://review.opendev.org/openstack/nova \
+        refs/changes/50/5050/1 && git checkout FETCH_HEAD
+
+The repo is the stanza following ``fetch`` and the branch is the
+stanza following that::
+
+    NOVA_REPO=https://review.opendev.org/openstack/nova
+    NOVA_BRANCH=refs/changes/50/5050/1
+
+
+Installation Directory
+----------------------
+
+The DevStack install directory is set by the ``DEST`` variable. By
+default it is ``/opt/stack``.
+
+By setting it early in the ``localrc`` section you can reference it in
+later variables. It can be useful to set it even though it is not
+changed from the default value.
+
+::
+
+    DEST=/opt/stack
+
+Logging
+-------
+
+.. _enable_logging:
+
+Enable Logging
+~~~~~~~~~~~~~~
+
+By default ``stack.sh`` output is only written to the console where it
+runs. It can be sent to a file in addition to the console by setting
+``LOGFILE`` to the fully-qualified name of the destination log file. A
+timestamp will be appended to the given filename for each run of
+``stack.sh``.
+
+::
+
+    LOGFILE=$DEST/logs/stack.sh.log
+
+Old log files are cleaned automatically if ``LOGDAYS`` is set to the
+number of days of old log files to keep.
+
+::
+
+    LOGDAYS=2
+
+Some coloring is used during the DevStack runs to make it easier to
+see what is going on. This can be disabled with::
+
+    LOG_COLOR=False
+
+When using the logfile, by default logs are sent to the console and
+the file. You can set ``VERBOSE`` to ``false`` if you only wish the
+logs to be sent to the file (this may avoid having double-logging in
+some cases where you are capturing the script output and the log
+files). If ``VERBOSE`` is ``true`` you can additionally set
+``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each
+output line sent to the console. This can be useful in some
+situations where the console output is being captured by a runner or
+framework (e.g. Ansible) that adds its own timestamps. Note that the
+log lines sent to the ``LOGFILE`` will still be prefixed with a
+timestamp.
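+As an illustration, a ``localrc`` fragment for a run driven by an
+external runner such as Ansible might combine these settings as
+follows (a sketch; pick values to match your environment):
+
+::
+
+    LOGFILE=$DEST/logs/stack.sh.log
+    LOG_COLOR=False
+    VERBOSE=true
+    VERBOSE_NO_TIMESTAMP=true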
+
+Logging the Service Output
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, services run under ``systemd`` and are natively logging to
+the systemd journal.
+
+To query the logs use the ``journalctl`` command, such as::
+
+    sudo journalctl --unit devstack@*
+
+More examples can be found in :ref:`journalctl-examples`.
+
+Example Logging Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For example, non-interactive installs probably wish to save output to
+a file, keep service logs and disable color in the stored files.
+
+::
+
+    [[local|localrc]]
+    DEST=/opt/stack/
+    LOGFILE=$DEST/stack.sh.log
+    LOG_COLOR=False
+
+Database Backend
+----------------
+
+Multiple database backends are available. The available databases are defined
+in the lib/databases directory. ``mysql`` is the default database; choose a
+different one by putting the following in the ``localrc`` section::
+
+    disable_service mysql
+    enable_service postgresql
+
+RPC Backend
+-----------
+
+Support for a RabbitMQ RPC backend is included. Additional RPC
+backends may be available via external plugins. Enabling or disabling
+RabbitMQ is handled via the usual service functions and
+``ENABLED_SERVICES``.
+
+Example disabling RabbitMQ in ``local.conf``::
+
+    disable_service rabbit
+
+Apache Frontend
+---------------
+
+The Apache web server is enabled for services that support running via
+WSGI. Today this means HTTPD and uWSGI, but historically it meant HTTPD +
+mod_wsgi. This historical legacy is captured by the naming of many
+variables, which include ``MOD_WSGI`` rather than ``UWSGI``.
+
+Some services support alternative deployment strategies (e.g. eventlet). You
+can enable these by setting ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False``
+in your ``local.conf``. In addition, each service that can be run under
+HTTPD + mod_wsgi also has an override toggle available that can be set in
+your ``local.conf``. These are, however, slowly being removed as services
+have adopted standardized deployment mechanisms and more generally moved
+away from eventlet.
+
+Example (Swift)::
+
+    SWIFT_USE_MOD_WSGI="True"
+
+Example (Heat)::
+
+    HEAT_USE_MOD_WSGI="True"
+
+Libraries from Git
+------------------
+
+By default devstack installs OpenStack server components from git,
+however it installs client libraries from released versions on pypi.
+This is appropriate if you are working on server development, but if
+you want to see how an unreleased version of the client affects the
+system you can have devstack install it from upstream, or from local
+git trees by specifying it in ``LIBS_FROM_GIT``. Multiple libraries
+can be specified as a comma separated list.
+
+::
+
+    LIBS_FROM_GIT=python-keystoneclient,oslo.config
+
+Setting the variable to ``ALL`` will activate the download for all
+libraries.
+
+Virtual Environments
+--------------------
+
+Enable the use of Python virtual environments by setting ``USE_VENV``
+to ``True``. This will enable the creation of venvs for each project
+that is defined in the ``PROJECT_VENV`` array.
+
+Each entry in the ``PROJECT_VENV`` array contains the directory name
+of a venv to be used for the project. The array index is the project
+name. Multiple projects can use the same venv if desired.
+
+::
+
+    PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
+
+``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional
+packages to be installed into each venv. Often projects will not have
+certain packages listed in their ``requirements.txt`` file because they
+are 'optional' requirements, i.e. only needed for certain
+configurations. By default, the enabled databases will have their
+Python bindings added when they are enabled.
+
+::
+
+    ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
+
+A clean install every time
+--------------------------
+
+By default ``stack.sh`` only clones the project repos if they do not
+exist in ``$DEST``. ``stack.sh`` will freshen each repo on each run if
+``RECLONE`` is set to ``yes``. This avoids having to manually remove
+repos in order to get the current branch from ``$GIT_BASE``.
+
+::
+
+    RECLONE=yes
+
+Upgrade packages installed by pip
+---------------------------------
+
+By default ``stack.sh`` only installs Python packages if no version is
+currently installed or the current version does not match a specified
+requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing
+required Python packages will be upgraded to the most recent version
+that matches requirements.
+
+::
+
+    PIP_UPGRADE=True
+
+Guest Images
+------------
+
+Images provided as URLs via the comma-separated ``IMAGE_URLS``
+variable will be downloaded and uploaded to glance by DevStack.
+
+Default guest images are predefined for each type of hypervisor and
+its testing requirements in ``stack.sh``. Setting
+``DOWNLOAD_DEFAULT_IMAGES=False`` will prevent DevStack from downloading
+these default images; in that case, you will want to populate
+``IMAGE_URLS`` with sufficient images to satisfy testing requirements.
+
+::
+
+    DOWNLOAD_DEFAULT_IMAGES=False
+    IMAGE_URLS="http://foo.bar.com/image.qcow,"
+    IMAGE_URLS+="http://foo.bar.com/image2.qcow"
+
+
+Instance Type
+-------------
+
+``DEFAULT_INSTANCE_TYPE`` can be used to configure the default instance
+type. When this parameter is not specified, Devstack creates additional
+micro & nano flavors for really small instances to run Tempest tests.
+
+For guests with larger memory requirements, ``DEFAULT_INSTANCE_TYPE``
+should be specified in the configuration file so Tempest selects the
+default flavors instead.
+
+KVM on Power with QEMU 2.4 requires 512 MB to load the firmware -
+`QEMU 2.4 - PowerPC <http://wiki.qemu.org/ChangeLog/2.4#PowerPC>`__ so users
+running instances on ppc64/ppc64le can choose one of the default
+created flavors as follows:
+
+::
+
+    DEFAULT_INSTANCE_TYPE=m1.tiny
+
+
+IP Version
+----------
+
+``IP_VERSION`` can be used to configure Neutron to create either an
+IPv4, IPv6, or dual-stack self-service project data-network with
+either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
+respectively.
+
+::
+
+    IP_VERSION=4+6
+
+The following optional variables can be used to alter the default IPv6
+behavior:
+
+::
+
+    IPV6_RA_MODE=slaac
+    IPV6_ADDRESS_MODE=slaac
+    IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
+    IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
+
+*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
+can be configured with any valid IPv6 prefix. The default values make
+use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
+
+Service IP Version
+~~~~~~~~~~~~~~~~~~
+
+DevStack can enable service operation over either IPv4 or IPv6 by
+setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or
+``SERVICE_IP_VERSION=6`` respectively.
+
+When set to ``4`` devstack services will open listen sockets on
+``0.0.0.0`` and service endpoints will be registered using ``HOST_IP``
+as the address.
+
+When set to ``6`` devstack services will open listen sockets on ``::``
+and service endpoints will be registered using ``HOST_IPV6`` as the
+address.
+
+The default value for this setting is ``4``. Dual-mode support (for
+example, ``4+6``) is not currently supported. ``HOST_IPV6`` can
+optionally be used to alter the default IPv6 address::
+
+    HOST_IPV6=${some_local_ipv6_address}
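+For example, an IPv6-only service deployment could be requested in the
+``localrc`` section as follows (a sketch; the address shown is only a
+placeholder):
+
+::
+
+    SERVICE_IP_VERSION=6
+    HOST_IPV6=fd12:3456:789a::1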
+
+Tunnel IP Version
+~~~~~~~~~~~~~~~~~
+
+DevStack can enable tunnel operation over either IPv4 or IPv6 by
+setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or
+``TUNNEL_IP_VERSION=6`` respectively.
+
+When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints,
+for example, ``HOST_IP``.
+
+When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints,
+for example, ``HOST_IPV6``.
+
+The default value for this setting is ``4``. Dual-mode support (for
+example, ``4+6``) is not supported, as this value must match the address
+family of the local tunnel endpoint IP(v6) address.
+
+The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the
+setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP``
+when set to ``4``, and ``HOST_IPV6`` when set to ``6``.
+
+Multi-node setup
+~~~~~~~~~~~~~~~~
+
+See the :doc:`multi-node lab guide <guides/multinode-lab>`.
+
+Projects
+--------
+
+Neutron
+~~~~~~~
+
+See the :doc:`neutron configuration guide <guides/neutron>` for
+details on configuring Neutron.
+
+
+Swift
+~~~~~
+
+Swift is disabled by default. When enabled, it is configured with
+only one replica to avoid being IO/memory intensive on a small
+VM.
+
+If you would like to enable Swift you can add this to your ``localrc``
+section:
+
+::
+
+    enable_service s-proxy s-object s-container s-account
+
+If you want a minimal Swift install with only Swift and Keystone you
+can have this instead in your ``localrc`` section:
+
+::
+
+    disable_all_services
+    enable_service key mysql s-proxy s-object s-container s-account
+
+If you want to test a more realistic Swift cluster with multiple
+replicas, customize the ``SWIFT_REPLICAS`` variable in your ``localrc``
+section (usually setting it to 3).
+
+You can manually override the ring building to use specific storage
+nodes, for example when you want to test a multinode environment. In
+this case you have to set a space-separated list of IPs in
+``SWIFT_STORAGE_IPS`` in your ``localrc`` section that should be used
+as Swift storage nodes. Please note that this does not create a
+multinode setup; it is only used when adding nodes to the Swift rings.
+
+::
+
+    SWIFT_STORAGE_IPS="192.168.1.10 192.168.1.11 192.168.1.12"
+
+Swift S3
+++++++++
+
+If you are enabling ``s3api`` in ``ENABLED_SERVICES``, DevStack will
+install the s3api middleware emulation. Swift will be configured to
+act as a S3 endpoint for Keystone, effectively replacing the
+``nova-objectstore``.
+
+Only the Swift proxy server is launched as a systemd service; all other
+services are started in the background and managed by the ``swift-init``
+tool.
+
+Tempest
+~~~~~~~
+
+If tempest has been successfully configured, a basic set of smoke
+tests can be run as follows:
+
+::
+
+    $ cd /opt/stack/tempest
+    $ tox -e smoke
+
+By default tempest is downloaded and the config file is generated, but the
+tempest package is not installed in the system's global site-packages
+(installing the package also installs its dependencies), so tempest won't
+run outside of tox. If you would like to install it add the following to
+your ``localrc`` section:
+
+::
+
+    INSTALL_TEMPEST=True
+
+
+Cinder
+~~~~~~
+
+The logical volume group used to hold the Cinder-managed volumes is
+set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with
+``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
+with ``VOLUME_BACKING_FILE_SIZE``.
+
+::
+
+    VOLUME_GROUP_NAME="stack-volumes"
+    VOLUME_NAME_PREFIX="volume-"
+    VOLUME_BACKING_FILE_SIZE=24G
+
+When running highly concurrent tests, the default per-project quotas
+for volumes, backups, or snapshots may be too small. These can be
+adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``,
+or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for
+each is 10.)
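+For example, to give a highly concurrent Tempest run more headroom
+(the values here are illustrative only):
+
+::
+
+    CINDER_QUOTA_VOLUMES=50
+    CINDER_QUOTA_BACKUPS=50
+    CINDER_QUOTA_SNAPSHOTS=50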
+
+DevStack's Cinder LVM configuration module currently supports both iSCSI and
+NVMe connections, and we can choose which one to use with the options
+``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``,
+and ``CINDER_TARGET_PORT``.
+
+Defaults use iSCSI with the LIO target manager::
+
+    CINDER_TARGET_HELPER="lioadm"
+    CINDER_TARGET_PROTOCOL="iscsi"
+    CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:"
+    CINDER_TARGET_PORT=3260
+
+Additionally, there are three supported transport protocols for NVMe,
+``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target
+is selected the protocol, prefix, and port defaults will change to more
+sensible defaults for NVMe::
+
+    CINDER_TARGET_HELPER="nvmet"
+    CINDER_TARGET_PROTOCOL="nvmet_rdma"
+    CINDER_TARGET_PREFIX="nvme-subsystem-1"
+    CINDER_TARGET_PORT=4420
+
+When the RDMA transport protocol is selected, DevStack will create a
+Software RoCE device on the Cinder nodes on top of ``HOST_IP_IFACE``; if
+that is not defined, the device is created on top of the interface with
+the IP address ``HOST_IP`` or ``HOST_IPV6``.
+
+This Soft-RoCE device will always be created on the Nova compute side since we
+cannot tell beforehand whether there will be an RDMA connection or not.
+
+
+Keystone
+~~~~~~~~
+
+Multi-Region Setup
+++++++++++++++++++
+
+We want to set up two devstacks (RegionOne and RegionTwo) with a shared
+Keystone (same users and services) and Horizon. Keystone and Horizon
+will be located in RegionOne. Full spec is available at:
+``__.
+
+In RegionOne:
+
+::
+
+    REGION_NAME=RegionOne
+
+In RegionTwo:
+
+::
+
+    disable_service horizon
+    KEYSTONE_SERVICE_HOST=<keystone-ip-from-region-one>
+    REGION_NAME=RegionTwo
+    KEYSTONE_REGION_NAME=RegionOne
+
+In the devstack for RegionOne, we set REGION_NAME as RegionOne, so the
+services started in this devstack are registered in RegionOne. In the
+devstack for RegionTwo, similarly, we set REGION_NAME as RegionTwo since
+we want the services started in this devstack to be registered in
+RegionTwo. But the Keystone service is started and registered in
+RegionOne, not RegionTwo, so we use KEYSTONE_REGION_NAME to specify the
+region of the Keystone service. KEYSTONE_REGION_NAME defaults to the
+value of REGION_NAME, so we omit it in the configuration of RegionOne.
+
+Glance
+++++++
+
+The default image size quota of 1GiB may be too small if larger images
+are to be used. Change the default at setup time with:
+
+::
+
+    GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000
+
+or at runtime via:
+
+::
+
+    openstack --os-cloud devstack-system-admin registered limit set \
+        --service glance --default-limit 5000 --region RegionOne image_size_total
+
+.. _arch-configuration:
+
+Architectures
+-------------
+
+The upstream CI runs exclusively on nodes with x86 architectures, but
+OpenStack supports even more architectures. Some of them require a
+specific Devstack configuration.
+
+KVM on s390x (IBM z Systems)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+KVM on s390x (IBM z Systems) is supported since the *Kilo* release.
For
+an all-in-one setup, these minimal settings in the ``local.conf`` file
+are needed::
+
+    [[local|localrc]]
+    ADMIN_PASSWORD=secret
+    DATABASE_PASSWORD=$ADMIN_PASSWORD
+    RABBIT_PASSWORD=$ADMIN_PASSWORD
+    SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+    DOWNLOAD_DEFAULT_IMAGES=False
+    IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img"
+
+    # Provide a custom etcd3 binary download URL and its sha256.
+    # The binary must be located under
+    # '<etcd-download-url>/<etcd-version>/etcd-<etcd-version>-linux-s390x.tar.gz'
+    # on this URL.
+    # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd
+    ETCD_DOWNLOAD_URL=<your-etcd-download-url>
+    ETCD_SHA256=<your-etcd3-sha256>
+
+    enable_service n-sproxy
+    disable_service n-novnc
+
+    [[post-config|$NOVA_CONF]]
+
+    [serial_console]
+    base_url=ws://$HOST_IP:6083/ # optional
+
+Reasoning:
+
+* The default image of Devstack is x86 only, so we deactivate the download
+  with ``DOWNLOAD_DEFAULT_IMAGES``. The referenced guest image
+  in the code above (``IMAGE_URLS``) serves as an example. The list of
+  possible s390x guest images is not limited to that.
+
+* This platform doesn't support a graphical console like VNC or SPICE.
+  The technical reason is the missing framebuffer on the platform. This
+  means we rely on the substitute feature *serial console* which needs the
+  proxy service ``n-sproxy``. We also disable VNC's proxy ``n-novnc`` for
+  that reason. The configuration in the ``post-config`` section is only
+  needed if you want to use the *serial console* outside of the all-in-one
+  setup.
+
+* A link to an etcd3 binary and its sha256 needs to be provided as the
+  binary for s390x is not hosted on GitHub like it is for other
+  architectures. For more details see
+  https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be
+  built by following https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd.
+
+.. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need
+   to use a guest image which is smaller than 1GB when uncompressed.
+   The example image from above is bigger than that!
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000000..8b5a85b3df
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,57 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the more project-specific information you need to get
+started with Devstack.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at OFTC.
+* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
+  http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Devstack Core Team
+`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal
+Process`_. Devstack features are tracked on `Launchpad BP
+<https://blueprints.launchpad.net/devstack>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/devstack>`_.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad <https://bugs.launchpad.net/devstack>`__.
+
+More info about Launchpad usage can be found on the `OpenStack docs page
+`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Devstack require two ``Code-Review +2`` votes from
+Devstack core reviewers before one of the core reviewers can approve the patch
+by giving a ``Workflow +1`` vote. There are two exceptions: patches that
+unblock the gate, and patches that do not touch Devstack's core logic (for
+example, cleanups of old jobs), may be approved by a single core reviewer.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
new file mode 100644
index 0000000000..3ca0ad94b4
--- /dev/null
+++ b/doc/source/debugging.rst
@@ -0,0 +1,52 @@
+=====================
+System-wide debugging
+=====================
+
+A lot can go wrong during a devstack run, and there are a few built-in
+tools to help you.
+
+dstat
+-----
+
+Enable the ``dstat`` service to produce performance logs during the
+devstack run. These will be logged to the journal and also as a CSV
+file.
+
+memory_tracker
+--------------
+
+The ``memory_tracker`` service periodically monitors RAM usage and
+provides consumption output when available memory is seen to be
+falling (i.e. processes are consuming memory). It also provides
+output showing locked (unswappable) memory.
+
+file_tracker
+------------
+
+The ``file_tracker`` service periodically monitors the number of
+open files in the system.
+
+tcpdump
+-------
+
+Enable the ``tcpdump`` service to run a background tcpdump. You must
+set the ``TCPDUMP_ARGS`` variable to something suitable (there is no
+default). For example, to trace iSCSI communication during a job in
+the OpenStack gate and copy the result into the log output, you might
+use:
+
+.. code-block:: yaml
+
+   job:
+     name: devstack-job
+     parent: devstack
+     vars:
+       devstack_services:
+         tcpdump: true
+       devstack_localrc:
+         TCPDUMP_ARGS: "-i any tcp port 3260"
+       zuul_copy_output:
+         '{{ devstack_log_dir }}/tcpdump.pcap': logs
diff --git a/doc/source/development.rst b/doc/source/development.rst
new file mode 100644
index 0000000000..957de9b0e1
--- /dev/null
+++ b/doc/source/development.rst
@@ -0,0 +1,117 @@
+==========================
+ Developing with Devstack
+==========================
+
+Now that you have your nifty DevStack up and running, what can you do
+with it?
+
+Inspecting Services
+===================
+
+By default most services in DevStack are running as `systemd` units
+named `devstack@$servicename.service`. You can see the running services
+with:
+
+.. code-block:: bash
+
+   sudo systemctl status "devstack@*"
+
+To learn more about the basics of systemd, see :doc:`/systemd`.
+
+Patching a Service
+==================
+
+If you want to make a quick change to a running service, the easiest
+way to do that is to change the code directly in /opt/stack/$service
+and then restart the affected daemons.
+
+.. code-block:: bash
+
+   sudo systemctl restart devstack@n-cpu.service
+
+If your change impacts more than one daemon, you can restart by
+wildcard as well.
+
+.. code-block:: bash
+
+   sudo systemctl restart "devstack@n-*"
+
+.. warning::
+
+   All changes you are making are in checked out git trees that
+   DevStack thinks it has full control over. 
Uncommitted work, or + work committed to the master branch, may be overwritten during + subsequent DevStack runs. + +Testing a Patch Series +====================== + +When testing a larger set of patches, or patches that will impact more +than one service within a project, it is often less confusing to use +custom git locations, and make all your changes in a dedicated git +tree. + +In your ``local.conf`` you can add ``**_REPO``, ``**_BRANCH`` for most projects +to use a custom git tree instead of the default upstream ones. + +For instance: + +.. code-block:: bash + + [[local|localrc]] + NOVA_REPO=/home/sdague/nova + NOVA_BRANCH=fold_disk_config + +Will use a custom git tree and branch when doing any devstack +operations, such as ``stack.sh``. + +When testing complicated changes committing to these trees, then doing +``./unstack.sh && ./stack.sh`` is often a valuable way to +iterate. This does take longer per iteration than direct patching, as +the whole devstack needs to rebuild. + +You can use this same approach to test patches that are up for review +in gerrit by using the ref name that gerrit assigns to each change. + +.. code-block:: bash + + [[local|localrc]] + NOVA_BRANCH=refs/changes/10/353710/1 + + +Testing Changes to Libraries +============================ + +When testing changes to libraries consumed by OpenStack services (such +as oslo or any of the python-fooclient libraries) things are a little +more complicated. By default we only test with released versions of +these libraries that are on pypi. + +You must first override this with the setting ``LIBS_FROM_GIT``. This +will enable your DevStack with the git version of that library instead +of the released version. + +After that point you can also specify ``**_REPO``, ``**_BRANCH`` to use +your changes instead of just upstream master. + +.. code-block:: bash + + [[local|localrc]] + LIBS_FROM_GIT=oslo.policy + OSLOPOLICY_REPO=/home/sdague/oslo.policy + OSLOPOLICY_BRANCH=better_exception + +As libraries are not installed `editable` by pip, after you make any +local changes you will need to: + +* cd to top of library path +* sudo pip install -U . +* restart all services you want to use the new library + +You can do that with wildcards such as + +.. code-block:: bash + + sudo systemctl restart "devstack@n-*" + +which will restart all nova services. diff --git a/doc/source/faq.rst b/doc/source/faq.rst new file mode 100644 index 0000000000..8214de0f6a --- /dev/null +++ b/doc/source/faq.rst @@ -0,0 +1,233 @@ +=== +FAQ +=== + +.. contents:: + :local: + +General Questions +================= + +Can I use DevStack for production? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack is targeted at developers and CI systems to use the raw +upstream code. It makes many choices that are not appropriate for +production systems. + +Your best choice is probably to choose a `distribution of OpenStack +`__. + +Can I use DevStack as a development environment? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sure, you can. That said, there are a couple of things you should note before +doing so: + +- DevStack makes a lot of configuration changes to your system and should not + be run in your main development environment. + +- All the repositories that DevStack clones when deploying are considered + volatile by default and thus are subject to hard resets. This is necessary to + keep you in sync with the latest upstream, which is what you want in a CI + situation, but it can result in branches being overwritten and files being + removed. 
+
+  The corollary of this is that if you are working on a specific project, using
+  the DevStack project repository (defaulted to ``/opt/stack/<project>``) as
+  the single master repository for storing all your work is not recommended.
+  This behavior can be overridden by setting the ``RECLONE`` config option to
+  ``no``. Alternatively, you can avoid running ``stack.sh`` to redeploy by
+  restarting services manually. In any case, you should generally ensure work
+  in progress is pushed to Gerrit or otherwise backed up before running
+  ``stack.sh``.
+
+- If you use DevStack within a VM, you may wish to mount a local OpenStack
+  directory, such as ``~/src/openstack``, inside the VM and configure DevStack
+  to use this as the clone location using the ``{PROJECT}_REPO`` config
+  variables. For example, assuming you're using Vagrant and sharing your home
+  directory, you should place the following in ``local.conf``:
+
+  .. code-block:: shell
+
+     NEUTRON_REPO=/home/vagrant/src/neutron
+     NOVA_REPO=/home/vagrant/src/nova
+     KEYSTONE_REPO=/home/vagrant/src/keystone
+     GLANCE_REPO=/home/vagrant/src/glance
+     SWIFT_REPO=/home/vagrant/src/swift
+     HORIZON_REPO=/home/vagrant/src/horizon
+     CINDER_REPO=/home/vagrant/src/cinder
+     HEAT_REPO=/home/vagrant/src/heat
+     TEMPEST_REPO=/home/vagrant/src/tempest
+     HEATCLIENT_REPO=/home/vagrant/src/python-heatclient
+     GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient
+     NOVACLIENT_REPO=/home/vagrant/src/python-novaclient
+     NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient
+     OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient
+     HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools
+     HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates
+     NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas
+     # ...
+
+Why a shell script, why not chef/puppet/...
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The script is meant to be read by humans (as well as run by
+computers); it is the primary documentation after all. Using a recipe
+system requires everyone to agree on and understand chef or puppet.
+
+I'd like to help!
+~~~~~~~~~~~~~~~~~
+
+That isn't a question, but please do! The source for DevStack is at
+`opendev.org <https://opendev.org/openstack/devstack>`__ and bug
+reports go to `LaunchPad
+<https://bugs.launchpad.net/devstack>`__. Contributions follow the
+usual process as described in the `developer guide
+`__. This
+Sphinx documentation is housed in the doc directory.
+
+Why not use packages?
+~~~~~~~~~~~~~~~~~~~~~
+
+Unlike packages, DevStack leaves your cloud ready to develop -
+checkouts of the code and services running locally under systemd,
+making it easy to hack on and test new patches. However, many people
+are doing the hard work of packaging and recipes for production
+deployments.
+
+Why isn't $MY\_FAVORITE\_DISTRO supported?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack is meant for developers and those who want to see how
+OpenStack really works. DevStack is known to run on the distro/release
+combinations listed in ``README.md``. Releases other than those
+documented in ``README.md`` are supported only on a best-effort basis.
+
+Are there any differences between Ubuntu and CentOS/Fedora support?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both should work well and are tested by DevStack CI.
+
+Why can't I use another shell?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack now uses some specific bash-isms that require Bash 4, such as
+associative arrays.
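+For instance, DevStack relies on constructs along these lines (an
+illustrative sketch, not actual DevStack code)::
+
+    declare -A ENABLED            # associative arrays require Bash 4
+    ENABLED[n-cpu]=1
+    ENABLED[c-vol]=1
+    if [[ -n "${ENABLED[n-cpu]}" ]]; then
+        echo "n-cpu is enabled"
+    fi
+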
+Simple compatibility patches have been accepted in the past when they
+were not complex; at this point, no additional compatibility patches
+will be considered, except for shells that provide the same array
+functionality, as it is deeply ingrained in the repo and project
+management.
+
+Can I test on OS/X?
+~~~~~~~~~~~~~~~~~~~
+
+Some people have had success with bash 4 installed via homebrew to keep
+running tests on OS/X.
+
+Can I at least source ``openrc`` with ``zsh``?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+People have reported success with a special function to run ``openrc``
+through bash for this:
+
+.. code-block:: bash
+
+   function sourceopenrc {
+       pushd ~/devstack >/dev/null
+       eval $(bash -c ". openrc $1 $2 >/dev/null;env|sed -n '/OS_/ { s/^/export /;p}'")
+       popd >/dev/null
+   }
+
+
+Operation and Configuration
+===========================
+
+Can DevStack handle a multi-node installation?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Yes, see the :doc:`multinode lab guide <guides/multinode-lab>`.
+
+How can I document the environment that DevStack is using?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack includes a script (``tools/info.sh``) that gathers the
+versions of the relevant installed apt packages, pip packages and git
+repos. This is a good way to verify what Python modules are
+installed.
+
+How do I turn off a service that is enabled by default?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Services can be turned off by adding ``disable_service xxx`` to
+``local.conf`` (using ``c-vol`` in this example):
+
+  ::
+
+    disable_service c-vol
+
+Is enabling a service that defaults to off done with the reverse of the above?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Of course!
+
+  ::
+
+    enable_service q-svc
+
+How do I run a specific OpenStack release?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack master tracks the upstream master of all the projects. If you
+would like to run a stable branch of OpenStack, you should use the
+corresponding stable branch of DevStack as well. For instance the
+``stable/ocata`` version of DevStack will already default to all the
+projects running at ``stable/ocata`` levels.
+
+Note: it's also possible to manually adjust the ``*_BRANCH`` variables
+further if you would like to test specific milestones, or even custom
+out of tree branches. This is done with entries like the following in
+your ``local.conf``
+
+::
+
+   [[local|localrc]]
+   GLANCE_BRANCH=11.0.0.0rc1
+   NOVA_BRANCH=12.0.0.0rc1
+
+
+Upstream DevStack is only tested with master and stable
+branches. Setting custom BRANCH definitions is not guaranteed to
+produce working results.
+
+What can I do about RabbitMQ not wanting to start on my fresh new VM?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is often caused by ``erlang`` not being happy with the hostname
+resolving to a reachable IP address. Make sure your hostname resolves
+to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is
+often good enough for a single-node installation. And in an extreme
+case, use ``clean.sh`` to eradicate it and try again.
+
+Why are my configuration changes ignored?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You may have run into the package prerequisite installation
+timeout. ``tools/install_prereqs.sh`` has a timer that skips the
+package installation checks if it was run within the last
+``PREREQ_RERUN_HOURS`` hours (default is 2).
+To override this, set ``FORCE_PREREQ=1`` and the package checks will
+never be skipped.
+
+Miscellaneous
+=============
+
+``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Stuff in there is to correct problems in an environment that need to
+be fixed elsewhere or may/will be fixed in a future release. In the
+case of ``httplib2`` and ``prettytable``, specific problems with
+specific versions are being worked around. If later releases have
+those problems then we'll add them to the script. Knowing about
+broken future releases is more valuable than polling to see whether
+they have been fixed.
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
new file mode 100644
index 0000000000..e7b46b6e55
--- /dev/null
+++ b/doc/source/guides.rst
@@ -0,0 +1,80 @@
+Guides
+======
+
+.. warning::
+
+   The guides are point in time contributions, and may not always be
+   up to date with the latest work in devstack.
+
+Walk through various setups used by stackers.
+
+.. toctree::
+   :glob:
+   :hidden:
+   :maxdepth: 1
+
+   guides/single-vm
+   guides/single-machine
+   guides/lxc
+   guides/multinode-lab
+   guides/neutron
+   guides/devstack-with-nested-kvm
+   guides/nova
+   guides/devstack-with-octavia
+   guides/devstack-with-ldap
+
+All-In-One Single VM
+--------------------
+
+Run :doc:`OpenStack in a VM <guides/single-vm>`. The VMs launched in your
+cloud will be slow as they are running in QEMU (emulation), but it is
+useful if you don't have spare hardware laying around.
+:doc:`[Read] <guides/single-vm>`
+
+All-In-One Single Machine
+-------------------------
+
+Run :doc:`OpenStack on dedicated hardware <guides/single-machine>`. This
+can include a server-class machine or a laptop at home.
+:doc:`[Read] <guides/single-machine>`
+
+All-In-One LXC Container
+-------------------------
+
+Run :doc:`OpenStack in an LXC container <guides/lxc>`. Beneficial for
+intermediate and advanced users. The VMs launched in this cloud will be
+fully accelerated but not all OpenStack features are supported.
+:doc:`[Read] <guides/lxc>`
+
+Multi-Node Lab
+--------------
+
+Set up a :doc:`multi-node cluster <guides/multinode-lab>` with dedicated
+VLANs for VMs & Management.
+:doc:`[Read] <guides/multinode-lab>`
+
+DevStack with Neutron Networking
+--------------------------------
+
+Building a DevStack cluster with :doc:`Neutron Networking
+<guides/neutron>`. This guide is meant for building lab environments with
+a dedicated control node and multiple compute nodes.
+
+DevStack with KVM-based Nested Virtualization
+---------------------------------------------
+
+Procedure to set up :doc:`DevStack with KVM-based Nested Virtualization
+<guides/devstack-with-nested-kvm>`. With this setup, Nova instances
+will be more performant than with plain QEMU emulation.
+
+Nova and devstack
+--------------------------------
+
+Guide to working with nova features: :doc:`Nova and devstack
+<guides/nova>`.
+
+Configure Octavia
+-----------------
+
+Guide on :doc:`configuring Octavia <guides/devstack-with-octavia>`.
+
+Deploying DevStack with LDAP
+----------------------------
+
+Guide to setting up :doc:`DevStack with LDAP <guides/devstack-with-ldap>`.
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst
new file mode 100644
index 0000000000..4c54723c71
--- /dev/null
+++ b/doc/source/guides/devstack-with-ldap.rst
@@ -0,0 +1,174 @@
+============================
+Deploying DevStack with LDAP
+============================
+
+The OpenStack Identity service has the ability to integrate with LDAP. The goal
+of this guide is to walk you through setting up an LDAP-backed OpenStack
+development environment.
+
+Introduction
+============
+
+LDAP support in keystone is read-only. You can use it to back an entire
+OpenStack deployment to a single LDAP server, or you can use it to back
+separate LDAP servers to specific keystone domains. Users within those domains
+can authenticate against keystone, assume role assignments, and interact with
+other OpenStack services.
+
+Configuration
+=============
+
+To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of
+``ENABLED_SERVICES`` in the ``local.conf`` file::
+
+    enable_service ldap
+
+Devstack will require a password to set up an LDAP administrator. This
+administrative user is also the bind user specified in keystone's configuration
+files, similar to a ``keystone`` user for MySQL databases.
+
+Devstack will prompt you for a password when running ``stack.sh`` if
+``LDAP_PASSWORD`` is not set. You can add the following to your
+``local.conf``::
+
+    LDAP_PASSWORD=super_secret_password
+
+At this point, devstack should have everything it needs to deploy OpenLDAP,
+bootstrap it with a minimal set of users, and configure it to back a domain
+in keystone. You can do this by running the ``stack.sh`` script::
+
+    $ ./stack.sh
+
+Once ``stack.sh`` completes, you should have a running keystone deployment with
+a basic set of users. It is important to note that not all users will live
+within LDAP. Instead, keystone will back different domains to different
+identity sources. For example, the ``default`` domain will be backed by MySQL.
+This is usually where you'll find your administrative and services users. If
+you query keystone for a list of domains, you should see a domain called
+``Users``. This domain is set up by devstack and points to OpenLDAP.
+
+User Management
+===============
+
+Initially, there will only be two users in the LDAP server. The ``Manager``
+user is used by keystone to talk to OpenLDAP. The ``demo`` user is a generic
+user that you should be able to see if you query keystone for users within the
+``Users`` domain. Both of these users were added to LDAP using basic LDAP
+utilities installed by devstack (e.g. ``ldap-utils``) and LDIFs. The LDIFs used
+to create these users can be found in ``devstack/files/ldap/``.
+
+Listing Users
+-------------
+
+To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP
+user bootstrapped by devstack::
+
+    $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -b dc=openstack,dc=org
+
+As you can see, devstack creates an OpenStack domain called ``openstack.org``
+as a container for the ``Manager`` and ``demo`` users.
+
+Creating Users
+--------------
+
+Since keystone's LDAP integration is read-only, users must be added directly to
+LDAP. Users added directly to OpenLDAP will automatically be placed into the
+``Users`` domain.
+
+LDIFs can be used to add users via the command line. The following is an
+example LDIF that can be used to create a new LDAP user; let's call it
+``peter.ldif.in``::
+
+    dn: cn=peter,ou=Users,dc=openstack,dc=org
+    cn: peter
+    displayName: Peter Quill
+    givenName: Peter Quill
+    mail: starlord@openstack.org
+    objectClass: inetOrgPerson
+    objectClass: top
+    sn: peter
+    uid: peter
+    userPassword: im-a-better-pilot-than-rocket
+
+Now, we use the ``Manager`` user to create a user for Peter in LDAP::
+
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -c -f peter.ldif.in
+
+We should be able to assign Peter roles on projects.
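+Before doing that, it can be worth confirming that the new entry is
+actually visible, both directly in LDAP and through keystone (a quick
+sanity check, assuming the defaults used above)::
+
+    $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -b ou=Users,dc=openstack,dc=org cn=peter
+    $ openstack user list --domain Users
+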
+After Peter has some level of authorization, he should be able to log in
+to Horizon by specifying the ``Users`` domain and using his ``peter``
+username and password. Authorization can be given to Peter by creating a
+project within the ``Users`` domain and giving him a role assignment on
+that project::
+
+    $ openstack project create --domain Users awesome-mix-vol-1
+    +-------------+----------------------------------+
+    | Field       | Value                            |
+    +-------------+----------------------------------+
+    | description |                                  |
+    | domain_id   | 61a2de23107c46bea2d758167af707b9 |
+    | enabled     | True                             |
+    | id          | 7d422396d54945cdac8fe1e8e32baec4 |
+    | is_domain   | False                            |
+    | name        | awesome-mix-vol-1                |
+    | parent_id   | 61a2de23107c46bea2d758167af707b9 |
+    | tags        | []                               |
+    +-------------+----------------------------------+
+    $ openstack role add --user peter --user-domain Users \
+        --project awesome-mix-vol-1 --project-domain Users admin
+
+
+Deleting Users
+--------------
+
+We can use the same basic steps to remove users from LDAP, but instead of using
+LDIFs, we can just pass the ``dn`` of the user we want to delete::
+
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org
+
+Group Management
+================
+
+Like users, groups are considered specific identities. This means that groups
+also fall under the same read-only constraints as users and they can be managed
+directly with LDAP in the same way users are with LDIFs.
+
+Adding Groups
+-------------
+
+Let's define a specific group with the following LDIF::
+
+    dn: cn=guardians,ou=UserGroups,dc=openstack,dc=org
+    objectClass: groupOfNames
+    cn: guardians
+    description: Guardians of the Galaxy
+    member: cn=peter,dc=openstack,dc=org
+    member: cn=gamora,dc=openstack,dc=org
+    member: cn=drax,dc=openstack,dc=org
+    member: cn=rocket,dc=openstack,dc=org
+    member: cn=groot,dc=openstack,dc=org
+
+We can create the group using the same ``ldapadd`` command as we did with
+users::
+
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -c -f guardian-group.ldif.in
+
+If we check the group membership in Horizon, we'll see that only Peter is a
+member of the ``guardians`` group, despite the whole crew being specified in
+the LDIF. This is because, so far, only Peter's account actually exists in
+LDAP. Once those accounts are created in LDAP, they will automatically be
+added to the ``guardians`` group. They will also assume any role assignments
+given to the ``guardians`` group.
+
+Deleting Groups
+---------------
+
+Just like users, groups can be deleted using the ``dn``::
+
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org
+
+Note that this operation will not remove users within that group. It will only
+remove the group itself and the memberships any users had with that group.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
new file mode 100644
index 0000000000..ba483e9ec9
--- /dev/null
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -0,0 +1,141 @@
+.. _kvm_nested_virt:
+
+=======================================================
+Configure DevStack with KVM-based Nested Virtualization
+=======================================================
+
+When using virtualization technologies like KVM, one can take advantage
+of "Nested VMX" (i.e. the ability to run KVM on KVM) so that the VMs in
+the cloud (Nova guests) can run noticeably faster than with plain QEMU
+emulation.
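+
+Before changing anything, you can confirm that the physical host
+advertises hardware virtualization at all; a nonzero count here means
+VT-x/AMD-V is available::
+
+    egrep -c '(vmx|svm)' /proc/cpuinfo
+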
+
+Kernels shipped with Linux distributions don't have this enabled by
+default. This guide outlines the configuration details to enable nested
+virtualization in KVM-based environments, and how to set up DevStack
+(which will run in a VM) to take advantage of it.
+
+
+Nested Virtualization Configuration
+===================================
+
+Configure Nested KVM for Intel-based Machines
+---------------------------------------------
+
+Procedure to enable nested KVM virtualization on Intel-based machines.
+
+Check if the nested KVM Kernel parameter is enabled:
+
+::
+
+    cat /sys/module/kvm_intel/parameters/nested
+    N
+
+Temporarily remove the KVM Intel Kernel module, enable nested
+virtualization to be persistent across reboots, and add the Kernel
+module back:
+
+::
+
+    sudo rmmod kvm-intel
+    sudo sh -c "echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf"
+    sudo modprobe kvm-intel
+
+Ensure the Nested KVM Kernel module parameter for Intel is enabled on
+the host:
+
+::
+
+    cat /sys/module/kvm_intel/parameters/nested
+    Y
+
+    modinfo kvm_intel | grep nested
+    parm:           nested:bool
+
+Start your VM; it should now have KVM capabilities -- you can verify
+that by ensuring the ``/dev/kvm`` character device is present.
+
+
+Configure Nested KVM for AMD-based Machines
+-------------------------------------------
+
+Procedure to enable nested KVM virtualization on AMD-based machines.
+
+Check if the nested KVM Kernel parameter is enabled:
+
+::
+
+    cat /sys/module/kvm_amd/parameters/nested
+    0
+
+
+Temporarily remove the KVM AMD Kernel module, enable nested
+virtualization to be persistent across reboots, and add the Kernel module
+back:
+
+::
+
+    sudo rmmod kvm-amd
+    sudo sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf"
+    sudo modprobe kvm-amd
+
+Ensure the Nested KVM Kernel module parameter for AMD is enabled on the
+host:
+
+::
+
+    cat /sys/module/kvm_amd/parameters/nested
+    1
+
+    modinfo kvm_amd | grep -i nested
+    parm:           nested:int
+
+The ``echo`` above already made the value persistent across reboots;
+confirm that /etc/modprobe.d/dist.conf contains the entry::
+
+    cat /etc/modprobe.d/dist.conf
+    options kvm-amd nested=1
+
+
+Expose Virtualization Extensions to DevStack VM
+-----------------------------------------------
+
+Edit the VM's libvirt XML configuration via ``virsh`` utility:
+
+::
+
+    sudo virsh edit devstack-vm
+
+Add the below snippet to expose the host CPU features to the VM:
+
+::
+
+    <cpu mode='host-passthrough'>
+    </cpu>
+
+Ensure DevStack VM is Using KVM
+-------------------------------
+
+Before invoking ``stack.sh`` in the VM, ensure that KVM is enabled. This
+can be verified by checking for the presence of the file ``/dev/kvm`` in
+your VM. If it is present, DevStack will default to using the config
+attribute ``virt_type = kvm`` in ``/etc/nova/nova.conf``; otherwise, it'll
+fall back to ``virt_type=qemu``, i.e. plain QEMU emulation.
+
+Optionally, to explicitly set the virtualization type used by the libvirt
+driver in nova to KVM, the below config attribute can be used in
+DevStack's ``local.conf``:
+
+::
+
+    LIBVIRT_TYPE=kvm
+
+
+Once DevStack is configured successfully, verify that Nova instances
+are using KVM by checking that the QEMU command line invoked by Nova
+includes the parameter ``accel=kvm``, e.g.:
+
+::
+
+    ps -ef | grep -i qemu
+    root 29773 1 0 11:24 ? 00:00:00 /usr/bin/qemu-system-x86_64 -machine accel=kvm [. . .]
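+
+Another quick check, assuming DevStack's default configuration layout,
+is to look at the value nova-compute actually ended up with::
+
+    grep virt_type /etc/nova/nova.conf
+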
diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. + +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. + # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . 
./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. + +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst new file mode 100644 index 0000000000..dcaa4166c4 --- /dev/null +++ b/doc/source/guides/lxc.rst @@ -0,0 +1,164 @@ +================================ +All-In-One Single LXC Container +================================ + +This guide walks you through the process of deploying OpenStack using devstack +in an LXC container instead of a VM. + +The primary benefits to running devstack inside a container instead of a VM is +faster performance and lower memory overhead while still providing a suitable +level of isolation. This can be particularly useful when you want to simulate +running OpenStack on multiple nodes. + +.. Warning:: Containers do not provide the same level of isolation as a virtual + machine. + +.. Note:: Not all OpenStack features support running inside of a container. 
+   See `Limitations`_ section below for details. :doc:`OpenStack in a VM <single-vm>`
+   is recommended for beginners.
+
+Prerequisites
+==============
+
+This guide is written for Ubuntu 14.04 but should be adaptable for any modern
+Linux distribution.
+
+Install the LXC package::
+
+    sudo apt-get install lxc
+
+You can verify support for containerization features in your currently running
+kernel using the ``lxc-checkconfig`` command.
+
+Container Setup
+===============
+
+Configuration
+---------------
+
+For a successful run of ``stack.sh`` and to permit use of KVM to run the VMs you
+launch inside your container, we need to use the following additional
+configuration options. Place the following in a file called
+``devstack-lxc.conf``::
+
+    # Permit access to /dev/loop*
+    lxc.cgroup.devices.allow = b 7:* rwm
+
+    # Setup access to /dev/net/tun and /dev/kvm
+    lxc.mount.entry = /dev/net/tun dev/net/tun none bind,create=file 0 0
+    lxc.mount.entry = /dev/kvm dev/kvm none bind,create=file 0 0
+
+    # Networking
+    lxc.network.type = veth
+    lxc.network.flags = up
+    lxc.network.link = lxcbr0
+
+
+Create Container
+-------------------
+
+The configuration and rootfs for LXC containers are created using the
+``lxc-create`` command.
+
+We will name our container ``devstack`` and use the ``ubuntu`` template, which
+will use ``debootstrap`` to build an Ubuntu rootfs. It will default to the same
+release and architecture as the host system. We also install the additional
+packages ``bsdmainutils`` and ``git`` as we'll need them to run devstack::
+
+    sudo lxc-create -n devstack -t ubuntu -f devstack-lxc.conf -- --packages=bsdmainutils,git
+
+The first time, building the rootfs will take a few minutes to download,
+unpack, and configure all the necessary packages for a minimal installation
+of Ubuntu. LXC will cache this, and subsequent containers will take only
+seconds to create.
+
+.. Note:: To speed up the initial rootfs creation, you can specify a mirror to
+   download the Ubuntu packages from by appending ``--mirror=`` and then the URL
+   of an Ubuntu mirror. To see other template options, you can run
+   ``lxc-create -t ubuntu -h``.
+
+Start Container
+----------------
+
+To start the container, run::
+
+    sudo lxc-start -n devstack
+
+A moment later you should be presented with the login prompt for your container.
+You can log in using the username ``ubuntu`` and password ``ubuntu``.
+
+You can also ssh into your container. On your host, run
+``sudo lxc-info -n devstack`` to get the IP address (e.g.
+``ssh ubuntu@$(sudo lxc-info -n devstack | awk '/IP/ { print $2 }')``).
+
+Run Devstack
+-------------
+
+You should now be logged into your container and almost ready to run devstack.
+The commands in this section should all be run inside your container.
+
+.. Tip:: You can greatly reduce the runtime of your initial devstack setup by
+   ensuring you have your apt sources.list configured to use a fast mirror.
+   Check and update ``/etc/apt/sources.list`` if necessary and then run
+   ``apt-get update``.
+
+#. Download DevStack
+
+   ::
+
+      git clone https://opendev.org/openstack/devstack
+
+#. Configure
+
+   Refer to :ref:`minimal-configuration` if you wish to configure the behaviour
+   of devstack.
+
+#. Start the install
+
+   ::
+
+      cd devstack
+      ./stack.sh
+
+Cleanup
+-------
+
+To stop the container::
+
+    lxc-stop -n devstack
+
+To delete the container::
+
+    lxc-destroy -n devstack
+
+Limitations
+============
+
+Not all OpenStack features may function correctly or at all when run
+from within a container.
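+
+Before digging into a misbehaving service, it can be worth confirming
+that the devices bound in ``devstack-lxc.conf`` actually show up inside
+the container::
+
+    ls -l /dev/kvm /dev/net/tun
+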
+ +Cinder +------- + +Unable to create LVM backed volume +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + In our configuration, we have not whitelisted access to device-mapper or LVM + devices. Doing so will permit your container to have access and control of LVM + on the host system. To enable, add the following to your + ``devstack-lxc.conf`` before running ``lxc-create``:: + + lxc.cgroup.devices.allow = c 10:236 rwm + lxc.cgroup.devices.allow = b 252:* rwm + + Additionally you'll need to set ``udev_rules = 0`` in the ``activation`` + section of ``/etc/lvm/lvm.conf`` unless you mount devtmpfs in your container. + +Unable to attach volume to instance +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + It is not possible to attach cinder volumes to nova instances due to parts of + the Linux iSCSI implementation not being network namespace aware. This can be + worked around by using network pass-through instead of a separate network + namespace but such a setup significantly reduces the isolation of the + container (e.g. a ``halt`` command issued in the container will cause the host + system to shutdown). diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst new file mode 100644 index 0000000000..ef339f1f5c --- /dev/null +++ b/doc/source/guides/multinode-lab.rst @@ -0,0 +1,462 @@ +============== +Multi-Node Lab +============== + +Here is OpenStack in a realistic test configuration with multiple +physical servers. + +Prerequisites Linux & Network +============================= + +Minimal Install +--------------- + +You need to have a system with a fresh install of Linux. You can +download the `Minimal +CD `__ for +Ubuntu releases since DevStack will download & install all the +additional dependencies. The netinstall ISO is available for +`Fedora `__ +and +`CentOS/RHEL `__. + +Install a couple of packages to bootstrap configuration: + +:: + + apt-get install -y git sudo || dnf install -y git sudo + +Network Configuration +--------------------- + +The first iteration of the lab uses OpenStack's FlatDHCP network +controller so only a single network will be required. It should be on +its own subnet without DHCP; the host IPs and floating IP pool(s) will +come out of this block. This example uses the following: + +- Gateway: 192.168.42.1 +- Physical nodes: 192.168.42.11-192.168.42.99 +- Floating IPs: 192.168.42.128-192.168.42.254 + +Configure each node with a static IP. For Ubuntu edit +``/etc/network/interfaces``: + +:: + + auto eth0 + iface eth0 inet static + address 192.168.42.11 + netmask 255.255.255.0 + gateway 192.168.42.1 + +For Fedora and CentOS/RHEL edit +``/etc/sysconfig/network-scripts/ifcfg-eth0``: + +:: + + BOOTPROTO=static + IPADDR=192.168.42.11 + NETMASK=255.255.255.0 + GATEWAY=192.168.42.1 + +Installation shake and bake +=========================== + +Add the DevStack User +--------------------- + +OpenStack runs as a non-root user that has sudo access to root. There is +nothing special about the name, we'll use ``stack`` here. Every node +must use the same name and preferably uid. If you created a user during +the OS install you can use it and give it sudo privileges below. +Otherwise create the stack user: + +:: + + useradd -s /bin/bash -d /opt/stack -m stack + +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. 
+ +:: + + chmod +x /opt/stack + +This user will be making many changes to your system during installation +and operation so it needs to have sudo privileges to root without a +password: + +:: + + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack + +From here on use the ``stack`` user. **Logout** and **login** as the +``stack`` user. + +Set Up Ssh +---------- + +Set up the stack user on each node with an ssh key for access: + +:: + + mkdir ~/.ssh; chmod 700 ~/.ssh + echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyYjfgyPazTvGpd8OaAvtU2utL8W6gWC4JdRS1J95GhNNfQd657yO6s1AH5KYQWktcE6FO/xNUC2reEXSGC7ezy+sGO1kj9Limv5vrvNHvF1+wts0Cmyx61D2nQw35/Qz8BvpdJANL7VwP/cFI/p3yhvx2lsnjFE3hN8xRB2LtLUopUSVdBwACOVUmH2G+2BWMJDjVINd2DPqRIA4Zhy09KJ3O1Joabr0XpQL0yt/I9x8BVHdAx6l9U0tMg9dj5+tAjZvMAFfye3PJcYwwsfJoFxC8w/SLtqlFX7Ehw++8RtvomvuipLdmWCy+T9hIkl+gHYE4cS3OIqXH7f49jdJf jesse@spacey.local" > ~/.ssh/authorized_keys + +Download DevStack +----------------- + +Grab the latest version of DevStack: + +:: + + git clone https://opendev.org/openstack/devstack + cd devstack + +Up to this point all of the steps apply to each node in the cluster. +From here on there are some differences between the cluster controller +(aka 'head node') and the compute nodes. + +Configure Cluster Controller +---------------------------- + +The cluster controller runs all OpenStack services. Configure the +cluster controller's DevStack in ``local.conf``: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.11 + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + +In the multi-node configuration the first 10 or so IPs in the private +subnet are usually reserved. Add this to ``local.sh`` to have it run +after every ``stack.sh`` run: + +:: + + for i in `seq 2 10`; do /opt/stack/nova/bin/nova-manage fixed reserve 10.4.128.$i; done + +Fire up OpenStack: + +:: + + ./stack.sh + +A stream of activity ensues. When complete you will see a summary of +``stack.sh``'s work, including the relevant URLs, accounts and passwords +to poke at your shiny new OpenStack. The most recent log file is +available in ``stack.sh.log``. + +Configure Compute Nodes +----------------------- + +The compute nodes only run the OpenStack worker services. For additional +machines, create a ``local.conf`` with: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + +Fire up OpenStack: + +:: + + ./stack.sh + +A stream of activity ensues. When complete you will see a summary of +``stack.sh``'s work, including the relevant URLs, accounts and passwords +to poke at your shiny new OpenStack. The most recent log file is +available in ``stack.sh.log``. + +Starting in the Ocata release, Nova requires a `Cells v2`_ deployment. 
Compute +node services must be mapped to a cell before they can be used. + +After each compute node is stacked, verify it shows up in the +``nova service-list --binary nova-compute`` output. The compute service is +registered in the cell database asynchronously so this may require polling. + +Once the compute node services shows up, run the ``./tools/discover_hosts.sh`` +script from the control node to map compute hosts to the single cell. + +The compute service running on the primary control node will be +discovered automatically when the control node is stacked so this really +only needs to be performed for subnodes. + +.. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html + +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + +Cleaning Up After DevStack +-------------------------- + +Shutting down OpenStack is now as simple as running the included +``unstack.sh`` script: + +:: + + ./unstack.sh + +A more aggressive cleanup can be performed using ``clean.sh``. It +removes certain troublesome packages and attempts to leave the system in +a state where changing the database or queue manager can be reliably +performed. + +:: + + ./clean.sh + +Sometimes running instances are not cleaned up. DevStack attempts to do +this when it runs but there are times it needs to still be done by hand: + +:: + + sudo rm -rf /etc/libvirt/qemu/inst* + sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy + +Going further +============= + +Additional Users +---------------- + +DevStack creates two OpenStack users (``admin`` and ``demo``) and two +projects (also ``admin`` and ``demo``). ``admin`` is exactly what it +sounds like, a privileged administrative account that is a member of +both the ``admin`` and ``demo`` projects. ``demo`` is a normal user +account that is only a member of the ``demo`` project. Creating +additional OpenStack users can be done through the dashboard, sometimes +it is easier to do them in bulk from a script, especially since they get +blown away every time ``stack.sh`` runs. The following steps are ripe +for scripting: + +:: + + # Get admin creds + . 
openrc admin admin + + # List existing projects + openstack project list + + # List existing users + openstack user list + + # Add a user and project + NAME=bob + PASSWORD=BigSecret + PROJECT=$NAME + openstack project create $PROJECT + openstack user create $NAME --password=$PASSWORD --project $PROJECT + openstack role add Member --user $NAME --project $PROJECT + # The Member role is created by stack.sh + # openstack role assignment list + +Swift +----- + +Swift, OpenStack Object Storage, requires a significant amount of resources +and is disabled by default in DevStack. The support in DevStack is geared +toward a minimal installation but can be used for testing. To implement a +true multi-node test of swift, additional steps will be required. Enabling it is as +simple as enabling the ``swift`` service in ``local.conf``: + +:: + + enable_service s-proxy s-object s-container s-account + +Swift, OpenStack Object Storage, will put its data files in ``SWIFT_DATA_DIR`` (default +``/opt/stack/data/swift``). The size of the data 'partition' created +(really a loop-mounted file) is set by ``SWIFT_LOOPBACK_DISK_SIZE``. The +Swift config files are located in ``SWIFT_CONF_DIR`` (default +``/etc/swift``). All of these settings can be overridden in (wait for +it...) ``local.conf``. + +Volumes +------- + +DevStack will automatically use an existing LVM volume group named +``stack-volumes`` to store cloud-created volumes. If ``stack-volumes`` +doesn't exist, DevStack will set up a loop-mounted file to contain +it. If the default size is insufficient for the number and size of volumes +required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in +``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``). + +``stack-volumes`` can be pre-created on any physical volume supported by +Linux's LVM. The name of the volume group can be changed by setting +``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical +volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as +part of cleaning up from previous runs. It is recommended to not use the +root volume group as ``VOLUME_GROUP_NAME``. + +The details of creating the volume group depends on the server hardware +involved but looks something like this: + +:: + + pvcreate /dev/sdc + vgcreate stack-volumes /dev/sdc + +Syslog +------ + +DevStack is capable of using ``rsyslog`` to aggregate logging across the +cluster. It is off by default; to turn it on set ``SYSLOG=True`` in +``local.conf``. ``SYSLOG_HOST`` defaults to ``HOST_IP``; on the compute +nodes it must be set to the IP of the cluster controller to send syslog +output there. In the example above, add this to the compute node +``local.conf``: + +:: + + SYSLOG_HOST=192.168.42.11 + +Using Alternate Repositories/Branches +------------------------------------- + +The git repositories for all of the OpenStack services are defined in +``stackrc``. Since this file is a part of the DevStack package changes +to it will probably be overwritten as updates are applied. Every setting +in ``stackrc`` can be redefined in ``local.conf``. + +To change the repository or branch that a particular OpenStack service +is created from, simply change the value of ``*_REPO`` or ``*_BRANCH`` +corresponding to that service. + +After making changes to the repository or branch, if ``RECLONE`` is not +set in ``localrc`` it may be necessary to remove the corresponding +directory from ``/opt/stack`` to force git to re-clone the repository. 
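+
+A quick sketch of that removal step, assuming nova in the default
+``/opt/stack`` destination; the next ``stack.sh`` run will re-clone it::
+
+    rm -rf /opt/stack/nova
+    ./stack.sh
+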
+ +For example, to pull nova, OpenStack Compute, from a proposed release candidate +in the primary nova repository: + +:: + + NOVA_BRANCH=rc-proposed + +To pull glance, OpenStack Image service, from an experimental fork: + +:: + + GLANCE_BRANCH=try-something-big + GLANCE_REPO=https://github.com/mcuser/glance.git + +Notes stuff you might need to know +================================== + +Set MySQL Password +------------------ + +If you forgot to set the root password you can do this: + +:: + + mysqladmin -u root -pnova password 'supersecret' + +Live Migration +-------------- + +In order for live migration to work with the default live migration URI:: + + [libvirt] + live_migration_uri = qemu+ssh://stack@%s/system + +SSH keys need to be exchanged between each compute node: + +1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) + needs to be in the DESTINATION stack user's authorized_keys file + (~stack/.ssh/authorized_keys). This can be accomplished by manually + copying the contents from the file on the SOURCE to the DESTINATION. If + you have a password configured for the stack user, then you can use the + following command to accomplish the same thing:: + + ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION + +2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) + needs to be in the SOURCE root user's known_hosts file + (/root/.ssh/known_hosts). This can be accomplished by running the + following on the SOURCE machine (hostname must be used):: + + ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts + +3. Verify that login via ssh works without a password:: + + ssh -i /root/.ssh/id_rsa stack@DESTINATION + +In essence, this means that every compute node's root user's public RSA key +must exist in every other compute node's stack user's authorized_keys file and +every compute node's public ECDSA key needs to be in every other compute +node's root user's known_hosts file. Please note that if the root or stack +user does not have a SSH key, one can be generated using:: + + ssh-keygen -t rsa + +The above steps are necessary because libvirtd runs as root when the +live_migration_uri uses the "qemu:///system" family of URIs. For more +information, see the `libvirt documentation`_. + +.. _libvirt documentation: https://libvirt.org/drvqemu.html#securitydriver diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst new file mode 100644 index 0000000000..a7adeeff73 --- /dev/null +++ b/doc/source/guides/neutron.rst @@ -0,0 +1,548 @@ +====================================== +Using DevStack with neutron Networking +====================================== + +This guide will walk you through using OpenStack neutron with the ML2 +plugin and the Open vSwitch mechanism driver. + + +.. _single-interface-ovs: + +Using Neutron with a Single Interface +===================================== + +In some instances, like on a developer laptop, there is only one +network interface that is available. In this scenario, the physical +interface is added to the Open vSwitch bridge, and the IP address of +the laptop is migrated onto the bridge interface. That way, the +physical interface can be used to transmit self service project +network traffic, the OpenStack API traffic, and management traffic. + + +.. warning:: + + When using a single interface networking setup, there will be a + temporary network outage as your IP address is moved from the + physical NIC of your machine, to the OVS bridge. 
If you are SSH'd + into the machine from another computer, there is a risk of being + disconnected from your ssh session (due to arp cache + invalidation), which would stop the stack.sh or leave it in an + unfinished state. In these cases, start stack.sh inside its own + screen session so it can continue to run. + + +Physical Network Setup +---------------------- + +In most cases where DevStack is being deployed with a single +interface, there is a hardware router that is being used for external +connectivity and DHCP. The developer machine is connected to this +network and is on a shared subnet with other machines. The +`local.conf` exhibited here assumes that 1500 is a reasonable MTU to +use on that network. + +.. image:: /assets/images/neutron-network-1.png + :alt: Network configuration for a single DevStack node + + +DevStack Configuration +---------------------- + +The following is a complete `local.conf` for the host named +`devstack-1`. It will run all the API and services, as well as +serving as a hypervisor for guest instances. + +:: + + [[local|localrc]] + HOST_IP=172.18.161.6 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + ## Neutron options + Q_USE_SECGROUP=True + FLOATING_RANGE="172.18.161.0/24" + IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22" + Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 + PUBLIC_NETWORK_GATEWAY="172.18.161.1" + PUBLIC_INTERFACE=eth0 + + # Open vSwitch provider networking configuration + Q_USE_PROVIDERNET_FOR_PUBLIC=True + OVS_PHYSICAL_BRIDGE=br-ex + PUBLIC_BRIDGE=br-ex + OVS_BRIDGE_MAPPINGS=public:br-ex + + +Adding Additional Compute Nodes +------------------------------- + +Let's suppose that after installing DevStack on the first host, you +also want to do multinode testing and networking. + +Physical Network Setup +~~~~~~~~~~~~~~~~~~~~~~ + +.. image:: /assets/images/neutron-network-2.png + :alt: Network configuration for multiple DevStack nodes + +After DevStack installs and configures Neutron, traffic from guest VMs +flows out of `devstack-2` (the compute node) and is encapsulated in a +VXLAN tunnel back to `devstack-1` (the control node) where the L3 +agent is running. + +:: + + stack@devstack-2:~/devstack$ sudo ovs-vsctl show + 8992d965-0ba0-42fd-90e9-20ecc528bc29 + Bridge br-int + fail_mode: secure + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Bridge br-tun + fail_mode: secure + Port "vxlan-c0a801f6" + Interface "vxlan-c0a801f6" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.7", out_key=flow, remote_ip="172.18.161.6"} + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port br-tun + Interface br-tun + type: internal + ovs_version: "2.0.2" + +Open vSwitch on the control node, where the L3 agent runs, is +configured to de-encapsulate traffic from compute nodes, then forward +it over the `br-ex` bridge, where `eth0` is attached. 
+ +:: + + stack@devstack-1:~/devstack$ sudo ovs-vsctl show + 422adeea-48d1-4a1f-98b1-8e7239077964 + Bridge br-tun + fail_mode: secure + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port "vxlan-c0a801d8" + Interface "vxlan-c0a801d8" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.6", out_key=flow, remote_ip="172.18.161.7"} + Bridge br-ex + Port phy-br-ex + Interface phy-br-ex + type: patch + options: {peer=int-br-ex} + Port "eth0" + Interface "eth0" + Port br-ex + Interface br-ex + type: internal + Bridge br-int + fail_mode: secure + Port "tapce66332d-ea" + tag: 1 + Interface "tapce66332d-ea" + type: internal + Port "qg-65e5a4b9-15" + tag: 2 + Interface "qg-65e5a4b9-15" + type: internal + Port "qr-33e5e471-88" + tag: 1 + Interface "qr-33e5e471-88" + type: internal + Port "qr-acbe9951-70" + tag: 1 + Interface "qr-acbe9951-70" + type: internal + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port int-br-ex + Interface int-br-ex + type: patch + options: {peer=phy-br-ex} + ovs_version: "2.0.2" + +`br-int` is a bridge that the Open vSwitch mechanism driver creates, +which is used as the "integration bridge" where ports are created, and +plugged into the virtual switching fabric. `br-ex` is an OVS bridge +that is used to connect physical ports (like `eth0`), so that floating +IP traffic for project networks can be received from the physical +network infrastructure (and the internet), and routed to self service +project network ports. `br-tun` is a tunnel bridge that is used to +connect OpenStack nodes (like `devstack-2`) together. This bridge is +used so that project network traffic, using the VXLAN tunneling +protocol, flows between each compute node where project instances run. + +DevStack Compute Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The host `devstack-2` has a very minimal `local.conf`. + +:: + + [[local|localrc]] + HOST_IP=172.18.161.7 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secret + MYSQL_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + ## Neutron options + PUBLIC_INTERFACE=eth0 + ENABLED_SERVICES=n-cpu,rabbit,q-agt,placement-client + +Network traffic from `eth0` on the compute nodes is then NAT'd by the +controller node that runs Neutron's `neutron-l3-agent` and provides L3 +connectivity. + + +Neutron Networking with Open vSwitch and Provider Networks +========================================================== + +In some instances, it is desirable to use neutron's provider +networking extension, so that networks that are configured on an +external router can be utilized by neutron, and instances created via +Nova can attach to the network managed by the external router. + +For example, in some lab environments, a hardware router has been +pre-configured by another party, and an OpenStack developer has been +given a VLAN tag and IP address range, so that instances created via +DevStack will use the external router for L3 connectivity, as opposed +to the neutron L3 service. + +Physical Network Setup +---------------------- + +.. 
image:: /assets/images/neutron-network-3.png
+   :alt: Network configuration for provider networks
+
+On a compute node, the first interface, eth0, is used for the OpenStack
+management (API, message bus, etc.) as well as for ssh for an
+administrator to access the machine.
+
+::
+
+    stack@compute:~$ ifconfig eth0
+    eth0      Link encap:Ethernet  HWaddr bc:16:65:20:af:fc
+              inet addr:10.0.0.3
+
+eth1 is manually configured at boot to not have an IP address.
+Consult your operating system documentation for the appropriate
+technique. For Ubuntu, the contents of `/etc/network/interfaces`
+contain:
+
+::
+
+    auto eth1
+    iface eth1 inet manual
+        up ifconfig $IFACE 0.0.0.0 up
+        down ifconfig $IFACE 0.0.0.0 down
+
+The second physical interface, eth1, is added to a bridge (in this case
+named br-ex), which is used to forward network traffic from guest VMs.
+
+::
+
+    stack@compute:~$ sudo ovs-vsctl add-br br-ex
+    stack@compute:~$ sudo ovs-vsctl add-port br-ex eth1
+    stack@compute:~$ sudo ovs-vsctl show
+    9a25c837-32ab-45f6-b9f2-1dd888abcf0f
+        Bridge br-ex
+            Port br-ex
+                Interface br-ex
+                    type: internal
+            Port phy-br-ex
+                Interface phy-br-ex
+                    type: patch
+                    options: {peer=int-br-ex}
+            Port "eth1"
+                Interface "eth1"
+
+
+Service Configuration
+---------------------
+
+**Control Node**
+
+In this example, the control node will run the majority of the
+OpenStack API and management services (keystone, glance,
+nova, neutron).
+
+
+**Compute Nodes**
+
+In this example, the nodes that will host guest instances will run
+the ``neutron-openvswitch-agent`` for network connectivity, as well as
+the compute service ``nova-compute``.
+
+DevStack Configuration
+----------------------
+
+.. _ovs-provider-network-controller:
+
+The following is a snippet of the DevStack configuration on the
+controller node.
+
+::
+
+    HOST_IP=10.0.0.2
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    GLANCE_HOSTPORT=10.0.0.2:9292
+    PUBLIC_INTERFACE=eth1
+
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    ## Neutron options
+    Q_USE_SECGROUP=True
+    ENABLE_TENANT_VLANS=True
+    TENANT_VLAN_RANGE=3001:4000
+    PHYSICAL_NETWORK=default
+    OVS_PHYSICAL_BRIDGE=br-ex
+
+    Q_USE_PROVIDER_NETWORKING=True
+
+    disable_service q-l3
+
+    ## Neutron Networking options used to create Neutron Subnets
+
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
+    NETWORK_GATEWAY=203.0.113.1
+    PROVIDER_SUBNET_NAME="provider_net"
+    PROVIDER_NETWORK_TYPE="vlan"
+    SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
+
+In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
+publicly routed IPv4 subnet. In this specific instance we are using
+the special TEST-NET-3 subnet defined in `RFC 5737
+<https://tools.ietf.org/html/rfc5737>`_, which is used for
+documentation. In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
+would be a public IP address range that you or your organization has
+allocated to you, so that you could access your instances from the
+public internet.
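+
+After ``stack.sh`` completes on the controller, a quick way to verify the
+result (a simple sanity check; the subnet name comes from
+``PROVIDER_SUBNET_NAME`` above) is::
+
+    source openrc admin admin
+    openstack network list
+    openstack subnet show provider_net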
+
+The following is the DevStack configuration on
+compute node 1.
+
+::
+
+    HOST_IP=10.0.0.3
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    GLANCE_HOSTPORT=10.0.0.2:9292
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    # Services that a compute node runs
+    ENABLED_SERVICES=n-cpu,rabbit,q-agt
+
+    ## Open vSwitch provider networking options
+    PHYSICAL_NETWORK=default
+    OVS_PHYSICAL_BRIDGE=br-ex
+    PUBLIC_INTERFACE=eth1
+    Q_USE_PROVIDER_NETWORKING=True
+
+Compute node 2's configuration will be exactly the same, except
+``HOST_IP`` will be ``10.0.0.4``.
+
+When DevStack is configured to use provider networking (i.e. when
+``Q_USE_PROVIDER_NETWORKING`` is True), DevStack will automatically add
+the network interface defined in ``PUBLIC_INTERFACE`` to the
+``OVS_PHYSICAL_BRIDGE``.
+
+For example, with the above configuration, a bridge named ``br-ex`` is
+created and managed by Open vSwitch, and the second interface on the
+compute node, ``eth1``, is attached to the bridge to forward traffic
+sent by guest VMs.
+
+Miscellaneous Tips
+==================
+
+Non-Standard MTU on the Physical Network
+----------------------------------------
+
+Neutron by default uses an MTU of 1500 bytes, which is
+the standard MTU for Ethernet.
+
+A different MTU can be specified by adding the following to
+the Neutron section of `local.conf`. For example,
+if you have network equipment that supports jumbo frames, you could
+set the MTU to 9000 bytes by adding the following:
+
+::
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    global_physnet_mtu = 9000
+
+
+Disabling Next Generation Firewall Tools
+----------------------------------------
+
+DevStack does not properly operate with modern firewall tools. Specifically,
+it will appear as if the guest VM can access the external network via ICMP,
+but UDP and TCP packets will not be delivered to the guest VM. The root cause
+of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's
+firewall manager) apply firewall rules to all interfaces in the system, rather
+than per device. One solution to this problem is to revert to iptables
+functionality.
+
+To get a functional firewall configuration for Fedora, do the following:
+
+::
+
+    sudo service iptables save
+    sudo systemctl disable firewalld
+    sudo systemctl enable iptables
+    sudo systemctl stop firewalld
+    sudo systemctl start iptables
+
+
+To get a functional firewall configuration for distributions containing ufw,
+disable ufw. Note that ufw is generally not enabled by default in Ubuntu. To
+disable ufw if it was enabled, do the following:
+
+::
+
+    sudo service iptables save
+    sudo ufw disable
+
+Configuring Extension Drivers for the ML2 Plugin
+------------------------------------------------
+
+Extension drivers for the ML2 plugin are set with the variable
+``Q_ML2_PLUGIN_EXT_DRIVERS``, which includes the 'port_security' extension
+by default. If you want to remove all the extension drivers (even
+'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank.
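+
+Conversely, additional drivers can be appended to the default. For example,
+to load the qos extension driver alongside port security (an illustration;
+the driver must be available in your neutron version)::
+
+    Q_ML2_PLUGIN_EXT_DRIVERS=port_security,qos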
+
+Using MacVTap instead of Open vSwitch
+-------------------------------------
+
+Security groups are not supported by the MacVTap agent. Due to that, devstack
+configures the NoopFirewall driver on the compute node.
+
+The MacVTap agent does not support the l3, dhcp, and metadata agents. Due to
+that, you can choose between the following deployment scenarios:
+
+Single node with provider networks using config drive and external l3, dhcp
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if l3 and dhcp services are provided externally, or if
+you do not require them.
+
+
+::
+
+    [[local|localrc]]
+    HOST_IP=10.0.0.2
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
+    Q_USE_PROVIDER_NETWORKING=True
+
+    enable_plugin neutron https://opendev.org/openstack/neutron
+
+    ## MacVTap agent options
+    Q_AGENT=macvtap
+    PHYSICAL_NETWORK=default
+
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
+    NETWORK_GATEWAY=203.0.113.1
+    PROVIDER_SUBNET_NAME="provider_net"
+    PROVIDER_NETWORK_TYPE="vlan"
+    SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [macvtap]
+    physical_interface_mappings = $PHYSICAL_NETWORK:eth1
+
+    [[post-config|$NOVA_CONF]]
+    force_config_drive = True
+
+
+Multi node with MacVTap compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if you require OpenStack-provided l3, dhcp, or metadata
+services. Those are hosted on a separate controller and network node, running
+some other l2 agent technology (in this example Open vSwitch). This node needs
+to be configured for VLAN tenant networks.
+
+For OVS, a configuration similar to the one described in the
+:ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
+used. Just add the following line to this local.conf, which also loads
+the MacVTap mechanism driver:
+
+::
+
+    [[local|localrc]]
+    ...
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap
+    ...
+
+For the MacVTap compute node, use this local.conf:
+
+::
+
+    HOST_IP=10.0.0.3
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    # Services that a compute node runs
+    disable_all_services
+    enable_plugin neutron https://opendev.org/openstack/neutron
+    ENABLED_SERVICES+=n-cpu,q-agt
+
+    ## MacVTap agent options
+    Q_AGENT=macvtap
+    PHYSICAL_NETWORK=default
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [macvtap]
+    physical_interface_mappings = $PHYSICAL_NETWORK:eth1
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
new file mode 100644
index 0000000000..6b8aabf8db
--- /dev/null
+++ b/doc/source/guides/nova.rst
@@ -0,0 +1,136 @@
+=================
+Nova and DevStack
+=================
+
+This is a rough guide to various configuration parameters for nova
+running with DevStack.
+
+
+nova-serialproxy
+================
+
+In Juno, nova implemented a `spec
+`_
+to allow read/write access to the serial console of an instance via
+`nova-serialproxy
+`_.
+
+The service can be enabled by adding ``n-sproxy`` to
+``ENABLED_SERVICES``. Further options can be enabled via
+``local.conf``, e.g.:
+
+::
+
+    [[post-config|$NOVA_CONF]]
+    [serial_console]
+    #
+    # Options defined in nova.cmd.serialproxy
+    #
+
+    # Host on which to listen for incoming requests (string value)
+    #serialproxy_host=0.0.0.0
+
+    # Port on which to listen for incoming requests (integer
+    # value)
+    #serialproxy_port=6083
+
+
+    #
+    # Options defined in nova.console.serial
+    #
+
+    # Enable serial console related features (boolean value)
+    #enabled=false
+    # Do not set this manually. Instead enable the service as
+    # outlined above.
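+
+    # (All of the values shown commented out in this example are the
+    # built-in defaults; uncomment and adjust only what you need.)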
+ + # Range of TCP ports to use for serial ports on compute hosts + # (string value) + #port_range=10000:20000 + + # Location of serial console proxy. (string value) + #base_url=ws://127.0.0.1:6083/ + + # IP address on which instance serial console should listen + # (string value) + #listen=127.0.0.1 + + # The address to which proxy clients (like nova-serialproxy) + # should connect (string value) + #proxyclient_address=127.0.0.1 + + +Enabling the service is enough to be functional for a single machine DevStack. + +These config options are defined in `nova.conf.serial_console +`_. + +For more information on OpenStack configuration see the `OpenStack +Compute Service Configuration Reference +`_ + + +Fake virt driver +================ + +Nova has a `fake virt driver`_ which can be used for scale testing the control +plane services or testing "move" operations between fake compute nodes, for +example cold/live migration, evacuate and unshelve. + +The fake virt driver does not communicate with any hypervisor, it just reports +some fake resource inventory values and keeps track of the state of the +"guests" created, moved and deleted. It is not feature-complete with the +compute API but is good enough for most API testing, and is also used within +the nova functional tests themselves so is fairly robust. + +.. _fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py + +Configuration +------------- + +Set the following in your devstack ``local.conf``: + +.. code-block:: ini + + [[local|localrc]] + VIRT_DRIVER=fake + NUMBER_FAKE_NOVA_COMPUTE= + +The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake +``nova-compute`` services to run and defaults to 1. + +When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in +nova and neutron automatically. However, other services, like cinder, will +still enforce quota limits by default. + +Scaling +------- + +The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors +such as: + +* The size of the host (physical or virtualized) on which devstack is running. +* The number of API workers. By default, devstack will run ``max($nproc/2, 2)`` + workers per API service. If you are running several fake compute services on + a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``. + +In addition, while quota will be disabled in neutron, there is no fake ML2 +backend for neutron so creating fake VMs will still result in real ports being +created. To create servers without networking, you can specify ``--nic=none`` +when creating the server, for example: + +.. code-block:: shell + + $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server + +.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is + required to use ``--nic=none``. + +To avoid overhead from other services which you may not need, disable them in +your ``local.conf``, for example: + +.. code-block:: ini + + disable_service horizon + disable_service tempest diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst new file mode 100644 index 0000000000..263fbb9d6f --- /dev/null +++ b/doc/source/guides/single-machine.rst @@ -0,0 +1,144 @@ +========================= +All-In-One Single Machine +========================= + +Things are about to get real! Using OpenStack in containers or VMs is +nice for kicking the tires, but doesn't compare to the feeling you get +with hardware. 
+
+Prerequisites Linux & Network
+=============================
+
+Minimal Install
+---------------
+
+You need to have a system with a fresh install of Linux. You can
+download the `Minimal
+CD `__ for
+Ubuntu releases since DevStack will download & install all the
+additional dependencies. The netinstall ISO is available for
+`Fedora `__
+and
+`CentOS/RHEL `__.
+You may be tempted to use a desktop distro on a laptop; it will probably
+work, but you may need to tell Network Manager to keep its fingers off
+the interface(s) that OpenStack uses for bridging.
+
+Network Configuration
+---------------------
+
+Determine the network configuration on the interface used to integrate
+your OpenStack cloud with your existing network. For example, if the IPs
+given out on your network by DHCP are 192.168.1.X, where X is between
+100 and 200, you will be able to use IPs 201-254 for **floating IPs**.
+
+To make things easier later, change your host to use a static IP instead
+of DHCP (i.e. 192.168.1.201).
+
+Installation shake and bake
+===========================
+
+Add your user
+-------------
+
+We need to add a user to install DevStack. (If you created a user during
+install, you can skip this step and just give that user sudo privileges
+below.)
+
+.. code-block:: console
+
+   $ sudo useradd -s /bin/bash -d /opt/stack -m stack
+
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL based distros create it with ``700`` and Ubuntu 21.04+
+with ``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+   $ sudo chmod +x /opt/stack
+
+Since this user will be making many changes to your system, it will need
+to have sudo privileges:
+
+.. code-block:: console
+
+   $ apt-get install sudo -y || dnf install -y sudo
+   $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
+
+.. note:: On some systems you may need to use ``sudo visudo``.
+
+From here on you should use the user you created. **Logout** and
+**login** as that user:
+
+.. code-block:: console
+
+   $ sudo su - stack
+
+Download DevStack
+-----------------
+
+We'll grab the latest version of DevStack via https:
+
+.. code-block:: console
+
+   $ sudo apt-get install git -y || sudo dnf install -y git
+   $ git clone https://opendev.org/openstack/devstack
+   $ cd devstack
+
+Run DevStack
+------------
+
+Now to configure ``stack.sh``. DevStack includes a sample in
+``devstack/samples/local.conf``. Create ``local.conf`` as shown below to
+do the following:
+
+- Set ``FLOATING_RANGE`` to a range not used on the local network, i.e.
+  192.168.1.224/27. This configures IP addresses ending in 225-254 to
+  be used as floating IPs.
+- Set ``FIXED_RANGE`` to configure the internal address space used by the
+  instances.
+- Set the administrative password. This password is used for the
+  **admin** and **demo** accounts set up as OpenStack users.
+- Set the MySQL administrative password. The default here is a random
+  hex string, which is inconvenient if you need to look at the database
+  directly for anything.
+- Set the RabbitMQ password.
+- Set the service password. This is used by the OpenStack services
+  (Nova, Glance, etc) to authenticate with Keystone.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
+``local.conf`` should look something like this:
+
+.. code-block:: ini
+
+   [[local|localrc]]
+   FLOATING_RANGE=192.168.1.224/27
+   FIXED_RANGE=10.11.12.0/24
+   ADMIN_PASSWORD=supersecret
+   DATABASE_PASSWORD=iheartdatabases
+   RABBIT_PASSWORD=flopsymopsy
+   SERVICE_PASSWORD=iheartksl
+
+.. note:: There is a sample :download:`local.conf ` file
+   under the *samples* directory in the devstack repository.
+
+Run DevStack:
+
+.. code-block:: console
+
+   $ ./stack.sh
+
+A seemingly endless stream of activity ensues. When complete, you will
+see a summary of ``stack.sh``'s work, including the relevant URLs,
+accounts and passwords to poke at your shiny new OpenStack.
+
+Using OpenStack
+---------------
+
+At this point you should be able to access the dashboard from other
+computers on the local network. In this example that would be
+http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs, and if
+you give them floating IPs and security group access, those VMs will be
+accessible from other machines on your network.
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
new file mode 100644
index 0000000000..4272a4b180
--- /dev/null
+++ b/doc/source/guides/single-vm.rst
@@ -0,0 +1,103 @@
+====================
+All-In-One Single VM
+====================
+
+Use the cloud to build the cloud! Use your cloud to launch new versions
+of OpenStack in about 5 minutes. If you break it, start over! The VMs
+launched in the cloud will be slow as they are running in QEMU
+(emulation), but their primary use is testing OpenStack development and
+operation.
+
+Prerequisites Cloud & Image
+===========================
+
+Virtual Machine
+---------------
+
+DevStack should run in any virtual machine running a supported Linux
+release. It will perform best with 4GB or more of RAM.
+
+OpenStack Deployment & cloud-init
+---------------------------------
+
+If the cloud service has an image with ``cloud-init`` pre-installed, use
+it. You can get one from `Ubuntu's Daily
+Build `__ site if necessary. This will
+enable you to launch VMs with userdata that installs everything at boot
+time. The userdata script below will install and run DevStack with a
+minimal configuration. The use of ``cloud-init`` is outside the scope of
+this document; refer to the ``cloud-init`` docs for more information.
+
+If you are directly using a hypervisor like Xen, KVM, or VirtualBox, you
+can manually kick off the script below as a non-root user in a
+bare-bones server installation.
+
+Installation shake and bake
+===========================
+
+Launching With Cloud-Init
+-------------------------
+
+This cloud config grabs the latest version of DevStack via git, creates
+a minimal ``local.conf`` file and kicks off ``stack.sh``. It should be
+passed as the user-data file when booting the VM.
+
+::
+
+    #cloud-config
+
+    users:
+      - default
+      - name: stack
+        lock_passwd: False
+        sudo: ["ALL=(ALL) NOPASSWD:ALL\nDefaults:stack !requiretty"]
+        shell: /bin/bash
+
+    write_files:
+      - content: |
+          #!/bin/sh
+          DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy
+          DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git
+          sudo chown stack:stack /home/stack
+          cd /home/stack
+          git clone https://opendev.org/openstack/devstack
+          cd devstack
+          echo '[[local|localrc]]' > local.conf
+          echo ADMIN_PASSWORD=password >> local.conf
+          echo DATABASE_PASSWORD=password >> local.conf
+          echo RABBIT_PASSWORD=password >> local.conf
+          echo SERVICE_PASSWORD=password >> local.conf
+          ./stack.sh
+        path: /home/stack/start.sh
+        permissions: 0755
+
+    runcmd:
+      - su -l stack ./start.sh
+
+As DevStack will refuse to run as root, this configures ``cloud-init``
+to create a non-root user and run the ``start.sh`` script as that user.
+
+If you are using cloud-init and you have not
+:ref:`enabled custom logging ` of the stack
+output, then the stack output can be found in
+``/var/log/cloud-init-output.log`` by default.
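+
+If you are booting the VM on an OpenStack cloud, the cloud config above can
+be passed directly at boot time (a sketch; the flavor, image, and network
+names are placeholders to replace with values from your cloud)::
+
+    openstack server create --flavor m1.large --image ubuntu-24.04 \
+        --user-data user-data.yaml --network my-net devstack-vm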
+
+Launching By Hand
+-----------------
+
+Using a hypervisor directly, launch the VM and either manually perform
+the steps in the embedded shell script above or copy it into the VM.
+
+Using OpenStack
+---------------
+
+At this point you should be able to access the dashboard. Launch VMs,
+and if you give them floating IPs, access those VMs from other machines
+on your network.
+
+One interesting use case is for developers working on a VM on their
+laptop. After ``stack.sh`` has completed once, all of the prerequisite
+packages are installed in the VM and the source trees are checked out.
+Setting ``OFFLINE=True`` in ``local.conf`` enables ``stack.sh`` to run
+multiple times without an Internet connection. DevStack, making hacking
+at the lake possible since 2012!
diff --git a/doc/source/hacking.rst b/doc/source/hacking.rst
new file mode 100644
index 0000000000..a2bcf4fd67
--- /dev/null
+++ b/doc/source/hacking.rst
@@ -0,0 +1 @@
+.. include:: ../../HACKING.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000000..a07bb84922
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,180 @@
+.. Documentation Architecture for the devstack docs.
+
+   It is really easy for online docs to meander over time as people
+   attempt to add the small bit of additional information they think
+   people need, into an existing information architecture. In order to
+   prevent that we need to be a bit strict as to what's on this front
+   page.
+
+   This should *only* be the quick start narrative. Which should end
+   with 2 sections: what you can do with devstack once it's set up,
+   and how to go beyond this setup. Both should be a set of quick
+   links to other documents to let people explore from there.
+
+DevStack
+========
+
+.. image:: assets/images/logo-blue.png
+
+DevStack is a series of extensible scripts used to quickly bring up a
+complete OpenStack environment based on the latest versions of
+everything from git master. It is used interactively as a development
+environment and as the basis for much of the OpenStack project's
+functional testing.
+
+The source is available at `opendev.org
+<https://opendev.org/openstack/devstack>`__.
+
+.. warning::
+
+   DevStack will make substantial changes to your system during
+   installation. Only run DevStack on servers or virtual machines that
+   are dedicated to this purpose.
+
+Quick Start
++++++++++++
+
+Install Linux
+-------------
+
+Start with a clean and minimal install of a Linux system. DevStack
+attempts to support the two latest LTS releases of Ubuntu,
+Rocky Linux 9 and openEuler.
+
+If you do not have a preference, Ubuntu 24.04 (Noble) is the
+most tested, and will probably go the smoothest.
+
+Add Stack User (optional)
+-------------------------
+
+DevStack should be run as a non-root user with sudo enabled
+(standard logins to cloud images such as "ubuntu" or "cloud-user"
+are usually fine).
+
+If you are not using a cloud image, you can create a separate `stack` user
+to run DevStack with:
+
+.. code-block:: console
+
+   $ sudo useradd -s /bin/bash -d /opt/stack -m stack
+
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL based distros create it with ``700`` and Ubuntu 21.04+
+with ``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+   $ sudo chmod +x /opt/stack
+
+Since this user will be making many changes to your system, it should
+have sudo privileges:
+
+.. code-block:: console
+
+   $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
+   $ sudo -u stack -i
+
+Download DevStack
+-----------------
+
+.. code-block:: console
+
+   $ git clone https://opendev.org/openstack/devstack
+   $ cd devstack
+
+The ``devstack`` repo contains a script that installs OpenStack and
+templates for configuration files.
+
+Create a local.conf
+-------------------
+
+Create a ``local.conf`` file with four passwords preset at the root of the
+devstack git repo.
+
+.. code-block:: ini
+
+   [[local|localrc]]
+   ADMIN_PASSWORD=secret
+   DATABASE_PASSWORD=$ADMIN_PASSWORD
+   RABBIT_PASSWORD=$ADMIN_PASSWORD
+   SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+This is the minimum required config to get started with DevStack.
+
+.. note:: There is a sample :download:`local.conf ` file
+   under the *samples* directory in the devstack repository.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
+Start the install
+-----------------
+
+.. code-block:: console
+
+   $ ./stack.sh
+
+This will take 15 - 30 minutes, largely depending on the speed of
+your internet connection. Many git trees and packages will be
+installed during this process.
+
+Profit!
+-------
+
+You now have a working DevStack! Congrats!
+
+Your devstack will have installed ``keystone``, ``glance``, ``nova``,
+``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs
+will be available, and guests have access to the external world.
+
+You can access horizon to experience the web interface to
+OpenStack, and manage VMs, networks, volumes, and images from
+there.
+
+You can ``source openrc`` in your shell, and then use the
+``openstack`` command line tool to manage your devstack.
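+
+For example, a quick smoke test (the ``demo`` user and project are
+created by default):
+
+.. code-block:: console
+
+   $ source openrc demo demo
+   $ openstack image list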
+
+You can :ref:`create a VM and SSH into it `.
+
+You can ``cd /opt/stack/tempest`` and run tempest tests that have
+been configured to work with your devstack.
+
+You can :doc:`make code changes to OpenStack and validate them
+`.
+
+Going further
+-------------
+
+Learn more about our :doc:`configuration system ` to
+customize devstack for your needs, including making adjustments to the
+default :doc:`networking `.
+
+Read :doc:`guides ` for specific setups people have (note:
+guides are point-in-time contributions, and may not always be kept
+up to date with the latest devstack).
+
+Enable :doc:`devstack plugins ` to support additional
+services, features, and configuration not present in base devstack.
+
+Use devstack in your CI with :doc:`Ansible roles ` and
+:doc:`Jobs ` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul
+V3 with this full migration :doc:`how-to `.
+
+Get :doc:`the big picture ` of what we are trying to do
+with devstack, and help us by :doc:`contributing to the project
+`.
+
+If you are a new contributor to devstack, please refer to
+:doc:`contributor/contributing`.
+
+.. toctree::
+   :hidden:
+
+   contributor/contributing
+
+Contents
+++++++++
+
+.. toctree::
+   :glob:
+   :maxdepth: 2
+
+   *
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
new file mode 100644
index 0000000000..10e1c3ff2c
--- /dev/null
+++ b/doc/source/networking.rst
@@ -0,0 +1,238 @@
+=====================
+ DevStack Networking
+=====================
+
+An important part of the DevStack experience is networking that works
+by default for created guests. This might not be optimal for your
+particular testing environment, so this document tries its best to
+explain what's going on.
+
+Defaults
+========
+
+If you don't specify any configuration you will get the following:
+
+* neutron (including l3 with openvswitch)
+* private project networks for each openstack project
+* a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
+* the demo project configured with fixed ips on a subnet allocated from
+  the 10.0.0.0/22 range
+* a ``br-ex`` interface controlled by neutron for all its networking
+  (this is not connected to any physical interfaces).
+* DNS resolution for guests based on the resolv.conf for your host
+* an ip masq rule that allows created guests to route out
+
+This creates an environment which is isolated to the single
+host. Guests can get to the external network for package
+updates. Tempest tests will work in this environment.
+
+.. note::
+
+   By default all OpenStack environments have security group rules
+   which block all inbound packets to guests. If you want to be able
+   to ssh / ping your created guests you should run the following.
+
+   .. code-block:: bash
+
+      openstack security group rule create --proto icmp --dst-port 0 default
+      openstack security group rule create --proto tcp --dst-port 22 default
+
+Locally Accessible Guests
+=========================
+
+If you want to make your guests accessible from other machines on your
+network, you have to connect ``br-ex`` to a physical interface.
+
+Dedicated Guest Interface
+-------------------------
+
+If you have 2 or more interfaces on your devstack server, you can
+allocate an interface to neutron to fully manage. This **should not**
+be the same interface you use to ssh into the devstack server itself.
+
+This is done by setting the ``PUBLIC_INTERFACE`` attribute.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth1
+
+That will put all layer 2 traffic from your guests onto the main
+network. When running in this mode the ip masq rule is **not** added
+in your devstack; you are responsible for making routing work on your
+local network.
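+
+Once ``stack.sh`` completes, you can confirm that the interface was
+attached to the bridge (a quick check; ``eth1`` as in the example above):
+
+.. code-block:: bash
+
+   sudo ovs-vsctl list-ports br-ex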
+
+Shared Guest Interface
+----------------------
+
+.. warning::
+
+   This is not a recommended configuration. Because of interactions
+   between OVS and bridging, if you reboot your box with active
+   networking you may lose network connectivity to your system.
+
+If you need your guests accessible on the network, but only have 1
+interface (using something like a NUC), you can share your one
+network. But in order for this to work, you need to manually set a lot
+of addresses and have them all exactly correct.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth0
+   HOST_IP=10.42.0.52
+   FLOATING_RANGE=10.42.0.0/24
+   PUBLIC_NETWORK_GATEWAY=10.42.0.1
+   Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
+
+In order for this scenario to work, the floating ip network must match
+the default networking on your server. This breaks HOST_IP detection,
+as we exclude the floating range by default, so you have to specify
+that manually.
+
+The ``PUBLIC_NETWORK_GATEWAY`` is the gateway that the server would
+normally use to get off the network. ``Q_FLOATING_ALLOCATION_POOL``
+controls the range of floating ips that will be handed out. As we are
+sharing your existing network, you'll want to give it a slice that your
+local dhcp server is not allocating. Otherwise you could easily have
+conflicting ip addresses and cause havoc on your local network.
+
+
+Private Network Addressing
+==========================
+
+The private network addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE``
+and the ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify a
+single variable of safe internal IPs that will be referenced whether or
+not subnetpools are in use.
+
+For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to
+the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly.
+
+For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of
+``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller,
+``FIXED_RANGE_V6`` will just use the value of that directly.
+``SUBNETPOOL_PREFIX_V6`` will just default to the value of
+``IPV6_ADDRS_SAFE_TO_USE`` directly.
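+
+For example, to move all project networks to a different range (a sketch;
+pick space that does not collide with addressing on your local network):
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   IPV4_ADDRS_SAFE_TO_USE=192.168.100.0/22
+   IPV6_ADDRS_SAFE_TO_USE=fd00:99::/56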
+
+.. _ssh:
+
+SSH access to instances
+=======================
+
+To validate connectivity, you can create an instance using the
+``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP
+using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach
+this floating IP to the instance:
+
+.. code-block:: shell
+
+   openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair
+   openstack server create --network private --key-name test-keypair ... test-server
+   fip_id=$(openstack floating ip create public -f value -c id)
+   openstack server add floating ip test-server ${fip_id}
+
+Once done, ensure you have enabled SSH and ICMP (ping) access for the security
+group used for the instance. You can either create a custom security group and
+specify it when creating the instance or add it after creation, or you can
+modify the ``default`` security group created by default for each project.
+Let's do the latter:
+
+.. code-block:: shell
+
+   openstack security group rule create --proto icmp --dst-port 0 default
+   openstack security group rule create --proto tcp --dst-port 22 default
+
+Finally, SSH into the instance. If you used the Cirros instance uploaded by
+default, then you can run the following:
+
+.. code-block:: shell
+
+   openstack server ssh test-server -- -l cirros
+
+This will connect using the ``cirros`` user and the keypair you configured when
+creating the instance.
+
+Remote SSH access to instances
+==============================
+
+You can also SSH to created instances on your DevStack host from other hosts.
+This can be helpful if you are e.g. deploying DevStack in a VM on an existing
+cloud and wish to do development on your local machine. There are a few ways to
+do this.
+
+.. rubric:: Configure instances to be locally accessible
+
+The most obvious way is to configure guests to be locally accessible, as
+described `above `__. This has the advantage of
+requiring no further effort on the client. However, it is more involved and
+requires either support from your cloud or some inadvisable workarounds.
+
+.. rubric:: Use your DevStack host as a jump host
+
+You can choose to use your DevStack host as a jump host. To SSH to an instance
+this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh``
+command. For example:
+
+.. code-block::
+
+   openstack server ssh test-server -- -l cirros -J username@devstack-host
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously `, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+This can also be configured via your ``~/.ssh/config`` file, making it rather
+effortless. However, it only allows SSH access. If you want to access e.g. a
+web application on the instance, you will need to configure an SSH tunnel and
+forward select ports using the ``-L`` option. For example, to forward HTTP
+traffic:
+
+.. code-block::
+
+   openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously `, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+As you can imagine, this can quickly get out of hand, particularly for more
+complex guest applications with multiple ports.
+
+.. rubric:: Use a proxy or VPN tool
+
+You can use a proxy or VPN tool to enable tunneling for the floating IP
+address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``)
+defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many
+such tools available to do this. For example, we could use a useful utility
+called `sshuttle`__. To enable tunneling using ``sshuttle``, first ensure you
+have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S)
+traffic is necessary so you can use the OpenStack APIs remotely. How you do
+this will depend on where your DevStack host is running. Once this is done,
+install ``sshuttle`` on your localhost:
+
+.. code-block:: bash
+
+   sudo apt-get install sshuttle || sudo dnf install sshuttle
+
+Finally, start ``sshuttle`` on your localhost using the floating IP address
+range. For example, assuming you are using the default value for
+``$FLOATING_RANGE``, you can do:
+
+.. code-block:: bash
+
+   sshuttle -r username@devstack-host 172.24.4.0/24
+
+(where ``username`` and ``devstack-host`` are the username and hostname of your
+DevStack host).
+
+You should now be able to create an instance and SSH into it:
+
+.. code-block:: bash
+
+   openstack server ssh test-server -- -l cirros
+
+(where ``test-server`` is the name of an existing instance, as described
+:ref:`previously `).
+
+.. __: https://github.com/sshuttle/sshuttle
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
new file mode 100644
index 0000000000..c978e8d2cf
--- /dev/null
+++ b/doc/source/overview.rst
@@ -0,0 +1,71 @@
+========
+Overview
+========
+
+DevStack has evolved to support a large number of configuration options
+and alternative platforms and support services. That evolution has grown
+well beyond what was originally intended and the majority of
+configuration combinations are rarely, if ever, tested. DevStack is not
+a general OpenStack installer and was never meant to be everything to
+everyone.
+
+Below is a list of what specifically is supported (read that as
+"tested") going forward.
+
+Supported Components
+====================
+
+Base OS
+-------
+
+*The OpenStack Technical Committee (TC) has defined the current CI
+strategy to include the latest Ubuntu release and the latest RHEL
+release.*
+
+- Ubuntu: current LTS release plus current development release
+- RHEL/CentOS/RockyLinux: current major release
+- Other OS platforms may continue to be included, but the maintenance of
+  those platforms shall not be assumed simply due to their presence.
+  Having a listed point-of-contact for each additional OS will greatly
+  increase its chance of being well-maintained.
+- Patches for Ubuntu and/or RockyLinux will not be held up due to
+  side-effects on other OS platforms.
+
+Databases
+---------
+
+*As packaged by the host OS*
+
+- MySQL
+
+Queues
+------
+
+*As packaged by the host OS*
+
+- Rabbit
+
+Web Server
+----------
+
+*As packaged by the host OS*
+
+- Apache
+
+Services
+--------
+
+The default services configured by DevStack are Identity (keystone),
+Object Storage (swift), Image Service (glance), Block Storage
+(cinder), Compute (nova), Placement (placement),
+Networking (neutron), Dashboard (horizon).
+
+Additional services not included directly in DevStack can be tied in to
+``stack.sh`` using the :doc:`plugin mechanism ` to call
+scripts that perform the configuration and startup of the service.
+
+Node Configurations
+-------------------
+
+- single node
+- multi-node configurations as tested by the gate
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
new file mode 100644
index 0000000000..9185263443
--- /dev/null
+++ b/doc/source/plugin-registry.rst
@@ -0,0 +1,182 @@
+.. Note to patch submitters:
+
+   # ============================= #
+   #  THIS FILE IS AUTOGENERATED ! #
+   # ============================= #
+
+   ** Plugins are found automatically and added to this list **
+
+   This file is created by a periodic proposal job. You should not
+   edit this file.
+
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
+
+==========================
+ DevStack Plugin Registry
+==========================
+
+The following list is an automatically-generated collection of
+available DevStack plugins. This includes, but is not limited to,
+official OpenStack projects.
+ + +======================================== === +Plugin Name URL +======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ +openstack/aodh `https://opendev.org/openstack/aodh `__ +openstack/barbican `https://opendev.org/openstack/barbican `__ +openstack/blazar `https://opendev.org/openstack/blazar `__ +openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ +openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ +openstack/cyborg `https://opendev.org/openstack/cyborg `__ +openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ +openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ +openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ +openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ +openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ +openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ +openstack/freezer `https://opendev.org/openstack/freezer `__ +openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ +openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ +openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ +openstack/heat `https://opendev.org/openstack/heat `__ +openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ +openstack/ironic `https://opendev.org/openstack/ironic `__ +openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ +openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ +openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ +openstack/keystone `https://opendev.org/openstack/keystone `__ +openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ +openstack/magnum `https://opendev.org/openstack/magnum `__ +openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ +openstack/manila `https://opendev.org/openstack/manila `__ +openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-tempest-plugin `__ +openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ +openstack/masakari `https://opendev.org/openstack/masakari `__ +openstack/mistral `https://opendev.org/openstack/mistral `__ +openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ +openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ +openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ +openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ +openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ +openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ +openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ +openstack/neutron `https://opendev.org/openstack/neutron `__ +openstack/neutron-dynamic-routing 
`https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ +openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ +openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ +openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova `https://opendev.org/openstack/nova `__ +openstack/octavia `https://opendev.org/openstack/octavia `__ +openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ +openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ +openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ +openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ +openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ +openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ +openstack/shade `https://opendev.org/openstack/shade `__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ +openstack/storlets `https://opendev.org/openstack/storlets `__ +openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ +openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ +openstack/trove `https://opendev.org/openstack/trove `__ +openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ +openstack/vitrage `https://opendev.org/openstack/vitrage `__ +openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ +openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ +openstack/watcher `https://opendev.org/openstack/watcher `__ +openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard `__ +openstack/whitebox-tempest-plugin `https://opendev.org/openstack/whitebox-tempest-plugin `__ +openstack/zaqar `https://opendev.org/openstack/zaqar `__ +openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui `__ +openstack/zun `https://opendev.org/openstack/zun `__ +openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ +performa/os-faults `https://opendev.org/performa/os-faults `__ +starlingx/config `https://opendev.org/starlingx/config `__ +starlingx/fault `https://opendev.org/starlingx/fault `__ +starlingx/ha `https://opendev.org/starlingx/ha `__ +starlingx/integ `https://opendev.org/starlingx/integ `__ +starlingx/metal `https://opendev.org/starlingx/metal `__ +starlingx/nfv `https://opendev.org/starlingx/nfv `__ +starlingx/update `https://opendev.org/starlingx/update `__ +vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ +x/almanach `https://opendev.org/x/almanach `__ +x/bilean `https://opendev.org/x/bilean `__ +x/broadview-collector `https://opendev.org/x/broadview-collector `__ +x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ +x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos `__ +x/devstack-plugin-glusterfs 
`https://opendev.org/x/devstack-plugin-glusterfs `__ +x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ +x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ +x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ +x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ +x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ +x/fenix `https://opendev.org/x/fenix `__ +x/gce-api `https://opendev.org/x/gce-api `__ +x/glare `https://opendev.org/x/glare `__ +x/group-based-policy `https://opendev.org/x/group-based-policy `__ +x/gyan `https://opendev.org/x/gyan `__ +x/horizon-mellanox `https://opendev.org/x/horizon-mellanox `__ +x/ironic-staging-drivers `https://opendev.org/x/ironic-staging-drivers `__ +x/kingbird `https://opendev.org/x/kingbird `__ +x/meteos `https://opendev.org/x/meteos `__ +x/meteos-ui `https://opendev.org/x/meteos-ui `__ +x/mixmatch `https://opendev.org/x/mixmatch `__ +x/mogan `https://opendev.org/x/mogan `__ +x/mogan-ui `https://opendev.org/x/mogan-ui `__ +x/networking-6wind `https://opendev.org/x/networking-6wind `__ +x/networking-ansible `https://opendev.org/x/networking-ansible `__ +x/networking-arista `https://opendev.org/x/networking-arista `__ +x/networking-brocade `https://opendev.org/x/networking-brocade `__ +x/networking-cisco `https://opendev.org/x/networking-cisco `__ +x/networking-cumulus `https://opendev.org/x/networking-cumulus `__ +x/networking-dpm `https://opendev.org/x/networking-dpm `__ +x/networking-fortinet `https://opendev.org/x/networking-fortinet `__ +x/networking-hpe `https://opendev.org/x/networking-hpe `__ +x/networking-huawei `https://opendev.org/x/networking-huawei `__ +x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ +x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ +x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ +x/networking-nec `https://opendev.org/x/networking-nec `__ +x/networking-omnipath `https://opendev.org/x/networking-omnipath `__ +x/networking-opencontrail `https://opendev.org/x/networking-opencontrail `__ +x/networking-ovs-dpdk `https://opendev.org/x/networking-ovs-dpdk `__ +x/networking-plumgrid `https://opendev.org/x/networking-plumgrid `__ +x/networking-spp `https://opendev.org/x/networking-spp `__ +x/networking-vpp `https://opendev.org/x/networking-vpp `__ +x/networking-vsphere `https://opendev.org/x/networking-vsphere `__ +x/neutron-classifier `https://opendev.org/x/neutron-classifier `__ +x/nova-dpm `https://opendev.org/x/nova-dpm `__ +x/nova-mksproxy `https://opendev.org/x/nova-mksproxy `__ +x/oaktree `https://opendev.org/x/oaktree `__ +x/omni `https://opendev.org/x/omni `__ +x/os-xenapi `https://opendev.org/x/os-xenapi `__ +x/picasso `https://opendev.org/x/picasso `__ +x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nova `__ +x/scalpels `https://opendev.org/x/scalpels `__ +x/slogging `https://opendev.org/x/slogging `__ +x/stackube `https://opendev.org/x/stackube `__ +x/tatu `https://opendev.org/x/tatu `__ +x/trio2o `https://opendev.org/x/trio2o `__ +x/valet `https://opendev.org/x/valet `__ +x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ +x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ +======================================== === + + diff 
--git a/doc/source/plugins.rst b/doc/source/plugins.rst new file mode 100644 index 0000000000..fe567e2277 --- /dev/null +++ b/doc/source/plugins.rst @@ -0,0 +1,334 @@ +======= +Plugins +======= + +The OpenStack ecosystem is wide and deep, and only growing more so +every day. The value of DevStack is that it's simple enough to +understand what it's doing clearly. And yet we'd like to support as +much of the OpenStack Ecosystem as possible. We do that with plugins. + +DevStack plugins are bits of bash code that live outside the DevStack +tree. They are called through a strong contract, so these plugins can +be sure that they will continue to work in the future as DevStack +evolves. + +Prerequisites +============= + +If you are planning to create a plugin that is going to host a service in the +service catalog (that is, your plugin will use the command +``get_or_create_service``) please make sure that you apply to the `service +types authority`_ to reserve a valid service-type. This will help to make sure +that all deployments of your service use the same service-type. + +Plugin Interface +================ + +DevStack supports a standard mechanism for including plugins from +external repositories. The plugin interface assumes the following: + +An external git repository that includes a ``devstack/`` top level +directory. Inside this directory there can be 3 files. + +- ``override-defaults`` - a file containing global variables that + will be sourced before the lib/* files. This allows the plugin + to override the defaults that are otherwise set in the lib/* + files. + + For example, override-defaults may export CINDER_ENABLED_BACKENDS + to include the plugin-specific storage backend and thus be able + to override the default lvm only storage backend for Cinder. + +- ``settings`` - a file containing global variables that will be + sourced very early in the process. This is helpful if other plugins + might depend on this one, and need access to global variables to do + their work. + + Your settings should include any ``enable_service`` lines required + by your plugin. This is especially important if you are kicking off + services using ``run_process`` as it only works with enabled + services. + + Be careful to allow users to override global-variables for + customizing their environment. Usually it is best to provide a + default value only if the variable is unset or empty; e.g. in bash + syntax ``FOO=${FOO:-default}``. + + The file should include a ``define_plugin`` line to indicate the + plugin's name, which is the name that should be used by users on + "enable_plugin" lines. It should generally be the last component of + the git repo path (e.g., if the plugin's repo is + openstack/foo, then the name here should be "foo") :: + + define_plugin + + If your plugin depends on another plugin, indicate it in this file + with one or more lines like the following:: + + plugin_requires + + For a complete example, if the plugin "foo" depends on "bar", the + ``settings`` file should include:: + + define_plugin foo + plugin_requires foo bar + + Devstack does not currently use this dependency information, so it's + important that users continue to add enable_plugin lines in the + correct order in ``local.conf``, however adding this information + allows other tools to consider dependency information when + automatically generating ``local.conf`` files. + +- ``plugin.sh`` - the actual plugin. It is executed by devstack at + well defined points during a ``stack.sh`` run. 
+ +Plugins are registered by adding the following to the localrc section +of ``local.conf``. +
+They are added in the following format:: +
+ [[local|localrc]] + enable_plugin <name> <giturl> [gitref] +
+- ``name`` - an arbitrary name. (ex: glusterfs, docker, zaqar, congress) +- ``giturl`` - a valid git url that can be cloned +- ``gitref`` - an optional git ref (branch / ref / tag) that will be + cloned. Defaults to master. +
+An example would be as follows:: +
+ enable_plugin ec2-api https://opendev.org/openstack/ec2-api +
+plugin.sh contract +================== +
+``plugin.sh`` is a bash script that will be called at specific points +during ``stack.sh``, ``unstack.sh``, and ``clean.sh``. It will be +called in the following way:: +
+ source $PATH/TO/plugin.sh <mode> [phase] +
+``mode`` can be thought of as the major mode being called, currently +one of: ``stack``, ``unstack``, ``clean``. ``phase`` is used by modes +which have multiple points during their run where it's necessary to +be able to execute code. All existing ``mode`` and ``phase`` points +are considered **strong contracts** and won't be removed without a +reasonable deprecation period. Additional new ``mode`` or ``phase`` +points may be added at any time if we discover we need them to support +additional kinds of plugins in devstack. +
+The current full list of ``mode`` and ``phase`` points is: +
+- **stack** - Called by ``stack.sh`` five times for different phases + of its run: +
+ - **pre-install** - Called after system (OS) setup is complete and + before project source is installed. + - **install** - Called after the layer 1 and 2 projects' source and + their dependencies have been installed. + - **post-config** - Called after the layer 1 and 2 services have + been configured. All configuration files for enabled services + should exist at this point. + - **extra** - Called near the end after layer 1 and 2 services have + been started. + - **test-config** - Called at the end of a devstack run; used to + configure tempest or any other test environments. +
+- **unstack** - Called by ``unstack.sh`` before other services are shut + down. +- **clean** - Called by ``clean.sh`` before other services are cleaned, + but after ``unstack.sh`` has been called. +
+Example plugin +============== +
+An example plugin would look something like the following. +
+``devstack/settings``:: +
+ # settings file for template + enable_service template + +
+``devstack/plugin.sh``:: +
+ # plugin.sh - DevStack plugin.sh dispatch script template +
+ function install_template { + ... + } +
+ function init_template { + ... + } +
+ function configure_template { + ...
+ } +
+ # check for service enabled + if is_service_enabled template; then +
+ if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + # Set up system services + echo_summary "Configuring system services Template" + install_package cowsay +
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then + # Perform installation of service source + echo_summary "Installing Template" + install_template +
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Configure after the other layer 1 and 2 services have been configured + echo_summary "Configuring Template" + configure_template +
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize and start the template service + echo_summary "Initializing Template" + init_template + fi +
+ if [[ "$1" == "unstack" ]]; then + # Shut down template services + # no-op + : + fi +
+ if [[ "$1" == "clean" ]]; then + # Remove state and transient data + # Remember clean.sh first calls unstack.sh + # no-op + : + fi + fi +
+Plugin Execution Order +====================== +
+Plugins are run after in-tree services at each of the stages +above. For example, if you need something to happen before Keystone +starts, you should do that at the ``post-config`` phase. +
+Multiple plugins can be specified in your ``local.conf``. When that +happens the plugins will be executed **in order** at each phase. This +allows plugins to conceptually depend on each other by documenting for +the user the order in which they must be declared. A formal +dependency mechanism is beyond the scope of the current work. +
+System Packages +=============== +
+Devstack based +-------------- +
+Devstack provides a custom framework for getting packages installed at +an early phase of its execution. These packages may be defined in a +plugin as files that contain newline-separated lists of packages +required by the plugin. +
+Supported packaging systems include apt and dnf across multiple +distributions. To enable a plugin to hook into this and install +package dependencies, packages may be listed at the following +locations in the top-level of the plugin repository: +
+- ``./devstack/files/debs/$plugin_name`` - Packages to install when running + on Ubuntu or Debian. +
+- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running + on Red Hat, Fedora, or CentOS. +
+Although there are no plans to remove this method of installing +packages, plugins should consider it deprecated in favor of the +``bindep`` support described below. +
+bindep +------ +
+The `bindep <https://docs.opendev.org/opendev/bindep/latest/>`__ project has +become the de facto standard for OpenStack projects to specify binary +dependencies. +
+A plugin may provide a ``./devstack/files/bindep.txt`` file, which +will be called with the *default* profile to install packages. For +details on the syntax, etc., see the bindep documentation. +
+It is also possible to use the ``bindep.txt`` of projects that are +being installed from source with the ``-bindep`` flag available in +install functions. For example +
+.. code-block:: bash +
+ if use_library_from_git "diskimage-builder"; then + GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL + GITDIR["diskimage-builder"]=$DEST/diskimage-builder + GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF + git_clone_by_name "diskimage-builder" + setup_dev_lib -bindep "diskimage-builder" + fi +
+will result in any packages required by the ``bindep.txt`` of the +``diskimage-builder`` project being installed. Note however that jobs +that switch projects between source and released/pypi installs +(e.g.
with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both +released dependencies and master versions) will have to deal with +``bindep.txt`` being unavailable without the source directory. + +
+Using Plugins in the OpenStack Gate +=================================== +
+For everyday use, DevStack plugins can exist in any git tree that's +accessible on the internet. However, when using DevStack plugins in +the OpenStack gate, they must live in projects in OpenStack's +gerrit. This allows testing of the plugin as well as providing network +isolation against upstream git repository failures (which we see often +enough to be an issue). +
+Ideally a plugin will be included within the ``devstack`` directory of +the project being tested. For example, the openstack/ec2-api +project has its plugin support in its own tree. +
+However, sometimes a DevStack plugin might be used solely to +configure a backend service that will be used by the rest of +OpenStack, so there is no "project tree" per se. Good examples +include: integration of back-end storage (e.g. ceph or glusterfs), +integration of SDN controllers (e.g. ovn, OpenDaylight), or +integration of alternate RPC systems (e.g. zmq, qpid). In these cases +the best practice is to build a dedicated +``openstack/devstack-plugin-FOO`` project. +
+Legacy project-config jobs +-------------------------- +
+To enable a plugin to be used in a gate job, the following lines will +be needed in your ``jenkins/jobs/<project>.yaml`` definition in +`project-config <https://opendev.org/openstack/project-config>`_:: +
+ # Because we are testing a non-standard project, add + # our project repository. This makes zuul do the right + # reference magic for testing changes. + export PROJECTS="openstack/ec2-api $PROJECTS" +
+ # note the actual url here is somewhat irrelevant because it + # caches in nodepool, however make it a valid url for + # documentation purposes. + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api" +
+Zuul v3 jobs +------------ +
+See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`. +
+See Also +======== +
+For additional inspiration on devstack plugins you can check out the +:doc:`Plugin Registry <plugin-registry>`. +
+.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/ diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst new file mode 100644 index 0000000000..78535202d8 --- /dev/null +++ b/doc/source/systemd.rst @@ -0,0 +1,222 @@ +=========================== + Using Systemd in DevStack +=========================== +
+By default DevStack runs all of its services as systemd units. +Systemd is now the default init system for nearly every Linux +distro, and systemd encodes and solves many of the problems related to +poorly running processes. +
+Why this instead of screen? +=========================== +
+The screen model for DevStack was invented when the number of services +that a DevStack user was going to run was typically < 10. This made it +easy to jump around with screen hotkeys. However, the landscape has +changed: not all services are stoppable in screen (some run under +Apache), and there are typically at least 20 services. +
+There is also a common developer workflow of changing code in more +than one service, and needing to restart a bunch of services for that +to take effect. +
+Unit Structure +============== +
+.. note:: +
+ Originally we actually wanted to do this as user units; however, + there are issues with running this under non-interactive + shells.
For now, we'll be running as system units. Some user unit + code is left in place in case we can switch back later. +
+All DevStack units are created as a part of the DevStack slice and +given the name ``devstack@$servicename.service``. This makes it easy +to understand which services are part of the devstack run, and lets us +disable / stop them in a single command. +
+Manipulating Units +================== +
+The examples below assume the unit ``n-cpu``, to make them clearer. +
+Enable a unit (allows it to be started):: +
+ sudo systemctl enable devstack@n-cpu.service +
+Disable a unit:: +
+ sudo systemctl disable devstack@n-cpu.service +
+Start a unit:: +
+ sudo systemctl start devstack@n-cpu.service +
+Stop a unit:: +
+ sudo systemctl stop devstack@n-cpu.service +
+Restart a unit:: +
+ sudo systemctl restart devstack@n-cpu.service +
+See the status of a unit:: +
+ sudo systemctl status devstack@n-cpu.service +
+Operating on more than one unit at a time +----------------------------------------- +
+Systemd supports wildcarding for unit operations. To restart every +service in devstack you can do the following:: +
+ sudo systemctl restart devstack@* +
+Or to see the status of all Nova processes you can do:: +
+ sudo systemctl status devstack@n-* +
+We'll eventually make the unit names a bit more meaningful so that +it's easier to understand what you are restarting. +
+.. _journalctl-examples: +
+Querying Logs +============= +
+One of the other major things that comes with systemd is journald, a +consolidated way to access logs (including querying through structured +metadata). It is accessed via the ``journalctl`` command, which has +powerful query facilities. We'll start with some common options. +
+Follow logs for a specific service:: +
+ sudo journalctl -f --unit devstack@n-cpu.service +
+Follow logs for multiple services simultaneously:: +
+ sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service +
+or you can even use wildcards to follow all the nova services:: +
+ sudo journalctl -f --unit devstack@n-* +
+Use higher-precision timestamps:: +
+ sudo journalctl -f -o short-precise --unit devstack@n-cpu.service +
+By default, journalctl strips out "unprintable" characters, including +ASCII color codes. To keep the color codes (which can be interpreted by +an appropriate terminal/pager, e.g. ``less``, the default):: +
+ sudo journalctl -a --unit devstack@n-cpu.service +
+When outputting to the terminal using the default pager, long lines +will be truncated, but horizontal scrolling is supported via the +left/right arrow keys. You can override this by setting the +``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``. +
+You can pipe the output to another tool, such as ``grep``. For +example, to find a server instance UUID in the nova logs:: +
+ sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6 +
+See ``man 1 journalctl`` for more. +
+Debugging +========= +
+Using pdb +--------- +
+In order to break into a regular pdb session on a systemd-controlled +service, you need to invoke the process manually - that is, take it out +of systemd's control.
+ +Discover the command systemd is using to run the service:: +
+ systemctl show devstack@n-sch.service -p ExecStart --no-pager +
+Stop the systemd service:: +
+ sudo systemctl stop devstack@n-sch.service +
+Inject your breakpoint in the source, e.g.:: +
+ import pdb; pdb.set_trace() +
+Invoke the command manually:: +
+ /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf +
+Some executables, such as :program:`nova-compute`, will need to be executed +with a particular group. This will be shown in the systemd unit file:: +
+ sudo systemctl cat devstack@n-cpu.service | grep Group +
+:: +
+ Group = libvirt +
+Use the :program:`sg` tool to execute the command as this group:: +
+ sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf' +
+Using remote-pdb +---------------- +
+`remote-pdb`_ works while the process is under systemd control. +
+Make sure you have remote-pdb installed:: +
+ sudo pip install remote-pdb +
+Inject your breakpoint in the source, e.g.:: +
+ import remote_pdb; remote_pdb.set_trace() +
+Restart the relevant service:: +
+ sudo systemctl restart devstack@n-api.service +
+The remote-pdb code configures the telnet port when ``set_trace()`` is +invoked. Do whatever it takes to hit the instrumented code path, and +inspect the logs for a message displaying the listening port:: +
+ Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ... +
+Telnet to that port to enter the pdb session:: +
+ telnet 127.0.0.1 46771 +
+See the `remote-pdb`_ home page for more options. +
+.. _`remote-pdb`: https://pypi.org/project/remote-pdb/ +
+Future Work +=========== +
+user units +---------- +
+It would be great if we could run services as user units, so that there +is a clear separation between code that runs as root and code that does +not, ensuring that running as root never accidentally gets baked into +services as an assumption. However, user units interact poorly with +devstack-gate and the way that commands are run as users with ansible +and su. +
+Maybe someday we can figure that out. +
+References +========== +
+- Arch Linux Wiki - https://wiki.archlinux.org/index.php/Systemd/User +- Python interface to journald - + https://www.freedesktop.org/software/systemd/python-systemd/journal.html +- Systemd documentation on service files - + https://www.freedesktop.org/software/systemd/man/systemd.service.html +- Systemd documentation on exec (can be used to impact service runs) - + https://www.freedesktop.org/software/systemd/man/systemd.exec.html diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= +
+`Tempest`_ is the OpenStack integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include a DevStack plugin to do things +like pre-create required static resources. +
+The `Tempest`_ documentation provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: +
+.. code-block:: shell +
+ cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ...
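+ +If you only want to exercise a subset of the suite, ``tempest run`` +also accepts a test selection regex; a minimal sketch (the filter shown +is illustrative): +
+.. code-block:: shell +
+ cd /opt/stack/tempest + # run only the compute API tests, as an example filter + /opt/stack/data/venv/bin/tempest run --regex '^tempest\.api\.compute'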
+ +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` variable) and have enabled virtualenv-based +installation in the standard location (configured via the ``USE_VENV`` and +``VENV_DEST`` variables). +
+.. _Tempest: https://docs.openstack.org/tempest/latest/ diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst new file mode 100644 index 0000000000..c43603ea17 --- /dev/null +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -0,0 +1,320 @@ +=============================== +Migrating Zuul V2 CI jobs to V3 +=============================== +
+The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved to +the new CI system. All jobs have been migrated automatically to a format +compatible with Zuul v3; however, the jobs produced in this way are suboptimal +and do not use the capabilities introduced by Zuul v3, which allow for re-use of +job parts, in the form of Ansible roles, as well as inheritance between jobs. +
+DevStack hosts a set of roles, playbooks and jobs that can be used by other +repositories to define their DevStack-based jobs. To benefit from them, jobs +must be migrated from the legacy v2 ones into v3 native format. +
+This document provides guidance and examples to make the migration process as +painless and smooth as possible. +
+Where to host the job definitions +================================= +
+In Zuul v3, jobs can be defined in the repository that contains the code they +exercise. If you are writing CI jobs for an OpenStack service, you can define +your DevStack-based CI jobs in one of the repositories that host the code for +your service. If you have a branchless repo, like a Tempest plugin, that is +a convenient choice to host the job definitions since job changes do not have +to be backported. For example, see the beginning of the ``.zuul.yaml`` from the +sahara Tempest plugin repo: +
+.. code:: yaml +
+ # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml: + - job: + name: sahara-tests-tempest + description: | + Run Tempest tests from the Sahara plugin. + parent: devstack-tempest +
+Which base job to start from +============================ +
+If your job needs an OpenStack cloud deployed via DevStack, but you don't plan +on running Tempest tests, you can start from one of the base +:doc:`jobs <zuul_jobs>` defined in the DevStack repo. +
+The ``devstack`` job can be used for both single-node jobs and multi-node jobs, +and it includes the list of services used in the integrated gate (keystone, +glance, nova, cinder, neutron and swift). Different topologies can be achieved +by switching the nodeset used in the child job. +
+The ``devstack-base`` job is similar to ``devstack`` but it does not specify any +required repo or service to be run in DevStack. It can be useful for setting up +child jobs that use a very narrow DevStack setup. +
+If your job needs an OpenStack cloud deployed via DevStack, and you do plan +on running Tempest tests, you can start from one of the base jobs defined in the +Tempest repo. +
+The ``devstack-tempest`` job can be used for both single-node jobs and +multi-node jobs. Different topologies can be achieved by switching the nodeset +used in the child job.
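+ +For example, a minimal two-node Tempest job could be defined simply by +inheriting from ``devstack-tempest`` and picking a multi-node nodeset +(the job name below is illustrative): +
+.. code:: yaml +
+ - job: + name: my-devstack-tempest-multinode + parent: devstack-tempest + nodeset: openstack-two-node-noble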
+ +Jobs can be customized as follows without writing any Ansible code: +
+- add and/or remove DevStack services +- add or modify DevStack and services configuration +- install DevStack plugins +- extend the number of sub-nodes (multinode only) +- define extra log files and/or directories to be uploaded on logs.o.o +- define extra log file extensions to be rewritten to .txt for ease of access +
+Tempest jobs can be further customized as follows: +
+- define the Tempest tox environment to be used +- define the test concurrency +- define the test regular expression +
+By writing Ansible code, or importing existing custom roles, jobs can be +further extended by: +
+- adding pre and/or post playbooks +- overriding the run playbook and adding custom roles +
+The (partial) example below extends the single-node Tempest base job +``devstack-tempest`` in the Kuryr repository. The parent job name is defined in +job.parent. +
+.. code:: yaml +
+ # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml: + - job: + name: kuryr-kubernetes-tempest-base + parent: devstack-tempest + description: Base kuryr-kubernetes-job + required-projects: + - openstack/devstack-plugin-container + - openstack/kuryr + - openstack/kuryr-kubernetes + - openstack/kuryr-tempest-plugin + - openstack/neutron-lbaas + vars: + tempest_test_regex: '^(kuryr_tempest_plugin.tests.)' + tox_envlist: 'all' + devstack_localrc: + KURYR_K8S_API_PORT: 8080 + devstack_services: + kubernetes-api: true + kubernetes-controller-manager: true + kubernetes-scheduler: true + kubelet: true + kuryr-kubernetes: true + (...) + devstack_plugins: + kuryr-kubernetes: https://opendev.org/openstack/kuryr + devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container + neutron-lbaas: https://opendev.org/openstack/neutron-lbaas + tempest_plugins: + - kuryr-tempest-plugin + (...) +
+Job variables +============= +
+Variables can be added to the job in three different places: +
+- job.vars: these are global variables available to all nodes in the nodeset +- job.host-vars.[HOST]: these are variables available only to the specified HOST +- job.group-vars.[GROUP]: these are variables available only to the specified + GROUP +
+Zuul merges dict variables through job inheritance. Host and group variables +override variables with the same name defined as global variables. +
+In the example below, for the sundaes job, hosts that are not part of the +subnode group will run vanilla and chocolate. Hosts in the subnode group will +run stracciatella and strawberry. +
+.. code:: yaml +
+ - job: + name: ice-creams + vars: + devstack_services: + vanilla: true + chocolate: false + group-vars: + subnode: + devstack_services: + pistacchio: true + stracciatella: true +
+ - job: + name: sundaes + parent: ice-creams + vars: + devstack_services: + chocolate: true + group-vars: + subnode: + devstack_services: + strawberry: true + pistacchio: false + +
+DevStack Gate Flags +=================== +
+The old CI system worked using a combination of DevStack, Tempest and +devstack-gate to set up a test environment and run tests against it. With Zuul +v3, the logic that used to live in devstack-gate has moved into different repos, +including DevStack, Tempest and grenade. +
+DevStack-gate exposes an interface for job definition based on a number of +DEVSTACK_GATE_* environment variables, or flags. This guide shows how to map +DEVSTACK_GATE flags into the new +system.
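+ +As a sketch of how the mapping below reads in practice, a legacy job that +exported ``DEVSTACK_GATE_TEMPEST_FULL=1`` and ``TEMPEST_CONCURRENCY=2`` +becomes a child job carrying the corresponding variables from the table +(the job name is illustrative): +
+.. code:: yaml +
+ - job: + name: my-tempest-full + parent: devstack-tempest + vars: + tox_envlist: full + tempest_concurrency: 2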
+ +The repo column indicates which repository hosts the code that replaces +the devstack-gate flag. The new implementation column explains how to reproduce +the same or a similar behaviour in Zuul v3 jobs. For localrc settings, +devstack-gate defined a default value. In Ansible jobs the default is either the +value defined in the parent job, or the default from DevStack, if any. +
+.. list-table:: **DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 +
+ * - DevStack gate flag + - Repo + - New implementation + * - OVERRIDE_ZUUL_BRANCH + - zuul + - override-checkout: [branch] in the job definition. + * - DEVSTACK_GATE_NET_OVERLAY + - zuul-jobs + - A bridge called br-infra is set up for all jobs that inherit + from multinode with a dedicated bridge role. + * - DEVSTACK_CINDER_VOLUME_CLEAR + - devstack + - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the + job vars. + * - DEVSTACK_GATE_NEUTRON + - devstack + - True by default. To disable, disable all neutron services in + devstack_services in the job definition. + * - DEVSTACK_GATE_CONFIGDRIVE + - devstack + - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_INSTALL_TESTONLY + - devstack + - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in + the job vars. + * - DEVSTACK_GATE_VIRT_DRIVER + - devstack + - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_LIBVIRT_TYPE + - devstack + - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_TEMPEST + - devstack and tempest + - Defined by the job that is used. The ``devstack`` job only runs + devstack. The ``devstack-tempest`` one triggers a Tempest run + as well. + * - DEVSTACK_GATE_TEMPEST_FULL + - tempest + - *tox_envlist: full* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL + - tempest + - *tox_envlist: all* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS + - tempest + - *tox_envlist: all-plugin* in the job vars. + * - DEVSTACK_GATE_TEMPEST_SCENARIOS + - tempest + - *tox_envlist: scenario* in the job vars. + * - TEMPEST_CONCURRENCY + - tempest + - *tempest_concurrency: [value]* in the job vars. This is + available only on jobs that inherit from ``devstack-tempest`` + down. + * - DEVSTACK_GATE_TEMPEST_NOTESTS + - tempest + - *tox_envlist: venv-tempest* in the job vars. This will create + the Tempest virtual environment but run no tests. + * - DEVSTACK_GATE_SMOKE_SERIAL + - tempest + - *tox_envlist: smoke-serial* in the job vars. + * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION + - tempest + - *tox_envlist: full-serial* in the job vars. + *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in + the job vars. + +
+The following flags have not been migrated yet, or are legacy and won't be +migrated at all. +
+.. list-table:: **Not Migrated DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 +
+ * - DevStack gate flag + - Status + - Details + * - DEVSTACK_GATE_TOPOLOGY + - WIP + - The topology depends on the base job that is used and more + specifically on the nodeset attached to it. The new job format + allows projects to define the variables to be passed to every + node/node-group that exists in the topology. Named topologies + that include the nodeset and the matching variables can be + defined in the form of base jobs. + * - DEVSTACK_GATE_GRENADE + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo. + * - GRENADE_BASE_BRANCH + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+ * - DEVSTACK_GATE_NEUTRON_DVR + - TBD + - Depends on multinode support. + * - DEVSTACK_GATE_EXERCISES + - TBD + - Can be done on request. + * - DEVSTACK_GATE_IRONIC + - TBD + - This will probably be implemented on the ironic side. + * - DEVSTACK_GATE_IRONIC_DRIVER + - TBD + - This will probably be implemented on the ironic side. + * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK + - TBD + - This will probably be implemented on the ironic side. + * - DEVSTACK_GATE_POSTGRES + - Legacy + - This flag exists in d-g but the only thing that it does is + capture postgres logs. This is already supported by the roles + in post, so the flag is useless in the new jobs. postgres + itself can be enabled via the devstack_services job variable. + * - DEVSTACK_GATE_ZEROMQ + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_MQ_DRIVER + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS + - Legacy + - Stress is not in Tempest anymore. + * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW + - Legacy + - This is not used anywhere. + * - DEVSTACK_GATE_CELLS + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT + - Legacy + - This has no effect in d-g. diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst new file mode 100644 index 0000000000..cf203a8973 --- /dev/null +++ b/doc/source/zuul_jobs.rst @@ -0,0 +1,4 @@ +Zuul CI Jobs +============ +
+.. zuul:autojobs:: diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst new file mode 100644 index 0000000000..4939281057 --- /dev/null +++ b/doc/source/zuul_roles.rst @@ -0,0 +1,4 @@ +Zuul CI Roles +============= +
+.. zuul:autoroles:: diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh deleted file mode 100755 index 7726e7eb01..0000000000 --- a/driver_certs/cinder_driver_cert.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env bash -
-# **cinder_cert.sh** -
-# This script is a simple wrapper around the tempest volume api tests -# It requires that you have a working and functional devstack install -# and that you've enabled your device driver by making the necessary -# modifications to /etc/cinder/cinder.conf -
-# This script will refresh your openstack repo's and restart the cinder -# services to pick up your driver changes. -# please NOTE; this script assumes your devstack install is functional -# and includes tempest. A good first step is to make sure you can -# create volumes on your device before you even try and run this script. -
-# It also assumes default install location (/opt/stack/xxx) -# to aid in debug, you should also verify that you've added -# an output directory for screen logs: -# -# SCREEN_LOGDIR=/opt/stack/screen-logs -
-set -o pipefail -
-CERT_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $CERT_DIR/..; pwd) -
-source $TOP_DIR/functions -source $TOP_DIR/stackrc -source $TOP_DIR/openrc -source $TOP_DIR/lib/infra -source $TOP_DIR/lib/tempest -source $TOP_DIR/lib/cinder -
-TEMPFILE=`mktemp` -RECLONE=True -
-function log_message { - MESSAGE=$1 - STEP_HEADER=$2 - if [[ "$STEP_HEADER" = "True" ]]; then - echo -e "\n========================================================" | tee -a $TEMPFILE - fi - echo -e `date +%m/%d/%y/%T:`"${MESSAGE}" | tee -a $TEMPFILE - if [[ "$STEP_HEADER" = "True" ]]; then - echo -e "========================================================" | tee -a $TEMPFILE - fi -} -
-if [[ "$OFFLINE" = "True" ]]; then - echo "ERROR: Driver cert requires fresh clone/pull from ${CINDER_BRANCH}" - echo " Please set OFFLINE=False and retry."
- exit 1 -fi - -log_message "RUNNING CINDER DRIVER CERTIFICATION CHECK", True -log_message "Output is being logged to: $TEMPFILE" - -cd $CINDER_DIR -log_message "Cloning to ${CINDER_REPO}...", True -install_cinder - -log_message "Pull a fresh Clone of cinder repo...", True -git status | tee -a $TEMPFILE -git log --pretty=oneline -n 1 | tee -a $TEMPFILE - -log_message "Gathering copy of cinder.conf file (passwords will be scrubbed)...", True -cat /etc/cinder/cinder.conf | egrep -v "(^#.*|^$)" | tee -a $TEMPFILE -sed -i "s/\(.*password.*=\).*$/\1 xxx/i" $TEMPFILE -log_message "End of cinder.conf.", True - -cd $TOP_DIR -# Verify tempest is installed/enabled -if ! is_service_enabled tempest; then - log_message "ERROR!!! Cert requires tempest in enabled_services!", True - log_message" Please add tempest to enabled_services and retry." - exit 1 -fi - -cd $TEMPEST_DIR -install_tempest - -log_message "Verify tempest is current....", True -git status | tee -a $TEMPFILE -log_message "Check status and get latest commit..." -git log --pretty=oneline -n 1 | tee -a $TEMPFILE - - -#stop and restart cinder services -log_message "Restart Cinder services...", True -stop_cinder -sleep 1 -start_cinder -sleep 5 - -# run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True -./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE -if [[ $? = 0 ]]; then - log_message "CONGRATULATIONS!!! Device driver PASSED!", True - log_message "Submit output: ($TEMPFILE)" - exit 0 -else - log_message "SORRY!!! Device driver FAILED!", True - log_message "Check output in $TEMPFILE" - exit 1 -fi diff --git a/eucarc b/eucarc deleted file mode 100644 index 343f4ccde2..0000000000 --- a/eucarc +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# -# source eucarc [username] [tenantname] -# -# Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME -# Optionally set the tenant/username via openrc - -if [[ -n "$1" ]]; then - USERNAME=$1 -fi -if [[ -n "$2" ]]; then - TENANT=$2 -fi - -# Find the other rc files -RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) - -# Get user configuration -source $RC_DIR/openrc - -# Set the ec2 url so euca2ools works -export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') - -# Create EC2 credentials for the current user -CREDS=$(openstack ec2 credentials create) -export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') -export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - -# Euca2ools Certificate stuff for uploading bundles -# See exercises/bundle.sh to see how to get certs using nova cli -NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR} -export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }') -export EC2_USER_ID=42 # nova does not use user id, but bundling requires it -export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem -export EC2_CERT=${NOVA_KEY_DIR}/cert.pem -export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem -export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set -alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user ${EC2_USER_ID} --ec2cert ${NOVA_CERT}" -alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" - diff --git a/exercise.sh b/exercise.sh deleted file mode 100755 index ce694fba66..0000000000 --- a/exercise.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -# **exercise.sh** - -# 
Keep track of the current devstack directory. -TOP_DIR=$(cd $(dirname "$0") && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -# Run everything in the exercises/ directory that isn't explicitly disabled - -# comma separated list of script basenames to skip -# to refrain from exercising euca.sh use SKIP_EXERCISES=euca -SKIP_EXERCISES=${SKIP_EXERCISES:-""} - -# comma separated list of script basenames to run -# to run only euca.sh use RUN_EXERCISES=euca -basenames=${RUN_EXERCISES:-""} - -EXERCISE_DIR=$TOP_DIR/exercises - -if [[ -z "${basenames}" ]]; then - # Locate the scripts we should run - basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) -else - # If RUN_EXERCISES was specified, ignore SKIP_EXERCISES. - SKIP_EXERCISES= -fi - -# Track the state of each script -passes="" -failures="" -skips="" - -# Loop over each possible script (by basename) -for script in $basenames; do - if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then - skips="$skips $script" - else - echo "=====================================================================" - echo Running $script - echo "=====================================================================" - $EXERCISE_DIR/$script.sh - exitcode=$? - if [[ $exitcode == 55 ]]; then - skips="$skips $script" - elif [[ $exitcode -ne 0 ]]; then - failures="$failures $script" - else - passes="$passes $script" - fi - fi -done - -# output status of exercise run -echo "=====================================================================" -for script in $skips; do - echo SKIP $script -done -for script in $passes; do - echo PASS $script -done -for script in $failures; do - echo FAILED $script -done -echo "=====================================================================" - -if [[ -n "$failures" ]]; then - exit 1 -fi diff --git a/exerciserc b/exerciserc deleted file mode 100644 index 9105fe3331..0000000000 --- a/exerciserc +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# -# source exerciserc -# -# Configure the DevStack exercise scripts -# For best results, source this _after_ stackrc/localrc as it will set -# values only if they are not already set. - -# Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time to wait for proper IP association and dis-association. 
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} - -# Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time from run instance command until it is running -export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} - -# Max time to wait for a vm to terminate -export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} - -# Max time to wait for a euca-volume command to propagate -export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} - -# Max time to wait for a euca-delete command to propagate -export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} - -# The size of the volume we want to boot from; some storage back-ends -# do not allow a disk resize, so it's important that this can be tuned -export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh deleted file mode 100755 index 01d548d1f2..0000000000 --- a/exercises/aggregates.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env bash - -# **aggregates.sh** - -# This script demonstrates how to use host aggregates: -# -# * Create an Aggregate -# * Updating Aggregate details -# * Testing Aggregate metadata -# * Testing Aggregate delete -# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) -# * Testing add/remove hosts (with one host) - -echo "**************************************************" -echo "Begin DevStack Exercise: $0" -echo "**************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Test as the admin user -. $TOP_DIR/openrc admin admin - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Cells does not support aggregates. 
-is_service_enabled n-cell && exit 55 - -# Create an aggregate -# =================== - -AGGREGATE_NAME=test_aggregate_$RANDOM -AGGREGATE2_NAME=test_aggregate_$RANDOM -AGGREGATE_A_ZONE=nova - -function exit_if_aggregate_present { - aggregate_name=$1 - - if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then - echo "SUCCESS $aggregate_name not present" - else - die $LINENO "found aggregate: $aggregate_name" - exit -1 - fi -} - -exit_if_aggregate_present $AGGREGATE_NAME - -AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE" - -AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) -die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE" - -# check aggregate created -nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created" - - -# Ensure creating a duplicate fails -# ================================= - -if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then - die $LINENO "could create duplicate aggregate" -fi - - -# Test aggregate-update (and aggregate-details) -# ============================================= -AGGREGATE_NEW_NAME=test_aggregate_$RANDOM - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - -nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME -nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE - - -# Test aggregate-set-metadata -# =========================== -META_DATA_1_KEY=asdf -META_DATA_2_KEY=foo -META_DATA_3_KEY=bar - -#ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep 123 - -nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789 -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY - -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" - -nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" - - -# Test aggregate-add/remove-host -# ============================== -if [ "$VIRT_DRIVER" == "xenserver" ]; then - echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" -fi -FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) -# Make sure can add two aggregates to same host -nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST -nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST -if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then - die $LINENO "could add duplicate host to single aggregate" -fi -nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST -nova 
aggregate-remove-host $AGGREGATE_ID $FIRST_HOST - -# Test aggregate-delete -# ===================== -nova aggregate-delete $AGGREGATE_ID -nova aggregate-delete $AGGREGATE2_ID -exit_if_aggregate_present $AGGREGATE_NAME - -set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh deleted file mode 100755 index d7566856f5..0000000000 --- a/exercises/boot_from_volume.sh +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/env bash - -# **boot_from_volume.sh** - -# This script demonstrates how to boot from a volume. It does the following: -# -# * Create a bootable volume -# * Boot a volume-backed instance - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled cinder || exit 55 - -# Ironic does not support boot from volume. -[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-bfv-inst} -VOL_NAME=${VOL_NAME:-ex-vol-bfv} - - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -glance image-list - -# Grab the id of the image to launch -IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi - -# Setup Keypair -KEY_NAME=test_key -KEY_FILE=key.pem -nova keypair-delete $KEY_NAME || true -nova keypair-add $KEY_NAME > $KEY_FILE -chmod 600 $KEY_FILE - -# Set up volume -# ------------- - -# Delete any old volume -cinder delete $VOL_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi - -# Create the bootable volume -start_time=$(date +%s) -cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Boot instance -# ------------- - -# Boot using the --block-device-mapping param. The format of mapping is: -# =::: -# Leaving the middle two fields blank appears to do-the-right-thing -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT - -# Clean up -# -------- - -# Delete volume backed instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $VM_NAME not deleted" - exit 1 -fi - -# Wait for volume to be released -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not released" - exit 1 -fi - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME" -if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/bundle.sh b/exercises/bundle.sh deleted file mode 100755 index 5470960b91..0000000000 --- a/exercises/bundle.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -# **bundle.sh** - -# we will use the ``euca2ools`` cli tool that wraps the python boto -# library to test ec2 bundle upload compatibility - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import EC2 configuration -source $TOP_DIR/eucarc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Remove old certificates -rm -f $TOP_DIR/cacert.pem -rm -f $TOP_DIR/cert.pem -rm -f $TOP_DIR/pk.pem - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Get Certificates -nova x509-get-root-cert $TOP_DIR/cacert.pem -nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem - -# Max time to wait for image to be registered -REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} - -BUCKET=testbucket -IMAGE=bundle.img -truncate -s 5M /tmp/$IMAGE -euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE" - -euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET" - -AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` -die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE" - -# Wait for the image to become available -if ! 
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then - die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds" -fi - -# Clean up -euca-deregister $AMI || die $LINENO "Failure deregistering $AMI" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/client-args.sh b/exercises/client-args.sh deleted file mode 100755 index b360f1e86a..0000000000 --- a/exercises/client-args.sh +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env bash - -# **client-args.sh** - -# Test OpenStack client authentication arguments handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME -unset NOVA_VERSION - -# Save the known variables for later -export x_TENANT_NAME=$OS_TENANT_NAME -export x_USERNAME=$OS_USERNAME -export x_PASSWORD=$OS_PASSWORD -export x_AUTH_URL=$OS_AUTH_URL - -# Unset the usual variables to force argument processing -unset OS_TENANT_NAME -unset OS_USERNAME -unset OS_PASSWORD -unset OS_AUTH_URL - -# Common authentication args -TENANT_ARG="--os-tenant-name=$x_TENANT_NAME" -ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if keystone $TENANT_ARG $ARGS catalog --service identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - STATUS_EC2="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova $TENANT_ARG $ARGS flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder $TENANT_ARG $ARGS list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if glance $TENANT_ARG $ARGS 
image-list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift $TENANT_ARG $ARGS stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh deleted file mode 100755 index cc518d9a06..0000000000 --- a/exercises/client-env.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env bash - -# **client-env.sh** - -# Test OpenStack client environment variable handling - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc admin - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Unset all of the known NOVA_* vars -unset NOVA_API_KEY -unset NOVA_ENDPOINT_NAME -unset NOVA_PASSWORD -unset NOVA_PROJECT_ID -unset NOVA_REGION_NAME -unset NOVA_URL -unset NOVA_USERNAME -unset NOVA_VERSION - -for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do - is_set $i - if [[ $? 
-ne 0 ]]; then - echo "$i expected to be set" - ABORT=1 - fi -done -if [[ -n "$ABORT" ]]; then - exit 1 -fi - -# Set global return -RETURN=0 - -# Keystone client -# --------------- -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]]; then - STATUS_KEYSTONE="Skipped" - else - echo -e "\nTest Keystone" - if openstack endpoint show identity; then - STATUS_KEYSTONE="Succeeded" - else - STATUS_KEYSTONE="Failed" - RETURN=1 - fi - fi -fi - -# Nova client -# ----------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then - STATUS_NOVA="Skipped" - STATUS_EC2="Skipped" - else - # Test OSAPI - echo -e "\nTest Nova" - if nova flavor-list; then - STATUS_NOVA="Succeeded" - else - STATUS_NOVA="Failed" - RETURN=1 - fi - - # Test EC2 API - echo -e "\nTest EC2" - # Get EC2 creds - source $TOP_DIR/eucarc - - if euca-describe-images; then - STATUS_EC2="Succeeded" - else - STATUS_EC2="Failed" - RETURN=1 - fi - - # Clean up side effects - unset NOVA_VERSION - fi -fi - -# Cinder client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then - STATUS_CINDER="Skipped" - else - echo -e "\nTest Cinder" - if cinder list; then - STATUS_CINDER="Succeeded" - else - STATUS_CINDER="Failed" - RETURN=1 - fi - fi -fi - -# Glance client -# ------------- - -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then - STATUS_GLANCE="Skipped" - else - echo -e "\nTest Glance" - if glance image-list; then - STATUS_GLANCE="Succeeded" - else - STATUS_GLANCE="Failed" - RETURN=1 - fi - fi -fi - -# Swift client -# ------------ - - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then - STATUS_SWIFT="Skipped" - else - echo -e "\nTest Swift" - if swift stat; then - STATUS_SWIFT="Succeeded" - else - STATUS_SWIFT="Failed" - RETURN=1 - fi - fi -fi - -set +o xtrace - - -# Results -# ======= - -function report { - if [[ -n "$2" ]]; then - echo "$1: $2" - fi -} - -echo -e "\n" -report "Keystone" $STATUS_KEYSTONE -report "Nova" $STATUS_NOVA -report "EC2" $STATUS_EC2 -report "Cinder" $STATUS_CINDER -report "Glance" $STATUS_GLANCE -report "Swift" $STATUS_SWIFT - -if (( $RETURN == 0 )); then - echo "*********************************************************************" - echo "SUCCESS: End DevStack Exercise: $0" - echo "*********************************************************************" -fi - -exit $RETURN diff --git a/exercises/euca.sh b/exercises/euca.sh deleted file mode 100755 index f9c47523e6..0000000000 --- a/exercises/euca.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/env bash - -# **euca.sh** - -# we will use the ``euca2ools`` cli tool that wraps the python boto -# library to test ec2 compatibility - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
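Note: euca.sh below, like most of these exercises, waits on cloud state with inline `timeout $T sh -c "while ! <check>; do sleep 1; done"` loops. For reference, a factored-out equivalent might look like the following (a sketch for illustration only; `wait_for` is not a DevStack helper):

    # wait_for TIMEOUT DESCRIPTION COMMAND...
    # Run COMMAND once a second until it succeeds; give up after
    # TIMEOUT seconds. Returns 0 on success, 1 on timeout.
    function wait_for {
        local timeout=$1 what=$2
        shift 2
        local deadline=$(( $(date +%s) + timeout ))
        while ! "$@" >/dev/null 2>&1; do
            if [ "$(date +%s)" -ge "$deadline" ]; then
                echo "Timed out waiting for $what after ${timeout}s" >&2
                return 1
            fi
            sleep 1
        done
    }

    # Usage, equivalent to one of the inline loops below:
    # wait_for $RUNNING_TIMEOUT "instance running" \
    #     sh -c "euca-describe-instances $INSTANCE | grep -q running"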
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) -VOLUME_SIZE=1 -ATTACH_DEVICE=/dev/vdc - -# Import common functions -source $TOP_DIR/functions - -# Import EC2 configuration -source $TOP_DIR/eucarc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Import project functions -source $TOP_DIR/lib/neutron - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-euca_secgroup} - - -# Launching a server -# ================== - -# Find a machine image to boot -IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Add a secgroup - if ! euca-describe-groups | grep -q $SECGROUP; then - euca-add-group -d "$SECGROUP description" $SECGROUP - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then - die $LINENO "Security group not created" - fi - fi -fi - -# Launch it -INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` -die_if_not_set $LINENO INSTANCE "Failure launching instance" - -# Assure it has booted within a reasonable time -if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then - die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds" -fi - -# Volumes -# ------- -if is_service_enabled c-vol && ! is_service_enabled n-cell && [ "$VIRT_DRIVER" != "ironic" ]; then - VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` - die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" - - VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to create volume" - - # Test that volume has been created - VOLUME=`euca-describe-volumes $VOLUME | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to get volume" - - # Test volume has become available - if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" - fi - - # Attach volume to an instance - euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ - die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then - die $LINENO "Could not attach $VOLUME to $INSTANCE" - fi - - # Detach volume from an instance - euca-detach-volume $VOLUME || \ - die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "Could not detach $VOLUME to $INSTANCE" - fi - - # Remove volume - euca-delete-volume $VOLUME || \ - die $LINENO "Failure to delete volume" - if ! 
timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then - die $LINENO "Could not delete $VOLUME" - fi -else - echo "Volume Tests Skipped" -fi - -if is_service_enabled n-cell; then - echo "Floating IP Tests Skipped because of Cells." -else - # Allocate floating address - FLOATING_IP=`euca-allocate-address | cut -f2` - die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" - # describe all instances at this moment - euca-describe-instances - # Associate floating address - euca-associate-address -i $INSTANCE $FLOATING_IP || \ - die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" - - # Authorize pinging - euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die $LINENO "Failure authorizing rule in $SECGROUP" - - # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT - - # Revoke pinging - euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die $LINENO "Failure revoking rule in $SECGROUP" - - # Release floating address - euca-disassociate-address $FLOATING_IP || \ - die $LINENO "Failure disassociating address $FLOATING_IP" - - # Wait just a tick for everything above to complete so release doesn't fail - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" - fi - - # Release floating address - euca-release-address $FLOATING_IP || \ - die $LINENO "Failure releasing address $FLOATING_IP" - - # Wait just a tick for everything above to complete so terminate doesn't fail - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" - fi -fi - -# Terminate instance -euca-terminate-instances $INSTANCE || \ - die $LINENO "Failure terminating instance $INSTANCE" - -# Assure it has terminated within a reasonable time. The behaviour of this -# case changed with bug/836978. Requesting the status of an invalid instance -# will now return an error message including the instance id, so we need to -# filter that out. -if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve '\(InstanceNotFound\|InvalidInstanceID\.NotFound\)' | grep -q $INSTANCE; do sleep 1; done"; then - die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds" -fi - -if [[ "$SECGROUP" = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh deleted file mode 100755 index 7e90e5adaf..0000000000 --- a/exercises/floating_ips.sh +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env bash - -# **floating_ips.sh** - using the cloud can be fun - -# Test instance connectivity with the ``nova`` command from ``python-novaclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/neutron - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-test_secgroup} - -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public} - -# Additional floating IP pool and range -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} - -# Instance name -VM_NAME="ex-float" - -# Cells does not support floating ips API calls -is_service_enabled n-cell && exit 55 - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -glance image-list - -# Grab the id of the image to launch -IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -# Create a secgroup -if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - die $LINENO "Security group not created" - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" - exit 1 -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT - -# Floating IPs -# ------------ - -# Allocate a floating IP from the default pool -FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) -die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" - -# List floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" -fi - -# Add floating IP to our server -nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME" - -# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT - -if ! is_service_enabled neutron; then - # Allocate an IP from second floating pool - TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) - die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" - - # list floating addresses - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating IP not allocated" - fi -fi - -# Dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ - die $LINENO "Failure deleting security group rule from $SECGROUP" - -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - die $LINENO "Security group rule not deleted from $SECGROUP" -fi - -# FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail -fi - -# Clean up -# -------- - -if ! 
is_service_enabled neutron; then - # Delete second floating IP - nova floating-ip-delete $TEST_FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP" -fi - -# Delete the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die $LINENO "Failure deleting floating IP $FLOATING_IP" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -# Wait for termination -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -# Delete secgroup -nova secgroup-delete $SECGROUP || \ - die $LINENO "Failure deleting security group $SECGROUP" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/horizon.sh b/exercises/horizon.sh deleted file mode 100755 index d62ad52123..0000000000 --- a/exercises/horizon.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# **horizon.sh** - -# Sanity check that horizon started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled horizon || exit 55 - -# can we get the front page -curl http://$SERVICE_HOST 2>/dev/null | grep -q '
<h3.*>Log In</h3>
' || die $LINENO "Horizon front page not functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" - diff --git a/exercises/marconi.sh b/exercises/marconi.sh deleted file mode 100755 index 9d83a99f02..0000000000 --- a/exercises/marconi.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -# **marconi.sh** - -# Sanity check that Marconi started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled marconi-server || exit 55 - -curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Marconi API not functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh deleted file mode 100755 index 6679670aee..0000000000 --- a/exercises/neutron-adv-test.sh +++ /dev/null @@ -1,453 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2012, Cisco Systems -# Copyright 2012, VMware, Inc. -# Copyright 2012, NTT MCL, Inc. -# -# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com -# -# **neutron-adv-test.sh** - -# Perform integration testing of Nova and other components with Neutron. - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. - -set -o errtrace - -trap failed ERR -function failed { - local r=$? - set +o errtrace - set +o xtrace - echo "Failed to execute" - echo "Starting cleanup..." - delete_all - echo "Finished cleanup" - exit $r -} - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - -# Environment -# ----------- - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import neutron functions -source $TOP_DIR/lib/neutron - -# If neutron is not enabled we exit with exitcode 55, which means exercise is skipped. 
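As the comment above says, an exercise signals "skipped" by exiting with status 55 so that the calling runner can tell skips apart from failures. A minimal runner honoring that convention might look like this (illustrative sketch only; variable names are hypothetical):

    passed=""; skipped=""; failed=""
    for script in exercises/*.sh; do
        "./$script"
        rc=$?
        if [ "$rc" -eq 0 ]; then
            passed="$passed $script"
        elif [ "$rc" -eq 55 ]; then
            skipped="$skipped $script"   # required service not enabled
        else
            failed="$failed $script"
        fi
    done
    echo "PASSED: $passed"
    echo "SKIPPED:$skipped"
    echo "FAILED: $failed"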
-neutron_plugin_check_adv_test_requirements || exit 55 - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Neutron Settings -# ---------------- - -TENANTS="DEMO1" -# TODO (nati)_Test public network -#TENANTS="DEMO1,DEMO2" - -PUBLIC_NAME="admin" -DEMO1_NAME="demo1" -DEMO2_NAME="demo2" - -PUBLIC_NUM_NET=1 -DEMO1_NUM_NET=1 -DEMO2_NUM_NET=2 - -PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.10.0.0/24" -DEMO2_NET1_CIDR="10.20.0.0/24" -DEMO2_NET2_CIDR="10.20.1.0/24" - -PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.10.0.1" -DEMO2_NET1_GATEWAY="10.20.0.1" -DEMO2_NET2_GATEWAY="10.20.1.1" - -PUBLIC_NUM_VM=1 -DEMO1_NUM_VM=1 -DEMO2_NUM_VM=2 - -PUBLIC_VM1_NET='admin-net1' -DEMO1_VM1_NET='demo1-net1' -# Multinic settings. But this is fail without nic setting in OS image -DEMO2_VM1_NET='demo2-net1' -DEMO2_VM2_NET='demo2-net2' - -PUBLIC_NUM_ROUTER=1 -DEMO1_NUM_ROUTER=1 -DEMO2_NUM_ROUTER=1 - -PUBLIC_ROUTER1_NET="admin-net1" -DEMO1_ROUTER1_NET="demo1-net1" -DEMO2_ROUTER1_NET="demo2-net1" - -# Various functions -# ----------------- - -function foreach_tenant { - COMMAND=$1 - for TENANT in ${TENANTS//,/ };do - eval ${COMMAND//%TENANT%/$TENANT} - done -} - -function foreach_tenant_resource { - COMMAND=$1 - RESOURCE=$2 - for TENANT in ${TENANTS//,/ };do - eval 'NUM=$'"${TENANT}_NUM_$RESOURCE" - for i in `seq $NUM`;do - local COMMAND_LOCAL=${COMMAND//%TENANT%/$TENANT} - COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i} - eval $COMMAND_LOCAL - done - done -} - -function foreach_tenant_vm { - COMMAND=$1 - foreach_tenant_resource "$COMMAND" 'VM' -} - -function foreach_tenant_net { - COMMAND=$1 - foreach_tenant_resource "$COMMAND" 'NET' -} - -function get_image_id { - local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) - die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" - echo "$IMAGE_ID" -} - -function get_tenant_id { - local TENANT_NAME=$1 - local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1` - die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME" - echo "$TENANT_ID" -} - -function get_user_id { - local USER_NAME=$1 - local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'` - die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" - echo "$USER_ID" -} - -function get_role_id { - local ROLE_NAME=$1 - local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'` - die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" - echo "$ROLE_ID" -} - -function get_network_id { - local NETWORK_NAME="$1" - local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` - echo $NETWORK_ID -} - -function get_flavor_id { - local INSTANCE_TYPE=$1 - local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` - die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" - echo "$FLAVOR_ID" -} - -function confirm_server_active { - local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!" 
- false - fi -} - -function neutron_debug_admin { - local os_username=$OS_USERNAME - local os_tenant_id=$OS_TENANT_ID - source $TOP_DIR/openrc admin admin - neutron-debug $@ - source $TOP_DIR/openrc $os_username $os_tenant_id -} - -function add_tenant { - openstack project create $1 - openstack user create $2 --password ${ADMIN_PASSWORD} --project $1 - openstack role add Member --project $1 --user $2 -} - -function remove_tenant { - local TENANT=$1 - local TENANT_ID=$(get_tenant_id $TENANT) - openstack project delete $TENANT_ID -} - -function remove_user { - local USER=$1 - local USER_ID=$(get_user_id $USER) - openstack user delete $USER_ID -} - -function create_tenants { - source $TOP_DIR/openrc admin admin - add_tenant demo1 demo1 demo1 - add_tenant demo2 demo2 demo2 - source $TOP_DIR/openrc demo demo -} - -function delete_tenants_and_users { - source $TOP_DIR/openrc admin admin - remove_user demo1 - remove_tenant demo1 - remove_user demo2 - remove_tenant demo2 - echo "removed all tenants" - source $TOP_DIR/openrc demo demo -} - -function create_network { - local TENANT=$1 - local GATEWAY=$2 - local CIDR=$3 - local NUM=$4 - local EXTRA=$5 - local NET_NAME="${TENANT}-net$NUM" - local ROUTER_NAME="${TENANT}-router${NUM}" - source $TOP_DIR/openrc admin admin - local TENANT_ID=$(get_tenant_id $TENANT) - source $TOP_DIR/openrc $TENANT $TENANT - local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - neutron_debug_admin probe-create --device-owner compute $NET_ID - source $TOP_DIR/openrc demo demo -} - -function create_networks { - foreach_tenant_net 'create_network ${%TENANT%_NAME} ${%TENANT%_NET%NUM%_GATEWAY} ${%TENANT%_NET%NUM%_CIDR} %NUM% ${%TENANT%_NET%NUM%_EXTRA}' - #TODO(nati) test security group function - # allow ICMP for both tenant's security groups - #source $TOP_DIR/openrc demo1 demo1 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 - #source $TOP_DIR/openrc demo2 demo2 - #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_vm { - local TENANT=$1 - local NUM=$2 - local NET_NAMES=$3 - source $TOP_DIR/openrc $TENANT $TENANT - local NIC="" - for NET_NAME in ${NET_NAMES//,/ };do - NIC="$NIC --nic net-id="`get_network_id $NET_NAME` - done - #TODO (nati) Add multi-nic test - #TODO (nati) Add public-net test - local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - $NIC \ - $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM" - confirm_server_active $VM_UUID -} - -function create_vms { - foreach_tenant_vm 'create_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' -} - -function ping_ip { - # Test agent connection. 
Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $NET_NAME $IP $BOOT_TIMEOUT -} - -function check_vm { - local TENANT=$1 - local NUM=$2 - local VM_NAME="$TENANT-server$NUM" - local NET_NAME=$3 - source $TOP_DIR/openrc $TENANT $TENANT - ping_ip $VM_NAME $NET_NAME - # TODO (nati) test ssh connection - # TODO (nati) test inter connection between vm - # TODO (nati) test dhcp host routes - # TODO (nati) test multi-nic -} - -function check_vms { - foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' -} - -function shutdown_vm { - local TENANT=$1 - local NUM=$2 - source $TOP_DIR/openrc $TENANT $TENANT - VM_NAME=${TENANT}-server$NUM - nova delete $VM_NAME -} - -function shutdown_vms { - foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "Some VMs failed to shutdown" - fi -} - -function delete_network { - local TENANT=$1 - local NUM=$2 - local NET_NAME="${TENANT}-net$NUM" - source $TOP_DIR/openrc admin admin - local TENANT_ID=$(get_tenant_id $TENANT) - #TODO(nati) comment out until l3-agent merged - #for res in port subnet net router;do - for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do - delete_probe $net_id - neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete % - neutron net-delete $net_id - done - source $TOP_DIR/openrc demo demo -} - -function delete_networks { - foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' - # TODO(nati) add secuirty group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 -} - -function create_all { - create_tenants - create_networks - create_vms -} - -function delete_all { - shutdown_vms - delete_networks - delete_tenants_and_users -} - -function all { - create_all - check_vms - delete_all -} - -# Test functions -# -------------- - -function test_functions { - IMAGE=$(get_image_id) - echo $IMAGE - - TENANT_ID=$(get_tenant_id demo) - echo $TENANT_ID - - FLAVOR_ID=$(get_flavor_id m1.tiny) - echo $FLAVOR_ID - - NETWORK_ID=$(get_network_id admin) - echo $NETWORK_ID -} - -# Usage and main -# -------------- - -function usage { - echo "$0: [-h]" - echo " -h, --help Display help message" - echo " -t, --tenant Create tenants" - echo " -n, --net Create networks" - echo " -v, --vm Create vms" - echo " -c, --check Check connection" - echo " -x, --delete-tenants Delete tenants" - echo " -y, --delete-nets Delete networks" - echo " -z, --delete-vms Delete vms" - echo " -T, --test Test functions" -} - -function main { - - echo Description - - if [ $# -eq 0 ] ; then - # if no args are provided, run all tests - all - else - - while [ "$1" != "" ]; do - case $1 in - -h | --help ) usage - exit - ;; - -n | --net ) create_networks - exit - ;; - -v | --vm ) create_vms - exit - ;; - -t | --tenant ) create_tenants - exit - ;; - -c | --check ) check_vms - exit - ;; - -T | --test ) test_functions - exit - ;; - -x | --delete-tenants ) delete_tenants_and_users - exit - ;; - -y | --delete-nets ) delete_networks - exit - ;; - -z | --delete-vms ) shutdown_vms - exit - ;; - -a | --all ) all - exit - ;; - * ) usage - exit 1 - esac - shift - done - fi -} - -# Kick off script -# --------------- - 
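The foreach_tenant* helpers above drive the whole test matrix from string templates: `%TENANT%` and `%NUM%` placeholders are rewritten with bash parameter expansion and the result is handed to `eval`. A self-contained sketch of that technique (demo names are hypothetical):

    TENANTS="DEMO1,DEMO2"
    DEMO1_NUM_NET=1
    DEMO2_NUM_NET=2

    function foreach_tenant_resource_demo {
        local command=$1 resource=$2
        local tenant num i expanded
        for tenant in ${TENANTS//,/ }; do
            # Indirect lookup, e.g. num=$DEMO1_NUM_NET
            eval 'num=$'"${tenant}_NUM_$resource"
            for i in $(seq $num); do
                expanded=${command//%TENANT%/$tenant}
                expanded=${expanded//%NUM%/$i}
                eval "$expanded"
            done
        done
    }

    # Prints "net 1 of DEMO1", "net 1 of DEMO2", "net 2 of DEMO2"
    foreach_tenant_resource_demo 'echo "net %NUM% of %TENANT%"' 'NET'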
-echo $* -main $* - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/sahara.sh b/exercises/sahara.sh deleted file mode 100755 index 867920ed31..0000000000 --- a/exercises/sahara.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -# **sahara.sh** - -# Sanity check that Sahara started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled sahara || exit 55 - -curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh deleted file mode 100755 index 5f8b0a4d5d..0000000000 --- a/exercises/sec_groups.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -# **sec_groups.sh** - -# Test security groups via the command line - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
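The nova secgroup-* subcommands this exercise drives were later removed from python-novaclient; with the unified openstack client the same lifecycle reads roughly as follows (a sketch, not part of this script):

    SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
    openstack security group create \
        --description 'a test security group' $SEC_GROUP_NAME
    for RULE in 22 3389 5900; do
        openstack security group rule create --protocol tcp \
            --dst-port $RULE:$RULE --remote-ip 0.0.0.0/0 $SEC_GROUP_NAME
    done
    openstack security group delete $SEC_GROUP_NAME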
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If nova api is not enabled we exit with exitcode 55 so that -# the exercise is skipped -is_service_enabled n-api || exit 55 - - -# Testing Security Groups -# ======================= - -# List security groups -nova secgroup-list - -# Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" -nova secgroup-create $SEC_GROUP_NAME 'a test security group' - -# Add some rules to the secgroup -RULES_TO_ADD=( 22 3389 5900 ) - -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Check to make sure rules were added -SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) -die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME" -for i in "${RULES_TO_ADD[@]}"; do - skip= - for j in "${SEC_GROUP_RULES[@]}"; do - [[ $i == $j ]] && { skip=1; break; } - done - [[ -n $skip ]] || exit 1 -done - -# Delete rules and secgroup -for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 -done - -# Delete secgroup -nova secgroup-delete $SEC_GROUP_NAME || \ - die $LINENO "Failure deleting security group $SEC_GROUP_NAME" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh deleted file mode 100755 index 25ea6719c1..0000000000 --- a/exercises/swift.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -# **swift.sh** - -# Test swift via the ``swift`` command line from ``python-swiftclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If swift is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled s-proxy || exit 55 - -# Container name -CONTAINER=ex-swift - - -# Testing Swift -# ============= - -# Check if we have to swift via keystone -swift stat || die $LINENO "Failure geting status" - -# We start by creating a test container -swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER" - -# add some files into it. 
-swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER" - -# list them -swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER" - -# And we may want to delete them now that we have tested that -# everything works. -swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/exercises/trove.sh b/exercises/trove.sh deleted file mode 100755 index d48d5fec99..0000000000 --- a/exercises/trove.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# **trove.sh** - -# Sanity check that trove started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled trove || exit 55 - -# can we get a list versions -curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" - diff --git a/exercises/volumes.sh b/exercises/volumes.sh deleted file mode 100755 index 1dff6a41ab..0000000000 --- a/exercises/volumes.sh +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env bash - -# **volumes.sh** - -# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import project functions -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If cinder is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled cinder || exit 55 - -# Ironic does not currently support volume attachment. 
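A side note on the guard that follows: `[ ... ] && exit 55` is safe even though the script runs under `set -o errexit`, because a failing command on the left-hand side of `&&` does not trip errexit; on non-Ironic drivers the test simply fails and execution continues. A standalone illustration:

    set -o errexit
    [ "$VIRT_DRIVER" == "ironic" ] && exit 55   # test may fail; script keeps going
    echo "still running"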
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55 - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Security group name -SECGROUP=${SECGROUP:-vol_secgroup} - -# Instance and volume names -VM_NAME=${VM_NAME:-ex-vol-inst} -VOL_NAME="ex-vol-$(openssl rand -hex 4)" - - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# List the images available -glance image-list - -# Grab the id of the image to launch -IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" - -# Security Groups -# --------------- - -# List security groups -nova secgroup-list - -if is_service_enabled n-cell; then - # Cells does not support security groups, so force the use of "default" - SECGROUP="default" - echo "Using the default security group because of Cells." -else - # Create a secgroup - if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 - fi - fi -fi - -# Configure Security Group Rules -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -fi -if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then - nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 -fi - -# List secgroup rules -nova secgroup-list-rules $SECGROUP - -# Set up instance -# --------------- - -# List flavors -nova flavor-list - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) - die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" -fi - -# Get the instance IP -IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) - -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT - -# Volumes -# ------- - -# Verify it doesn't exist -if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then - die $LINENO "Volume $VOL_NAME already exists" -fi - -# Create a new volume -start_time=$(date +%s) -cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die $LINENO "Failure creating volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not created" -fi -end_time=$(date +%s) -echo "Completed cinder create in $((end_time - start_time)) seconds" - -# Get volume ID -VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) -die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" - -# Attach to server -DEVICE=/dev/vdb -start_time=$(date +%s) -nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not attached to $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-attach in $((end_time - start_time)) seconds" - -VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) -die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status" -if [[ "$VOL_ATTACH" != $VM_UUID ]]; then - die $LINENO "Volume not attached to correct instance" -fi - -# Clean up -# -------- - -# Detach volume -start_time=$(date +%s) -nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not detached from $VM_NAME" -fi -end_time=$(date +%s) -echo "Completed volume-detach in $((end_time - start_time)) seconds" - -# Delete volume -start_time=$(date +%s) -cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - die $LINENO "Volume $VOL_NAME not deleted" -fi -end_time=$(date +%s) -echo "Completed cinder delete in $((end_time - start_time)) seconds" - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -if [[ $SECGROUP = "default" ]] ; then - echo "Skipping deleting default security group" -else - # Delete secgroup - nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh deleted file mode 100644 index 3b8e3d5045..0000000000 --- a/extras.d/50-ironic.sh +++ /dev/null @@ -1,43 +0,0 @@ -# ironic.sh - Devstack extras script to install ironic - -if is_service_enabled ir-api ir-cond; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/ironic - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Ironic" - install_ironic - install_ironicclient - cleanup_ironic - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Ironic" - configure_ironic - - if is_service_enabled key; then - create_ironic_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize ironic - init_ironic - - # Start the ironic API and ironic taskmgr components - echo_summary "Starting Ironic" - start_ironic - - if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then - prepare_baremetal_basic_ops - fi - fi - - if [[ "$1" == "unstack" ]]; then - stop_ironic - if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then - cleanup_baremetal_basic_ops - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_ironic - fi -fi diff --git a/extras.d/70-gantt.sh b/extras.d/70-gantt.sh deleted file mode 100644 index ac1efba748..0000000000 --- a/extras.d/70-gantt.sh +++ /dev/null @@ -1,31 +0,0 @@ -# gantt.sh - Devstack extras script to install Gantt - -if is_service_enabled n-sch; then - disable_service gantt -fi - -if is_service_enabled gantt; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/gantt - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Gantt" - install_gantt - cleanup_gantt - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Gantt" - configure_gantt - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize gantt - init_gantt - - # Start gantt - echo_summary "Starting Gantt" - start_gantt - fi - - if [[ "$1" == "unstack" ]]; then - stop_gantt - fi -fi diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh deleted file mode 100644 index a96a4c546c..0000000000 --- a/extras.d/70-marconi.sh +++ /dev/null @@ -1,29 +0,0 @@ -# marconi.sh - Devstack extras script to install Marconi - -if is_service_enabled marconi-server; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/marconi - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Marconi" - install_marconiclient - install_marconi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Marconi" - configure_marconi - configure_marconiclient - - if is_service_enabled key; then - create_marconi_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Marconi" - init_marconi - start_marconi - fi - - if [[ "$1" == "unstack" ]]; then - stop_marconi - fi -fi diff --git a/extras.d/70-sahara.sh 
b/extras.d/70-sahara.sh deleted file mode 100644 index 80e07ff7b9..0000000000 --- a/extras.d/70-sahara.sh +++ /dev/null @@ -1,37 +0,0 @@ -# sahara.sh - DevStack extras script to install Sahara - -if is_service_enabled sahara; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/sahara - source $TOP_DIR/lib/sahara-dashboard - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing sahara" - install_sahara - cleanup_sahara - if is_service_enabled horizon; then - install_sahara_dashboard - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring sahara" - configure_sahara - create_sahara_accounts - if is_service_enabled horizon; then - configure_sahara_dashboard - fi - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing sahara" - start_sahara - fi - - if [[ "$1" == "unstack" ]]; then - stop_sahara - if is_service_enabled horizon; then - cleanup_sahara_dashboard - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_sahara - fi -fi diff --git a/extras.d/70-trove.sh b/extras.d/70-trove.sh deleted file mode 100644 index a4dc7fbc5b..0000000000 --- a/extras.d/70-trove.sh +++ /dev/null @@ -1,33 +0,0 @@ -# trove.sh - Devstack extras script to install Trove - -if is_service_enabled trove; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/trove - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Trove" - install_trove - install_troveclient - cleanup_trove - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Trove" - configure_troveclient - configure_trove - - if is_service_enabled key; then - create_trove_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize trove - init_trove - - # Start the trove API and trove taskmgr components - echo_summary "Starting Trove" - start_trove - fi - - if [[ "$1" == "unstack" ]]; then - stop_trove - fi -fi diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh deleted file mode 100644 index bf99866f92..0000000000 --- a/extras.d/80-opendaylight.sh +++ /dev/null @@ -1,74 +0,0 @@ -# opendaylight.sh - DevStack extras script - -if is_service_enabled odl-server odl-compute; then - # Initial source - [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight -fi - -if is_service_enabled odl-server; then - if [[ "$1" == "source" ]]; then - # no-op - : - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - install_opendaylight - configure_opendaylight - init_opendaylight - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - configure_ml2_odl - # This has to start before Neutron - start_opendaylight - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" ]]; then - stop_opendaylight - cleanup_opendaylight - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi - -if is_service_enabled odl-compute; then - if [[ "$1" == "source" ]]; then - # no-op - : - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - install_opendaylight-compute - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - create_nova_conf_neutron - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing OpenDaylight" - ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} - ODL_MGR_PORT=${ODL_MGR_PORT:-6640} - read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . 
_uuid) - sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT - if [[ -n "$OVS_BRIDGE_MAPPINGS" ]] && [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then - sudo ovs-vsctl set Open_vSwitch $ovstbl \ - other_config:bridge_mappings=$OVS_BRIDGE_MAPPINGS - fi - sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP - elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then - # no-op - : - fi - - if [[ "$1" == "unstack" ]]; then - sudo ovs-vsctl del-manager - BRIDGES=$(sudo ovs-vsctl list-br) - for bridge in $BRIDGES ; do - sudo ovs-vsctl del-controller $bridge - done - - stop_opendaylight-compute - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 74f4c60d10..06c73ec763 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -6,17 +6,22 @@ if is_service_enabled tempest; then source $TOP_DIR/lib/tempest elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Tempest" - install_tempest + async_runfunc install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running - create_tempest_accounts + : elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Tempest" - configure_tempest - init_tempest + # Tempest config must come after all other plugins are run + : elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # local.conf Tempest option overrides : + elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest + echo_summary "Initializing Tempest" + configure_tempest + echo_summary "Installing Tempest Plugins" + install_tempest_plugins fi if [[ "$1" == "unstack" ]]; then @@ -29,4 +34,3 @@ if is_service_enabled tempest; then : fi fi - diff --git a/extras.d/README.md b/extras.d/README.md index 1dd17da2d6..4cec14b4e7 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -14,17 +14,35 @@ The scripts are sourced at the beginning of each script that calls them. The entire `stack.sh` variable space is available. The scripts are sourced with one or more arguments, the first of which defines the hook phase: - source | stack | unstack | clean + override_defaults | source | stack | unstack | clean - source: always called first in any of the scripts, used to set the - initial defaults in a lib/* script or similar + override_defaults: always called first in any of the scripts, used to + override defaults (if need be) that are otherwise set in lib/* scripts + + source: called by stack.sh. Used to set the initial defaults in a lib/* + script or similar stack: called by stack.sh. There are four possible values for the second arg to distinguish the phase stack.sh is in: - arg 2: install | post-config | extra | post-extra + arg 2: pre-install | install | post-config | extra unstack: called by unstack.sh clean: called by clean.sh. Remember, clean.sh also calls unstack.sh so that work need not be repeated. + +The `stack` phase sub-phases are called from `stack.sh` in the following places: + + pre-install - After all system prerequisites have been installed but before any + DevStack-specific services are installed (including database and rpc). + + install - After all OpenStack services have been installed and configured + but before any OpenStack services have been started. Changes to OpenStack + service configurations should be done here. + + post-config - After OpenStack services have been initialized but still before + they have been started. 
(This is probably mis-named, think of it as post-init.) + + extra - After everything is started. + diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template new file mode 100644 index 0000000000..e401803abc --- /dev/null +++ b/files/apache-cinder-api.template @@ -0,0 +1,18 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess osapi_volume processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup osapi_volume + WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%{cu}t %M" + ErrorLog /var/log/%APACHE_NAME%/c-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + Require all granted + + diff --git a/files/apache-horizon.template b/files/apache-horizon.template index af880c4f51..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -1,6 +1,6 @@ - WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% + WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py + WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP} WSGIApplicationGroup %{GLOBAL} SetEnv APACHE_RUN_USER %USER% @@ -8,7 +8,10 @@ WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ - Alias /media %HORIZON_DIR%/openstack_dashboard/static + Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static + Alias %WEBROOT%/static %HORIZON_DIR%/static + + RedirectMatch "^/$" "%WEBROOT%/" Options FollowSymLinks @@ -17,15 +20,14 @@ Options Indexes FollowSymLinks MultiViews - %HORIZON_REQUIRE% AllowOverride None - Order allow,deny - allow from all + Require all granted - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 919452a040..d99e8e6ce0 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,22 +1,37 @@ Listen %PUBLICPORT% -Listen %ADMINPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined + + + Require all granted + - WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% + WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-public - WSGIScriptAlias / %PUBLICWSGI% + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} - ErrorLog /var/log/%APACHE_NAME%/keystone - LogLevel debug - CustomLog /var/log/%APACHE_NAME%/access.log combined + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/keystone.log + CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% - - WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %ADMINWSGI% +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% + +Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public + + SetHandler wsgi-script + Options +ExecCGI + + WSGIProcessGroup keystone-public WSGIApplicationGroup %{GLOBAL} - ErrorLog /var/log/%APACHE_NAME%/keystone - 
LogLevel debug - CustomLog /var/log/%APACHE_NAME%/access.log combined - + WSGIPassAuthorization On + diff --git a/files/apache-neutron.template b/files/apache-neutron.template new file mode 100644 index 0000000000..358e87f5da --- /dev/null +++ b/files/apache-neutron.template @@ -0,0 +1,37 @@ +Listen %PUBLICPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined + + + Require all granted + + + + WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup neutron-server + WSGIScriptAlias / %NEUTRON_BIN%/neutron-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/neutron.log + CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% + +Alias /networking %NEUTRON_BIN%/neutron-api + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup neutron-server + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template new file mode 100644 index 0000000000..66fcf73cf2 --- /dev/null +++ b/files/apache-nova-api.template @@ -0,0 +1,23 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-api + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/nova-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +Alias /compute %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template new file mode 100644 index 0000000000..64be03166e --- /dev/null +++ b/files/apache-nova-metadata.template @@ -0,0 +1,23 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-metadata processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-metadata + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +Alias /metadata %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-metadata + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector deleted file mode 100644 index f1b692ac71..0000000000 --- a/files/apts/ceilometer-collector +++ /dev/null @@ -1,6 +0,0 @@ -python-pymongo #NOPRIME -mongodb-server #NOPRIME -libnspr4-dev -pkg-config -libxml2-dev -libxslt-dev \ No newline at end of file diff --git a/files/apts/dstat b/files/apts/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/apts/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/apts/general b/files/apts/general deleted file mode 100644 index d81ec7a553..0000000000 --- a/files/apts/general +++ /dev/null @@ -1,24 +0,0 @@ -bridge-utils -pylint -python-setuptools -screen -unzip -wget -psmisc -gcc -git -lsof # useful when debugging -openssh-server -openssl -python-virtualenv 
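
A pattern worth noting across these templates: each API is published twice, once at the root of its own Listen port and once under a well-known path alias on the default vhost (/identity for Keystone, /networking for Neutron, /compute and /metadata for Nova), with both routes handled by the same WSGI process group. For example, with DevStack's usual defaults (hostname assumed):

    # Same Keystone application, reachable two ways:
    curl http://controller:5000/v3         # dedicated port (Listen %PUBLICPORT%)
    curl http://controller/identity/v3     # path alias on the default vhost
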
-python-unittest2 -iputils-ping -wget -curl -tcpdump -euca2ools # only for testing client -tar -python-cmd2 # dist:precise -python-dev -python2.7 -bc diff --git a/files/apts/glance b/files/apts/glance deleted file mode 100644 index b5d8c77094..0000000000 --- a/files/apts/glance +++ /dev/null @@ -1,15 +0,0 @@ -libffi-dev -libmysqlclient-dev # testonly -libpq-dev # testonly -libssl-dev # testonly -libxml2-dev -libxslt1-dev # testonly -python-eventlet -python-routes -python-greenlet -python-sqlalchemy -python-wsgiref -python-pastedeploy -python-xattr -python-iso8601 -zlib1g-dev # testonly diff --git a/files/apts/horizon b/files/apts/horizon deleted file mode 100644 index 8969046355..0000000000 --- a/files/apts/horizon +++ /dev/null @@ -1,21 +0,0 @@ -apache2 # NOPRIME -libapache2-mod-wsgi # NOPRIME -python-beautifulsoup -python-dateutil -python-paste -python-pastedeploy -python-anyjson -python-routes -python-xattr -python-sqlalchemy -python-webob -python-kombu -pylint -python-eventlet -python-nose -python-sphinx -python-mox -python-kombu -python-coverage -python-cherrypy3 # why? -python-migrate diff --git a/files/apts/ironic b/files/apts/ironic deleted file mode 100644 index b77a6b1a9b..0000000000 --- a/files/apts/ironic +++ /dev/null @@ -1,11 +0,0 @@ -iptables -libguestfs0 -libvirt-bin -openssh-client -openvswitch-switch -openvswitch-datapath-dkms -python-libguestfs -python-libvirt -syslinux -tftpd-hpa -xinetd diff --git a/files/apts/keystone b/files/apts/keystone deleted file mode 100644 index 57fde80aea..0000000000 --- a/files/apts/keystone +++ /dev/null @@ -1,13 +0,0 @@ -python-lxml -python-pastescript -python-pastedeploy -python-paste -sqlite3 -python-pysqlite2 -python-sqlalchemy -python-mysqldb -python-webob -python-greenlet -python-routes -libldap2-dev -libsasl2-dev diff --git a/files/apts/marconi-server b/files/apts/marconi-server deleted file mode 100644 index bc7ef22445..0000000000 --- a/files/apts/marconi-server +++ /dev/null @@ -1,3 +0,0 @@ -python-pymongo -mongodb-server -pkg-config diff --git a/files/apts/n-api b/files/apts/n-api deleted file mode 100644 index b4372d9361..0000000000 --- a/files/apts/n-api +++ /dev/null @@ -1,3 +0,0 @@ -python-dateutil -msgpack-python -fping diff --git a/files/apts/n-cpu b/files/apts/n-cpu deleted file mode 100644 index a82304dfe2..0000000000 --- a/files/apts/n-cpu +++ /dev/null @@ -1,8 +0,0 @@ -# Stuff for diablo volumes -lvm2 -open-iscsi -open-iscsi-utils # Deprecated since quantal dist:precise -genisoimage -sysfsutils -sg3-utils -python-guestfs # NOPRIME diff --git a/files/apts/n-novnc b/files/apts/n-novnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/apts/n-novnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/apts/neutron b/files/apts/neutron deleted file mode 100644 index 648716a75e..0000000000 --- a/files/apts/neutron +++ /dev/null @@ -1,25 +0,0 @@ -ebtables -iptables -iputils-ping -iputils-arping -mysql-server #NOPRIME -sudo -python-boto -python-iso8601 -python-paste -python-routes -python-suds -python-pastedeploy -python-greenlet -python-kombu -python-eventlet -python-sqlalchemy -python-mysqldb -python-pyudev -python-qpid # dist:precise -dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:precise -rabbitmq-server # NOPRIME -qpidd # NOPRIME -sqlite3 -vlan diff --git a/files/apts/nova b/files/apts/nova deleted file mode 100644 index 38c99c735b..0000000000 --- a/files/apts/nova +++ /dev/null @@ -1,46 +0,0 @@ -dnsmasq-base -dnsmasq-utils # for dhcp_release -kpartx -parted -iputils-arping 
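
These files/apts/* lists (and the files/debs/* and files/rpms/* trees that replace them later in this patch) hold one package per line, with trailing comments carrying metadata: # NOPRIME defers a package to the owning service's own install phase, # testonly marks test-only dependencies, and # dist:.../# not:... gate a package on specific distro releases. A rough sketch of filtering one such file, simplified relative to DevStack's real parser in its common functions:

    # Hypothetical helper: echo the packages from one prereq file that apply
    # to the given distro, skipping comments, NOPRIME, and non-matching dist: tags.
    filter_packages() {
        local file=$1 distro=$2 pkg meta
        while read -r pkg meta; do
            [[ -z $pkg || $pkg == \#* ]] && continue   # blank line or pure comment
            [[ $meta == *NOPRIME* ]] && continue       # installed later by the service
            if [[ $meta == *dist:* ]]; then            # restricted to listed releases
                [[ ${meta#*dist:} == *"$distro"* ]] || continue
            fi
            echo "$pkg"
        done < "$file"
    }

    filter_packages files/debs/general noble
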
-mysql-server # NOPRIME -python-mysqldb -python-xattr # needed for glance which is needed for nova --- this shouldn't be here -python-lxml # needed for glance which is needed for nova --- this shouldn't be here -gawk -iptables -ebtables -sqlite3 -sudo -qemu-kvm # NOPRIME -qemu # dist:wheezy,jessie NOPRIME -libvirt-bin # NOPRIME -pm-utils -libjs-jquery-tablesorter # Needed for coverage html reports -vlan -curl -genisoimage # required for config_drive -rabbitmq-server # NOPRIME -qpidd # dist:precise NOPRIME -socat # used by ajaxterm -python-mox -python-paste -python-migrate -python-greenlet -python-libvirt # NOPRIME -python-libxml2 -python-routes -python-numpy # used by websockify for spice console -python-pastedeploy -python-eventlet -python-cheetah -python-tempita -python-sqlalchemy -python-suds -python-lockfile -python-m2crypto -python-boto -python-kombu -python-feedparser -python-iso8601 -python-qpid # dist:precise diff --git a/files/apts/opendaylight b/files/apts/opendaylight deleted file mode 100644 index ec3cc9daf8..0000000000 --- a/files/apts/opendaylight +++ /dev/null @@ -1,2 +0,0 @@ -openvswitch-datapath-dkms # NOPRIME -openvswitch-switch # NOPRIME diff --git a/files/apts/postgresql b/files/apts/postgresql deleted file mode 100644 index bf19d397cb..0000000000 --- a/files/apts/postgresql +++ /dev/null @@ -1 +0,0 @@ -python-psycopg2 diff --git a/files/apts/ryu b/files/apts/ryu deleted file mode 100644 index 9b850807e6..0000000000 --- a/files/apts/ryu +++ /dev/null @@ -1,2 +0,0 @@ -python-eventlet -python-sphinx diff --git a/files/apts/swift b/files/apts/swift deleted file mode 100644 index 080ecdb255..0000000000 --- a/files/apts/swift +++ /dev/null @@ -1,15 +0,0 @@ -curl -libffi-dev -memcached -python-configobj -python-coverage -python-eventlet -python-greenlet -python-netifaces -python-nose -python-pastedeploy -python-simplejson -python-webob -python-xattr -sqlite3 -xfsprogs diff --git a/files/apts/tempest b/files/apts/tempest deleted file mode 100644 index f244e4e783..0000000000 --- a/files/apts/tempest +++ /dev/null @@ -1 +0,0 @@ -libxslt1-dev \ No newline at end of file diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy deleted file mode 100644 index 8fca42d124..0000000000 --- a/files/apts/tls-proxy +++ /dev/null @@ -1 +0,0 @@ -stud # only available in dist:precise diff --git a/files/apts/trema b/files/apts/trema deleted file mode 100644 index f685ca53b4..0000000000 --- a/files/apts/trema +++ /dev/null @@ -1,15 +0,0 @@ -# Trema -make -ruby1.8 -rubygems1.8 -ruby1.8-dev -libpcap-dev -libsqlite3-dev -libglib2.0-dev - -# Sliceable Switch -sqlite3 -libdbi-perl -libdbd-sqlite3-perl -apache2 -libjson-perl diff --git a/files/apts/trove b/files/apts/trove deleted file mode 100644 index 09dcee8104..0000000000 --- a/files/apts/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt1-dev # testonly diff --git a/files/apts/baremetal b/files/debs/baremetal similarity index 100% rename from files/apts/baremetal rename to files/debs/baremetal diff --git a/files/debs/ceph b/files/debs/ceph new file mode 100644 index 0000000000..69863abc34 --- /dev/null +++ b/files/debs/ceph @@ -0,0 +1,2 @@ +ceph # NOPRIME +xfsprogs diff --git a/files/debs/cinder b/files/debs/cinder new file mode 100644 index 0000000000..5d390e24bf --- /dev/null +++ b/files/debs/cinder @@ -0,0 +1,4 @@ +lvm2 +qemu-utils +tgt # NOPRIME +thin-provisioning-tools diff --git a/files/debs/dstat b/files/debs/dstat new file mode 100644 index 0000000000..40d00f4aa4 --- /dev/null +++ b/files/debs/dstat @@ -0,0 +1,2 @@ +dstat # dist:bionic +pcp diff 
--git a/files/debs/general b/files/debs/general new file mode 100644 index 0000000000..1e63e4f582 --- /dev/null +++ b/files/debs/general @@ -0,0 +1,37 @@ +apache2 +apache2-dev +bc +bsdmainutils +curl +default-jre-headless # NOPRIME +g++ +gawk +gcc +gettext # used for compiling message catalogs +git +graphviz # needed for docs +iputils-ping +libffi-dev # for pyOpenSSL +libjpeg-dev # Pillow 3.0.0 +libpq-dev # psycopg2 +libssl-dev # for pyOpenSSL +libsystemd-dev # for systemd-python +libxml2-dev # lxml +libxslt1-dev # lxml +libyaml-dev +lsof # useful when debugging +openssh-server +openssl +pkg-config +psmisc +python3-dev +python3-pip +python3-systemd +python3-venv +tar +tcpdump +unzip +uuid-runtime +wget +wget +zlib1g-dev diff --git a/files/debs/horizon b/files/debs/horizon new file mode 100644 index 0000000000..48332893b1 --- /dev/null +++ b/files/debs/horizon @@ -0,0 +1,2 @@ +apache2 # NOPRIME +libapache2-mod-wsgi # NOPRIME diff --git a/files/debs/keystone b/files/debs/keystone new file mode 100644 index 0000000000..1cfa6ffa38 --- /dev/null +++ b/files/debs/keystone @@ -0,0 +1,6 @@ +libkrb5-dev +libldap2-dev +libsasl2-dev +memcached +python3-mysqldb +sqlite3 diff --git a/files/apts/ldap b/files/debs/ldap similarity index 56% rename from files/apts/ldap rename to files/debs/ldap index 26f7aeffe3..54896bb845 100644 --- a/files/apts/ldap +++ b/files/debs/ldap @@ -1,3 +1,3 @@ ldap-utils +python3-ldap slapd -python-ldap diff --git a/files/debs/n-cpu b/files/debs/n-cpu new file mode 100644 index 0000000000..54d6fa3fd1 --- /dev/null +++ b/files/debs/n-cpu @@ -0,0 +1,11 @@ +cryptsetup +dosfstools +genisoimage +gir1.2-libosinfo-1.0 +lvm2 # NOPRIME +netcat-openbsd +open-iscsi +python3-guestfs # NOPRIME +qemu-utils +sg3-utils +sysfsutils diff --git a/files/debs/neutron-agent b/files/debs/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/debs/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/debs/neutron-common b/files/debs/neutron-common new file mode 100644 index 0000000000..f6afc5bf55 --- /dev/null +++ b/files/debs/neutron-common @@ -0,0 +1,16 @@ +acl +dnsmasq-base +dnsmasq-utils # for dhcp_release +ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces +iptables +iputils-arping +iputils-ping +mysql-server #NOPRIME +postgresql-server-dev-all +python3-mysqldb +rabbitmq-server # NOPRIME +radvd # NOPRIME +sqlite3 +sudo +vlan diff --git a/files/debs/neutron-l3 b/files/debs/neutron-l3 new file mode 100644 index 0000000000..106a6a35aa --- /dev/null +++ b/files/debs/neutron-l3 @@ -0,0 +1,3 @@ +conntrack +conntrackd +keepalived diff --git a/files/debs/nova b/files/debs/nova new file mode 100644 index 0000000000..5c00ad72d9 --- /dev/null +++ b/files/debs/nova @@ -0,0 +1,21 @@ +conntrack +curl +ebtables +genisoimage # required for config_drive +iptables +iputils-arping +kpartx +libjs-jquery-tablesorter # Needed for coverage html reports +libvirt-clients # NOPRIME +libvirt-daemon-system # NOPRIME +libvirt-dev # NOPRIME +mysql-server # NOPRIME +parted +pm-utils +python3-mysqldb +qemu-kvm # NOPRIME +rabbitmq-server # NOPRIME +socat # used by ajaxterm +sqlite3 +sudo +vlan diff --git a/files/debs/openvswitch b/files/debs/openvswitch new file mode 100644 index 0000000000..4c0af4ae2d --- /dev/null +++ b/files/debs/openvswitch @@ -0,0 +1,3 @@ +fakeroot +make +openvswitch-switch diff --git a/files/apts/cinder b/files/debs/os-brick similarity index 69% rename from files/apts/cinder rename to files/debs/os-brick index 7819c31655..4148b0c421 100644 --- 
a/files/apts/cinder +++ b/files/debs/os-brick @@ -1,6 +1,3 @@ -tgt -lvm2 -qemu-utils -libpq-dev +lsscsi open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/debs/ovn b/files/debs/ovn new file mode 100644 index 0000000000..81eea5e633 --- /dev/null +++ b/files/debs/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-controller-vtep +ovn-host diff --git a/files/debs/q-agt b/files/debs/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/debs/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/debs/q-l3 b/files/debs/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/debs/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/debs/swift b/files/debs/swift new file mode 100644 index 0000000000..67c6c8ddb4 --- /dev/null +++ b/files/debs/swift @@ -0,0 +1,7 @@ +curl +liberasurecode-dev +make +memcached +rsync +sqlite3 +xfsprogs diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy new file mode 100644 index 0000000000..5bd8e213a2 --- /dev/null +++ b/files/debs/tls-proxy @@ -0,0 +1 @@ +apache2 diff --git a/files/default_catalog.templates b/files/default_catalog.templates deleted file mode 100644 index ff00e38e09..0000000000 --- a/files/default_catalog.templates +++ /dev/null @@ -1,63 +0,0 @@ -# config for TemplatedCatalog, using camelCase because I don't want to do -# translations for legacy compat -catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 -catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0 -catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 -catalog.RegionOne.identity.name = Identity Service - - -catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s -catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s -catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s -catalog.RegionOne.compute.name = Compute Service - - -catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3 -catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3 -catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3 -catalog.RegionOne.computev3.name = Compute Service V3 - - -catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.name = Volume Service - - -catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s -catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s -catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s -catalog.RegionOne.volumev2.name = Volume Service V2 - - -catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud -catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin -catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud -catalog.RegionOne.ec2.name = EC2 Service - - -catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% -catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% -catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% -catalog.RegionOne.s3.name = S3 Service - - -catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292 
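
Deleting files/default_catalog.templates drops the file-templated Keystone catalog; endpoints are instead registered through the Identity API. The image-service entries being removed here, for instance, translate to roughly the following CLI calls (service name and description are illustrative; the URL reuses the %SERVICE_HOST% placeholder from the template):

    # Illustrative replacement for the templated image-service catalog entries.
    openstack service create --name glance --description "Image Service" image
    openstack endpoint create --region RegionOne image public   "http://$SERVICE_HOST:9292"
    openstack endpoint create --region RegionOne image internal "http://$SERVICE_HOST:9292"
    openstack endpoint create --region RegionOne image admin    "http://$SERVICE_HOST:9292"
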
-catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 -catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 -catalog.RegionOne.image.name = Image Service - -catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.name = CloudFormation service - -catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.name = Orchestration Service - -catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 -catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 -catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1 -catalog.RegionOne.metering.name = Telemetry Service diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index 2f1f1395ee..d3b9be8b6e 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,4 +1,4 @@ -dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config changetype: modify replace: olcSuffix olcSuffix: ${BASE_DN} diff --git a/files/ldap/user.ldif.in b/files/ldap/user.ldif.in new file mode 100644 index 0000000000..16a980757d --- /dev/null +++ b/files/ldap/user.ldif.in @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
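
The manager.ldif.in change stops hard-coding the hdb backend in the olcDatabase DN and substitutes ${LDAP_OLCDB_TYPE} instead (newer OpenLDAP installs default to mdb). The ${...} placeholders are filled in before the LDIF is applied; a sketch with example values, using the standard local EXTERNAL bind for cn=config changes:

    # Example values only; DevStack computes these from its LDAP settings.
    sed -e 's|${LDAP_OLCDB_NUMBER}|1|' \
        -e 's|${LDAP_OLCDB_TYPE}|mdb|' \
        -e 's|${BASE_DN}|dc=openstack,dc=org|' \
        files/ldap/manager.ldif.in > /tmp/manager.ldif
    sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/manager.ldif
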
+ +# Demo LDAP user +dn: cn=demo,ou=Users,${BASE_DN} +cn: demo +displayName: demo +givenName: demo +mail: demo@openstack.org +objectClass: inetOrgPerson +objectClass: top +sn: demo +uid: demo +userPassword: demo diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + +[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..47fbfc5e17 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, 
but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + 
sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/files/patches/unittest2-discover.patch b/files/patches/unittest2-discover.patch deleted file mode 100644 index 347300d172..0000000000 --- a/files/patches/unittest2-discover.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff -r b2efb7df637b discover.py ---- a/discover.py Thu Mar 24 00:31:02 2011 -0400 -+++ b/discover.py Thu Nov 28 12:02:19 2013 +0000 -@@ -82,7 +82,11 @@ - """ - testMethodPrefix = 'test' - sortTestMethodsUsing = cmp -- suiteClass = unittest.TestSuite -+ try: -+ import unittest2 -+ suiteClass = unittest2.TestSuite -+ except ImportError: -+ suiteClass = unittest.TestSuite - _top_level_dir = None - - def loadTestsFromTestCase(self, testCaseClass): diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector deleted file mode 100644 index c76454fded..0000000000 --- a/files/rpms-suse/ceilometer-collector +++ /dev/null @@ -1,4 +0,0 @@ -# Not available in openSUSE main repositories, but can be fetched from OBS -# (devel:languages:python and server:database projects) -mongodb -python-pymongo diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index 55078da27c..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,6 +0,0 @@ -lvm2 -tgt -qemu-tools -python-devel -postgresql-devel -open-iscsi diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index 82cb09d934..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,23 +0,0 @@ -bc -bridge-utils -ca-certificates-mozilla -curl -euca2ools -gcc -git-core -iputils -libopenssl-devel # to rebuild pyOpenSSL if needed -lsof # useful when debugging -make -openssh -openssl -psmisc -python-cmd2 # dist:opensuse-12.3 -python-pylint -python-setuptools # instead of python-distribute; dist:sle11sp2 -python-unittest2 -screen -tar -tcpdump -unzip -wget diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance deleted file mode 100644 index edd1564e92..0000000000 --- a/files/rpms-suse/glance +++ /dev/null @@ -1,12 +0,0 @@ -libxml2-devel -python-PasteDeploy -python-Routes -python-SQLAlchemy -python-argparse -python-devel -python-eventlet -python-greenlet -python-iso8601 -python-pyOpenSSL -python-wsgiref -python-xattr diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index d3bde2690c..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,20 +0,0 @@ -apache2 # NOPRIME -apache2-mod_wsgi # NOPRIME -python-CherryPy # why? 
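
Stepping back to the openstack-cli-server pair a little further up: both sides share a tiny framing protocol on /tmp/openstack.sock, namely the byte length of a JSON document, a newline, then the JSON itself, and the reply (stdout, stderr, status) comes back framed the same way. With the server running, the framing can be exercised by hand (socat assumed to be installed; this is purely a wire-format demonstration):

    # Send one framed request and dump the framed JSON reply.
    req='{"app": "openstack", "env": {}, "argv": ["help"]}'
    printf '%s\n%s' "${#req}" "$req" | socat - UNIX-CONNECT:/tmp/openstack.sock
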
(coming from apts) -python-Paste -python-PasteDeploy -python-Routes -python-Sphinx -python-SQLAlchemy -python-WebOb -python-anyjson -python-beautifulsoup -python-coverage -python-dateutil -python-eventlet -python-kombu -python-mox -python-nose -python-pylint -python-sqlalchemy-migrate -python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 403d82f926..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,14 +0,0 @@ -cyrus-sasl-devel -openldap2-devel -python-Paste -python-PasteDeploy -python-PasteScript -python-Routes -python-SQLAlchemy -python-WebOb -python-devel -python-greenlet -python-lxml -python-mysql -python-pysqlite -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index 6f59e603b2..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1,2 +0,0 @@ -python-dateutil -fping diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 7040b843bf..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,6 +0,0 @@ -# Stuff for diablo volumes -genisoimage -lvm2 -open-iscsi -sysfsutils -sg3_utils diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-novnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/rpms-suse/n-spice +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron deleted file mode 100644 index d4841b161a..0000000000 --- a/files/rpms-suse/neutron +++ /dev/null @@ -1,26 +0,0 @@ -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -iptables -iputils -mariadb # NOPRIME -python-boto -python-eventlet -python-greenlet -python-iso8601 -python-kombu -python-mysql -python-Paste -python-PasteDeploy -python-pyudev -python-Routes -python-SQLAlchemy -python-suds -rabbitmq-server # NOPRIME -sqlite3 -sudo -vlan - -# FIXME: qpid is not part of openSUSE, those names are tentative -python-qpid # NOPRIME -qpidd # NOPRIME diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova deleted file mode 100644 index 3e95724150..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,50 +0,0 @@ -curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -gawk -genisoimage # required for config_drive -iptables -iputils -kpartx -kvm # NOPRIME -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -mariadb # NOPRIME -parted -polkit -python-M2Crypto -python-m2crypto # dist:sle11sp2 -python-Paste -python-PasteDeploy -python-Routes -python-SQLAlchemy -python-Tempita -python-boto -python-cheetah -python-eventlet -python-feedparser -python-greenlet -python-iso8601 -python-kombu -python-libxml2 -python-lockfile -python-lxml # needed for glance which is needed for nova --- this shouldn't be here -python-mox -python-mysql -python-numpy # needed by websockify for spice console -python-paramiko -python-sqlalchemy-migrate -python-suds -python-xattr # needed for glance which is needed for nova --- this shouldn't be here -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan - -# FIXME: qpid is not part of openSUSE, those names are tentative 
-python-qpid # NOPRIME -qpidd # NOPRIME diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight deleted file mode 100644 index f7fafffae1..0000000000 --- a/files/rpms-suse/opendaylight +++ /dev/null @@ -1,3 +0,0 @@ -openvswitch # NOPRIME -openvswitch-switch # NOPRIME - diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql deleted file mode 100644 index bf19d397cb..0000000000 --- a/files/rpms-suse/postgresql +++ /dev/null @@ -1 +0,0 @@ -python-psycopg2 diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu deleted file mode 100644 index 6b426fb163..0000000000 --- a/files/rpms-suse/ryu +++ /dev/null @@ -1,2 +0,0 @@ -python-Sphinx -python-eventlet diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 4b14098064..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,16 +0,0 @@ -curl -memcached -python-PasteDeploy -python-WebOb -python-configobj -python-coverage -python-devel -python-eventlet -python-greenlet -python-netifaces -python-nose -python-simplejson -python-xattr -sqlite3 -xfsprogs -xinetd diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove deleted file mode 100644 index 09dcee8104..0000000000 --- a/files/rpms-suse/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt1-dev # testonly diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector deleted file mode 100644 index 9cf580d22d..0000000000 --- a/files/rpms/ceilometer-collector +++ /dev/null @@ -1,4 +0,0 @@ -selinux-policy-targeted -mongodb-server #NOPRIME -pymongo # NOPRIME -mongodb # NOPRIME diff --git a/files/rpms/ceph b/files/rpms/ceph new file mode 100644 index 0000000000..19f158fd57 --- /dev/null +++ b/files/rpms/ceph @@ -0,0 +1,3 @@ +ceph # NOPRIME +redhat-lsb-core # not:rhel9,openEuler-22.03 +xfsprogs diff --git a/files/rpms/cinder b/files/rpms/cinder index ce6181eedf..375f93e090 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,6 +1,3 @@ lvm2 -scsi-target-utils qemu-img -postgresql-devel -iscsi-initiator-utils -python-lxml #dist:f19,f20,rhel7 +targetcli diff --git a/files/rpms/dstat b/files/rpms/dstat index 8a8f8fe737..6524bed607 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1 +1 @@ -dstat \ No newline at end of file +pcp-system-tools diff --git a/files/rpms/general b/files/rpms/general index c940de6dfe..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,30 +1,42 @@ -bridge-utils +bc curl dbus -euca2ools # only for testing client +gawk gcc +gcc-c++ +gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 +graphviz # needed only for docs +httpd +httpd-devel +iptables-nft # dist:rhel9,rhel10 +iptables-services +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 +libffi-devel +libjpeg-turbo-devel # Pillow 3.0.0 +libxml2-devel # lxml +libxslt-devel # lxml +libyaml-devel +mod_ssl # required for tls-proxy on centos 9 stream computes +net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed -libxml2-devel -libxslt-devel +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre +pkgconfig +postgresql-devel # psycopg2 psmisc -pylint -python-setuptools -python-prettytable # dist:rhel6 [1] -python-unittest2 -python-virtualenv -python-devel -screen +python3-devel +python3-pip # not:openEuler-22.03 +python3-systemd +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip +util-linux wget which -bc - -# [1] : some of installed tools have 
unversioned dependencies on this, -# but others have versioned (<=0.7). So if a later version (0.7.1) -# gets installed in response to an unversioned dependency, it breaks. -# This pre-installs a compatible 0.6(ish) version from RHEL +zlib-devel diff --git a/files/rpms/glance b/files/rpms/glance deleted file mode 100644 index fc07fa787b..0000000000 --- a/files/rpms/glance +++ /dev/null @@ -1,16 +0,0 @@ -libffi-devel -libxml2-devel # testonly -libxslt-devel # testonly -mysql-devel # testonly -openssl-devel # testonly -postgresql-devel # testonly -python-argparse -python-eventlet -python-greenlet -python-lxml #dist:f19,f20,rhel7 -python-paste-deploy #dist:f19,f20,rhel7 -python-routes -python-sqlalchemy -python-wsgiref #dist:f18,f19,f20 -pyxattr -zlib-devel # testonly diff --git a/files/rpms/horizon b/files/rpms/horizon index 92afed2985..a88552bc84 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,23 +1,2 @@ -Django httpd # NOPRIME mod_wsgi # NOPRIME -pylint -python-anyjson -python-BeautifulSoup -python-boto -python-coverage -python-dateutil -python-eventlet -python-greenlet -python-httplib2 -python-kombu -python-migrate -python-mox -python-nose -python-paste #dist:f19,f20 -python-paste-deploy #dist:f19,f20 -python-routes -python-sphinx -python-sqlalchemy -python-webob -pyxattr diff --git a/files/rpms/ironic b/files/rpms/ironic deleted file mode 100644 index 6534095c20..0000000000 --- a/files/rpms/ironic +++ /dev/null @@ -1,11 +0,0 @@ -iptables -libguestfs -libvirt -libvirt-python -net-tools -openssh-clients -openvswitch -python-libguestfs -syslinux -tftp-server -xinetd diff --git a/files/rpms/keystone b/files/rpms/keystone index 7182091b31..5f19c6f70c 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,12 +1,3 @@ -python-greenlet -libxslt-devel # dist:f20 -python-lxml #dist:f19,f20 -python-paste #dist:f19,f20 -python-paste-deploy #dist:f19,f20 -python-paste-script #dist:f19,f20 -python-routes -python-sqlalchemy -python-webob +memcached +mod_ssl sqlite - -# Deps installed via pip for RHEL diff --git a/files/rpms/ldap b/files/rpms/ldap index 2f7ab5de46..d5b8fa4374 100644 --- a/files/rpms/ldap +++ b/files/rpms/ldap @@ -1,3 +1,2 @@ -openldap-servers openldap-clients -python-ldap +openldap-servers diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server deleted file mode 100644 index d7b7ea89c1..0000000000 --- a/files/rpms/marconi-server +++ /dev/null @@ -1,3 +0,0 @@ -selinux-policy-targeted -mongodb-server -pymongo diff --git a/files/rpms/n-api b/files/rpms/n-api deleted file mode 100644 index 6f59e603b2..0000000000 --- a/files/rpms/n-api +++ /dev/null @@ -1,2 +0,0 @@ -python-dateutil -fping diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 32b1546c39..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,7 +1,9 @@ -# Stuff for diablo volumes +cryptsetup +dosfstools iscsi-initiator-utils +libosinfo lvm2 -genisoimage -sysfsutils sg3_utils -python-libguestfs # NOPRIME +# Stuff for diablo volumes +sysfsutils +xorriso diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-novnc +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/n-spice b/files/rpms/n-spice deleted file mode 100644 index 24ce15ab7e..0000000000 --- a/files/rpms/n-spice +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/files/rpms/neutron b/files/rpms/neutron deleted file mode 100644 index 9fafecbf52..0000000000 --- a/files/rpms/neutron +++ /dev/null @@ -1,23 +0,0 @@ -MySQL-python -dnsmasq-utils # for 
dhcp_release -ebtables -iptables -iputils -mysql-server # NOPRIME -openvswitch # NOPRIME -python-boto -python-eventlet -python-greenlet -python-iso8601 -python-kombu -#rhel6 gets via pip -python-paste # dist:f19,f20,rhel7 -python-paste-deploy # dist:f19,f20,rhel7 -python-qpid -python-routes -python-sqlalchemy -python-suds -rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME -sqlite -sudo diff --git a/files/rpms/neutron-agent b/files/rpms/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/rpms/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common new file mode 100644 index 0000000000..fe25f57ea6 --- /dev/null +++ b/files/rpms/neutron-common @@ -0,0 +1,12 @@ +acl +dnsmasq # for q-dhcp +dnsmasq-utils # for dhcp_release +ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces +iptables +iputils +openvswitch # NOPRIME +rabbitmq-server # NOPRIME +radvd # NOPRIME +sqlite +sudo diff --git a/files/rpms/neutron-l3 b/files/rpms/neutron-l3 new file mode 100644 index 0000000000..a7a190c063 --- /dev/null +++ b/files/rpms/neutron-l3 @@ -0,0 +1,2 @@ +conntrack-tools +keepalived diff --git a/files/rpms/nova b/files/rpms/nova index e05d0d7a7e..c323224279 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,42 +1,13 @@ -MySQL-python +conntrack-tools curl -dnsmasq-utils # for dhcp_release ebtables -gawk -genisoimage # required for config_drive +genisoimage iptables iputils +kernel-modules # not:openEuler-22.03 kpartx -kvm # NOPRIME -libvirt-bin # NOPRIME -libvirt-python # NOPRIME -libxml2-python -numpy # needed by websockify for spice console -m2crypto -mysql-server # NOPRIME parted polkit -python-boto -python-cheetah -python-eventlet -python-feedparser -python-greenlet -python-iso8601 -python-kombu -python-lockfile -python-migrate -python-mox -python-paramiko # dist:f19,f20,rhel7 -# ^ on RHEL6, brings in python-crypto which conflicts with version from -# pip we need -python-paste # dist:f19,f20,rhel7 -python-paste-deploy # dist:f19,f20,rhel7 -python-qpid -python-routes -python-sqlalchemy -python-suds -python-tempita rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME sqlite sudo diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight deleted file mode 100644 index 98aaaf48f7..0000000000 --- a/files/rpms/opendaylight +++ /dev/null @@ -1 +0,0 @@ -openvswitch # NOPRIME diff --git a/files/rpms/openvswitch b/files/rpms/openvswitch new file mode 100644 index 0000000000..64796f72cd --- /dev/null +++ b/files/rpms/openvswitch @@ -0,0 +1 @@ +openvswitch diff --git a/files/rpms/os-brick b/files/rpms/os-brick new file mode 100644 index 0000000000..14ff870557 --- /dev/null +++ b/files/rpms/os-brick @@ -0,0 +1,2 @@ +iscsi-initiator-utils +lsscsi diff --git a/files/rpms/ovn b/files/rpms/ovn new file mode 100644 index 0000000000..698e57b0de --- /dev/null +++ b/files/rpms/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-host +ovn-vtep diff --git a/files/rpms/postgresql b/files/rpms/postgresql deleted file mode 100644 index bf19d397cb..0000000000 --- a/files/rpms/postgresql +++ /dev/null @@ -1 +0,0 @@ -python-psycopg2 diff --git a/files/rpms/q-agt b/files/rpms/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/rpms/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/rpms/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/rpms/ryu 
b/files/rpms/ryu deleted file mode 100644 index 9b850807e6..0000000000 --- a/files/rpms/ryu +++ /dev/null @@ -1,2 +0,0 @@ -python-eventlet -python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index 938d2c8fe6..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,16 +1,6 @@ curl -libffi-devel +liberasurecode-devel memcached -python-configobj -python-coverage -python-eventlet -python-greenlet -python-netifaces -python-nose -python-paste-deploy # dist:f19,f20,rhel7 -python-simplejson -python-webob -pyxattr +rsync-daemon sqlite xfsprogs -xinetd diff --git a/files/rpms/tempest b/files/rpms/tempest deleted file mode 100644 index e7bbd43cd6..0000000000 --- a/files/rpms/tempest +++ /dev/null @@ -1 +0,0 @@ -libxslt-devel diff --git a/files/rpms/trove b/files/rpms/trove deleted file mode 100644 index c5cbdea012..0000000000 --- a/files/rpms/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt-devel # testonly diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c670531b31..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -4,76 +4,76 @@ log file = %SWIFT_DATA_DIR%/logs/rsyncd.log pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 -[account6012] +[account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6612.lock -[account6022] +[account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6622.lock -[account6032] +[account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6632.lock -[account6042] +[account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6642.lock -[container6011] +[container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6611.lock -[container6021] +[container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6621.lock -[container6031] +[container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6631.lock -[container6041] +[container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6641.lock -[object6010] +[object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6613.lock -[object6020] +[object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6020.lock +lock file = 
%SWIFT_DATA_DIR%/run/object6623.lock -[object6030] +[object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6633.lock -[object6040] +[object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = %SWIFT_DATA_DIR%/run/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/functions b/functions index 80f98adfb2..829fc86c55 100644 --- a/functions +++ b/functions @@ -1,3 +1,5 @@ +#!/bin/bash +# # functions - DevStack-specific functions # # The following variables are assumed to be defined by certain functions: @@ -8,69 +10,161 @@ # - ``GLANCE_HOSTPORT`` # +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0 +declare -r -g _DEVSTACK_FUNCTIONS=1 + # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source ${FUNC_DIR}/functions-common +source ${FUNC_DIR}/inc/ini-config +source ${FUNC_DIR}/inc/meta-config +source ${FUNC_DIR}/inc/python +source ${FUNC_DIR}/inc/rootwrap +source ${FUNC_DIR}/inc/async # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_FUNCTIONS=$(set +o | grep xtrace) set +o xtrace +# Check if a function already exists +function function_exists { + declare -f -F $1 > /dev/null +} -# Checks if installed Apache is <= given version -# $1 = x.y.z (version string of Apache) -function check_apache_version { - local cmd="apachectl" - if ! [[ -x $(which apachectl 2>/dev/null) ]]; then - cmd="/usr/sbin/apachectl" - fi +# short_source prints out the current location of the caller in a way +# that strips redundant directories. This is useful for PS4 usage. +function short_source { + saveIFS=$IFS + IFS=" " + called=($(caller 0)) + IFS=$saveIFS + file=${called[2]} + file=${file#$RC_DIR/} + printf "%-40s " "$file:${called[1]}:${called[0]}" +} +# PS4 is exported to child shells and uses the 'short_source' function, so +# export it so child shells have access to the 'short_source' function also. +export -f short_source - local version=$($cmd -v | grep version | grep -Po 'Apache/\K[^ ]*') - expr "$version" '>=' $1 > /dev/null +# Download a file from a URL +# +# Will check cache (in $FILES) or download given URL. +# +# Argument is the URL to the remote file +# +# Will echo the local path to the file as the output. Will die on +# failure to download. +# +# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS +# and tools/image_list.sh +function get_extra_file { + local file_url=$1 + + file_name=$(basename "$file_url") + if [[ $file_url != file* ]]; then + # If the file isn't cache, download it + if [[ ! -f $FILES/$file_name ]]; then + wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name + if [[ $? 
-ne 0 ]]; then + die "$file_url could not be downloaded" + fi + fi + echo "$FILES/$file_name" + return + else + # just strip the file:// bit and that's the path to the file + echo $file_url | sed 's/$file:\/\///g' + fi } +# Generate image property arguments for OSC +# +# Arguments: properties, one per, like propname=value +# +# Result is --property propname1=value1 --property propname2=value2 +function _image_properties_to_arg { + local result="" + for property in $*; do + result+=" --property $property" + done + echo $result +} -# Cleanup anything from /tmp on unstack -# clean_tmp -function cleanup_tmp { - local tmp_dir=${TMPDIR:-/tmp} +# Upload an image to glance using the configured mechanism +# +# Arguments: +# image name +# container format +# disk format +# path to image file +# optional properties (format of propname=value) +# +function _upload_image { + local image_name="$1" + shift + local container="$1" + shift + local disk="$1" + shift + local image="$1" + shift + local properties + local useimport + + properties=$(_image_properties_to_arg $*) + + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + useimport="--import" + fi - # see comments in pip_install - sudo rm -rf ${tmp_dir}/pip-build.* + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") } - # Retrieve an image from a URL and upload into Glance. # Uses the following variables: # # - ``FILES`` must be set to the cache dir # - ``GLANCE_HOSTPORT`` # -# upload_image image-url glance-token +# upload_image image-url function upload_image { local image_url=$1 - local token=$2 + + local image image_fname image_name + + local max_attempts=5 # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - IMAGE_FNAME=`basename "$image_url"` + image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. - if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + # Signal failure to download to the caller, so they can fail early + return 1 + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi - IMAGE="$FILES/${IMAGE_FNAME}" + image="$FILES/${image_fname}" else - # File based URL (RFC 1738): file://host/path + # File based URL (RFC 1738): ``file://host/path`` # Remote files are not considered here. - # *nix: file:///home/user/path/file - # windows: file:///C:/Documents%20and%20Settings/user/path/file - IMAGE=$(echo $image_url | sed "s/^file:\/\///g") - if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then + # unix: ``file:///home/user/path/file`` + # windows: ``file:///C:/Documents%20and%20Settings/user/path/file`` + image=$(echo $image_url | sed "s/^file:\/\///g") + if [[ ! 
-f $image || "$(stat -c "%s" $image)" == "0" ]]; then echo "Not found: $image_url" return fi @@ -78,14 +172,14 @@ function upload_image { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then - IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}" + image_name="${image_fname%.tar.gz}" + _upload_image "$image_name" ami ami "$image" return fi # vmdk format images if [[ "$image_url" =~ '.vmdk' ]]; then - IMAGE_NAME="${IMAGE_FNAME%.vmdk}" + image_name="${image_fname%.vmdk}" # Before we can upload vmdk type images to glance, we need to know it's # disk type, storage adapter, and networking adapter. These values are @@ -98,17 +192,19 @@ function upload_image { # If the filename does not follow the above format then the vsphere # driver will supply default values. - vmdk_adapter_type="" - vmdk_disktype="" - vmdk_net_adapter="" + local vmdk_disktype="" + local vmdk_net_adapter="e1000" + local path_len # vmdk adapter type - vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })" + local vmdk_adapter_type + vmdk_adapter_type="$(head -25 $image | { grep -a -F -m 1 'ddb.adapterType =' $image || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" # vmdk disk type - vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })" + local vmdk_create_type + vmdk_create_type="$(head -25 $image | { grep -a -F -m 1 'createType=' $image || true; })" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%\"*}" @@ -116,47 +212,47 @@ function upload_image { `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" - elif [[ "$vmdk_create_type" = "monolithicFlat" || \ - "$vmdk_create_type" = "vmfs" ]]; then - # Attempt to retrieve the *-flat.vmdk - flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" + elif [[ "$vmdk_create_type" = "monolithicFlat" || "$vmdk_create_type" = "vmfs" ]]; then + # Attempt to retrieve the ``*-flat.vmdk`` + local flat_fname + flat_fname="$(head -25 $image | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $image || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" if [[ -z "$flat_fname" ]]; then - flat_fname="$IMAGE_NAME-flat.vmdk" + flat_fname="$image_name-flat.vmdk" fi - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_url="${image_url:0:$path_len}$flat_fname" + path_len=`expr ${#image_url} - ${#image_fname}` + local flat_url="${image_url:0:$path_len}$flat_fname" warn $LINENO "$descriptor_data_pair_msg"` `" Attempt to retrieve the *-flat.vmdk: $flat_url" if [[ $flat_url != file* ]]; then if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then - wget -c $flat_url -O $FILES/$flat_fname + wget --progress=dot:giga -c $flat_url -O $FILES/$flat_fname fi - IMAGE="$FILES/${flat_fname}" + image="$FILES/${flat_fname}" else - IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") - if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then + image=$(echo $flat_url | sed "s/^file:\/\///g") + if [[ ! 
-f $image || "$(stat -c "%s" $image)" == "0" ]]; then echo "Flat disk not found: $flat_url" return 1 fi fi - IMAGE_NAME="${flat_fname}" + image_name="${flat_fname}" vmdk_disktype="preallocated" elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then vmdk_disktype="streamOptimized" elif [[ -z "$vmdk_create_type" ]]; then # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) # to retrieve appropriate metadata - if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then + if [[ ${image_name: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` - `" Filename provided: ${IMAGE_NAME}" + `" Filename provided: ${image_name}" else - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname + descriptor_fname="${image_name:0:${#image_name} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#image_fname}` + local flat_path="${image_url:0:$path_len}" + local descriptor_url=$flat_path$descriptor_fname warn $LINENO "$descriptor_data_pair_msg"` `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" if [[ $flat_path != file* ]]; then @@ -185,129 +281,157 @@ function upload_image { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. - property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` + property_string=`echo "$image_name" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" + _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter" + return fi - # XenServer-vhd-ovf-format images are provided as .vhd.tgz - # and should not be decompressed prior to loading - if [[ "$image_url" =~ '.vhd.tgz' ]]; then - IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}" - FORCE_VM_MODE="" - if [[ "$IMAGE_NAME" =~ 'cirros' ]]; then - # Cirros VHD image currently only boots in PV mode. - # Nova defaults to PV for all VHD images, but - # the glance setting is needed for booting - # directly from volume. - FORCE_VM_MODE="--property vm_mode=xen" + if [[ "$image_url" =~ '.hds' ]]; then + image_name="${image_fname%.hds}" + vm_mode=${image_name##*-} + if [[ $vm_mode != 'exe' && $vm_mode != 'hvm' ]]; then + die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image" fi - glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "$IMAGE_NAME" --is-public=True \ - --container-format=ovf --disk-format=vhd \ - $FORCE_VM_MODE < "${IMAGE}" + + _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode return fi - # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. - # and should not be decompressed prior to loading. - # Setting metadata, so PV mode is used. 
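+    # A usage sketch of the suffix convention handled in the Virtuozzo
+    # branch above (the file name is an assumption, not part of this
+    # change): for image_fname=virtuozzo-7-exe.hds, image_name becomes
+    # "virtuozzo-7-exe", and vm_mode=${image_name##*-} strips everything
+    # up to the last "-", yielding "exe".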
- if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" - glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "$IMAGE_NAME" --is-public=True \ - --container-format=tgz --disk-format=raw \ - --property vm_mode=xen < "${IMAGE}" - return + local kernel="" + local ramdisk="" + local disk_format="" + local container_format="" + local unpack="" + local img_property="" + + # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model + # to libvirt in the image properties. + if [[ "$VIRT_DRIVER" == "libvirt" ]]; then + if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then + img_property="hw_rng_model=virtio" + fi fi - KERNEL="" - RAMDISK="" - DISK_FORMAT="" - CONTAINER_FORMAT="" - UNPACK="" - case "$IMAGE_FNAME" in + case "$image_fname" in *.tar.gz|*.tgz) # Extract ami and aki files - [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && - IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || - IMAGE_NAME="${IMAGE_FNAME%.tgz}" - xdir="$FILES/images/$IMAGE_NAME" + [ "${image_fname%.tar.gz}" != "$image_fname" ] && + image_name="${image_fname%.tar.gz}" || + image_name="${image_fname%.tgz}" + local xdir="$FILES/images/$image_name" rm -Rf "$xdir"; mkdir "$xdir" - tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" - KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + tar -zxf $image -C "$xdir" + kernel=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + ramdisk=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + image=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - if [[ -z "$IMAGE_NAME" ]]; then - IMAGE_NAME=$(basename "$IMAGE" ".img") + if [[ -z "$image_name" ]]; then + image_name=$(basename "$image" ".img") fi ;; *.img) - IMAGE_NAME=$(basename "$IMAGE" ".img") - format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }') + image_name=$(basename "$image" ".img") + local format + format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }') if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then - DISK_FORMAT=$format + disk_format=$format else - DISK_FORMAT=raw + disk_format=raw fi - CONTAINER_FORMAT=bare + container_format=bare ;; *.img.gz) - IMAGE_NAME=$(basename "$IMAGE" ".img.gz") - DISK_FORMAT=raw - CONTAINER_FORMAT=bare - UNPACK=zcat + image_name=$(basename "$image" ".img.gz") + disk_format=raw + container_format=bare + unpack=zcat + ;; + *.img.bz2) + image_name=$(basename "$image" ".img.bz2") + disk_format=qcow2 + container_format=bare + unpack=bunzip2 ;; *.qcow2) - IMAGE_NAME=$(basename "$IMAGE" ".qcow2") - DISK_FORMAT=qcow2 - CONTAINER_FORMAT=bare + image_name=$(basename "$image" ".qcow2") + disk_format=qcow2 + container_format=bare + ;; + *.qcow2.xz) + image_name=$(basename "$image" ".qcow2.xz") + disk_format=qcow2 + container_format=bare + unpack=unxz + ;; + *.raw) + image_name=$(basename "$image" ".raw") + disk_format=raw + container_format=bare ;; *.iso) - IMAGE_NAME=$(basename "$IMAGE" ".iso") - DISK_FORMAT=iso - CONTAINER_FORMAT=bare + image_name=$(basename "$image" ".iso") + disk_format=iso + container_format=bare + ;; + *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz) + local extension="${image_fname#*.}" + image_name=$(basename "$image" ".$extension") + disk_format=$(echo $image_fname | grep -oP 
'(?<=\.)vhdx?(?=\.|$)') + container_format=bare + if [ "${image_fname##*.}" == "gz" ]; then + unpack=zcat + fi ;; - *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + *) echo "Do not know what to do with $image_fname"; false;; esac - if is_arch "ppc64"; then - IMG_PROPERTY="--property hw_cdrom_bus=scsi" + if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then + img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0" + fi + + if is_arch "aarch64"; then + img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'" fi - if [ "$CONTAINER_FORMAT" = "bare" ]; then - if [ "$UNPACK" = "zcat" ]; then - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + if [ "$container_format" = "bare" ]; then + if [ "$unpack" = "zcat" ]; then + _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property + elif [ "$unpack" = "bunzip2" ]; then + _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property + elif [ "$unpack" = "unxz" ]; then + # NOTE(brtknr): unxz the file first and cleanup afterwards to + # prevent timeout while Glance tries to upload image (e.g. to Swift). + local tmp_dir + local image_path + tmp_dir=$(mktemp -d) + image_path="$tmp_dir/$image_name" + unxz -cv "${image}" > "$image_path" + _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property + rm -rf $tmp_dir else - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" + _upload_image "$image_name" $container_format $disk_format "$image" $img_property fi else # Use glance client to add the kernel the root filesystem. # We parse the results of the first upload to get the glance ID of the # kernel for use when uploading the root filesystem. 
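+    # A sketch of the resulting three-step AMI flow (the image names are
+    # illustrative, not part of this change):
+    #   kernel_id=$(openstack image create cirros-kernel --container-format aki --disk-format aki --file kernel -f value -c id)
+    #   ramdisk_id=$(openstack image create cirros-ramdisk --container-format ari --disk-format ari --file ramdisk -f value -c id)
+    #   openstack image create cirros --container-format ami --disk-format ami --property kernel_id=$kernel_id --property ramdisk_id=$ramdisk_id --file disk.img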
- KERNEL_ID=""; RAMDISK_ID=""; - if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + local kernel_id="" ramdisk_id=""; + if [ -n "$kernel" ]; then + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) fi - if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + if [ -n "$ramdisk" ]; then + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi } @@ -323,57 +447,103 @@ function use_database { # No backends registered means this is likely called from ``localrc`` # This is now deprecated usage DATABASE_TYPE=$1 - DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n" + deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc" else # This should no longer get called...here for posterity use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 fi } +#Macro for curl statements. curl requires -g option for literal IPv6 addresses. +CURL_GET="${CURL_GET:-curl -g}" # Wait for an HTTP server to start answering requests # wait_for_service timeout url +# +# If the service we want is behind a proxy, the proxy may be available +# before the service. Compliant proxies will return a 503 in this case +# Loop until we get something else. +# Also check for the case where there is no proxy and the service just +# hasn't started yet. curl returns 7 for Failed to connect to host. function wait_for_service { local timeout=$1 local url=$2 - timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" + local rval=0 + time_start "wait_for_service" + timeout $timeout bash -x < [boot-timeout] [from_net] [expected] function ping_check { - if is_service_enabled neutron; then - _ping_check_neutron "$1" $2 $3 $4 - return + local ip=$1 + local timeout=${2:-30} + local from_net=${3:-""} + local expected=${4:-True} + local op="!" + local failmsg="[Fail] Couldn't ping server" + local ping_cmd="ping" + + # if we don't specify a from_net we're expecting things to work + # fine from our local box. + if [[ -n "$from_net" ]]; then + # TODO(stephenfin): Is there any way neutron could be disabled now? 
+        if is_service_enabled neutron; then
+            ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
+        fi
     fi
-    _ping_check_novanet "$1" $2 $3 $4
-}

-# ping check for nova
-# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK``
-function _ping_check_novanet {
-    local from_net=$1
-    local ip=$2
-    local boot_timeout=$3
-    local expected=${4:-"True"}
-    local check_command=""
-    MULTI_HOST=`trueorfalse False $MULTI_HOST`
-    if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
-        return
+    # invert the logic if we're testing no connectivity
+    if [[ "$expected" != "True" ]]; then
+        op=""
+        failmsg="[Fail] Could ping server"
     fi
-    if [[ "$expected" = "True" ]]; then
-        check_command="while ! ping -c1 -w1 $ip; do sleep 1; done"
-    else
-        check_command="while ping -c1 -w1 $ip; do sleep 1; done"
-    fi
-    if ! timeout $boot_timeout sh -c "$check_command"; then
-        if [[ "$expected" = "True" ]]; then
-            die $LINENO "[Fail] Couldn't ping server"
-        else
-            die $LINENO "[Fail] Could ping server"
-        fi
+
+    # Because we've transformed this command so many times, print it
+    # out at the end.
+    local check_command="while $op $ping_cmd -c1 -w1 $ip; do sleep 1; done"
+    echo "Checking connectivity with $check_command"
+
+    if ! timeout $timeout sh -c "$check_command"; then
+        die $LINENO $failmsg
     fi
 }

@@ -381,11 +551,14 @@ function _ping_check_novanet {
 function get_instance_ip {
     local vm_id=$1
     local network_name=$2
-    local nova_result="$(nova show $vm_id)"
-    local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    local addresses
+    local ip
+
+    addresses=$(openstack server show -c addresses -f value "$vm_id")
+    ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p")
     if [[ $ip = "" ]];then
-        echo "$nova_result"
-        die $LINENO "[Fail] Coudn't get ipaddress of VM"
+        echo "addresses of server $vm_id : $addresses"
+        die $LINENO "[Fail] Couldn't get ipaddress of VM"
     fi
     echo $ip
 }

@@ -432,7 +605,8 @@ function check_path_perm_sanity {
     # homedir permissions on RHEL and common practice of making DEST in
     # the stack user's homedir.

-    local real_path=$(readlink -f $1)
+    local real_path
+    real_path=$(readlink -f $1)
     local rebuilt_path=""
     for i in $(echo ${real_path} | tr "/" " "); do
         rebuilt_path=$rebuilt_path"/"$i
@@ -441,7 +615,7 @@
             echo "*** DEST path element"
             echo "***   ${rebuilt_path}"
             echo "*** appears to have 0700 permissions."
-            echo "*** This is very likely to cause fatal issues for devstack daemons."
+            echo "*** This is very likely to cause fatal issues for DevStack daemons."

             if [[ -n "$SKIP_PATH_SANITY" ]]; then
                 return
@@ -454,82 +628,253 @@
 }

-# This function recursively compares versions, and is not meant to be
-# called by anything other than vercmp_numbers below. This function does
-# not work with alphabetic versions.
-#
-# _vercmp_r sep ver1 ver2
-function _vercmp_r {
-    typeset sep
-    typeset -a ver1=() ver2=()
-    sep=$1; shift
-    ver1=("${@:1:sep}")
-    ver2=("${@:sep+1}")
-
-    if ((ver1 > ver2)); then
-        echo 1; return 0
-    elif ((ver2 > ver1)); then
-        echo -1; return 0
+# vercmp ver1 op ver2
+#  Compare VER1 to VER2
+#   - op is one of < <= == >= >
+#   - returns true if satisfied
+#  e.g.
+#  if vercmp 1.0 "<" 2.0; then
+#    ...
+#  fi
+function vercmp {
+    local v1=$1
+    local op=$2
+    local v2=$3
+    local result
+
+    # sort the two numbers with sort's "-V" argument.  Based on if v2
+    # swapped places with v1, we can determine ordering.
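+    # A sketch of the trick (not part of this change): GNU sort -V
+    # orders version strings component by component, so
+    #   printf '1.10\n1.9\n' | sort -V | head -1    # prints 1.9
+    # and if the first sorted entry equals $v1, then v1 <= v2.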
+ result=$(echo -e "$v1\n$v2" | sort -V | head -1) + + case $op in + "==") + [ "$v1" = "$v2" ] + return + ;; + ">") + [ "$v1" != "$v2" ] && [ "$result" = "$v2" ] + return + ;; + "<") + [ "$v1" != "$v2" ] && [ "$result" = "$v1" ] + return + ;; + ">=") + [ "$result" = "$v2" ] + return + ;; + "<=") + [ "$result" = "$v1" ] + return + ;; + *) + die $LINENO "unrecognised op: $op" + ;; + esac +} + +# This sets up defaults we like in devstack for logging for tracking +# down issues, and makes sure everything is done the same between +# projects. +# NOTE(jh): Historically this function switched between three different +# functions: setup_systemd_logging, setup_colorized_logging and +# setup_standard_logging_identity. Since we always run with systemd now, +# this could be cleaned up, but the other functions may still be in use +# by plugins. Since deprecations haven't worked in the past, we'll just +# leave them in place. +function setup_logging { + setup_systemd_logging $1 +} + +# This function sets log formatting options for colorizing log +# output to stdout. It is meant to be called by lib modules. +function setup_colorized_logging { + local conf_file=$1 + # Add color to logging output + iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_systemd_logging { + local conf_file=$1 + # NOTE(sdague): this is a nice to have, and means we're using the + # native systemd path, which provides for things like search on + # request-id. However, there may be an eventlet interaction here, + # so going off for now. + USE_JOURNAL=$(trueorfalse False USE_JOURNAL) + local pidstr="" + if [[ "$USE_JOURNAL" == "True" ]]; then + iniset $conf_file DEFAULT use_journal "True" + # if we are using the journal directly, our process id is already correct + else + pidstr="(pid=%(process)d) " fi + iniset $conf_file DEFAULT logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" + + iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_standard_logging_identity { + local conf_file=$1 + iniset $conf_file DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s" +} - if ((sep <= 1)); then - echo 0; return 0 +# These functions are provided for basic fall-back functionality for +# projects that include parts of DevStack (Grenade). stack.sh will +# override these with more specific versions for DevStack (with fancy +# spinners, etc). We never override an existing version +if ! 
function_exists echo_summary; then + function echo_summary { + echo $@ + } +fi +if ! function_exists echo_nolog; then + function echo_nolog { + echo $@ + } +fi + + +# create_disk - Create, configure, and mount a backing disk +function create_disk { + local node_number + local disk_image=${1} + local storage_data_dir=${2} + local loopback_disk_size=${3} + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + + destroy_disk $disk_image $storage_data_dir + + # Create an empty file of the correct size (and ensure the + # directory structure up to that path exists) + sudo mkdir -p $(dirname ${disk_image}) + sudo truncate -s ${loopback_disk_size} ${disk_image} + + # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in + # a single inode. Keeping the default inode size (256) will result in multiple + # inodes being used to store xattr. Retrieving the xattr will be slower + # since we have to read multiple inodes. This statement is true for both + # Swift and Ceph. + sudo mkfs.xfs -f -i size=1024 ${disk_image} + + # Install a new loopback fstab entry for this disk image, and mount it + echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab + sudo mkdir -p $storage_data_dir + sudo mount -v $storage_data_dir +} + +# Unmount, de-configure, and destroy a backing disk +function destroy_disk { + local disk_image=$1 + local storage_data_dir=$2 + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + + # Unmount the target, if mounted + if egrep -q $storage_data_dir /proc/mounts; then + sudo umount $storage_data_dir fi - _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" + # Clear any fstab rules + sudo sed -i '/.*comment=$key.*/ d' /etc/fstab + + # Delete the file + sudo rm -f $disk_image } -# This function compares two versions and is meant to be called by -# external callers. Please note the function assumes non-alphabetic -# versions. For example, this will work: -# -# vercmp_numbers 1.10 1.4 -# -# The above will return "1", as 1.10 is greater than 1.4. -# -# vercmp_numbers 5.2 6.4 -# -# The above will return "-1", as 5.2 is less than 6.4. -# -# vercmp_numbers 4.0 4.0 -# -# The above will return "0", as the versions are equal. -# -# vercmp_numbers ver1 ver2 -function vercmp_numbers { - typeset v1=$1 v2=$2 sep - typeset -a ver1 ver2 +# set_mtu - Set MTU on a device +function set_mtu { + local dev=$1 + local mtu=$2 + sudo ip link set mtu $mtu dev $dev +} - IFS=. read -ra ver1 <<< "$v1" - IFS=. read -ra ver2 <<< "$v2" - _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" +# running_in_container - Returns true otherwise false +function running_in_container { + [[ $(systemd-detect-virt --container) != 'none' ]] } -# This function sets log formatting options for colorizing log -# output to stdout. It is meant to be called by lib modules. -# The last two parameters are optional and can be used to specify -# non-default value for project and user format variables. -# Defaults are respectively 'project_name' and 'user_name' +# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling +function enable_kernel_bridge_firewall { + # Load bridge module. This module provides access to firewall for bridged + # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to + # enable/disable bridge firewalling + sudo modprobe bridge + # For newer kernels (3.18+), those sysctl settings are split into a separate + # kernel module (br_netfilter). Load it too, if present. 
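+    # To confirm the effect by hand afterwards (illustrative only, not
+    # run by DevStack):
+    #   sysctl net.bridge.bridge-nf-call-iptables    # expect "... = 1"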
+    sudo modprobe br_netfilter 2>> /dev/null || :
+    # Enable bridge firewalling in case it's disabled in kernel (upstream
+    # default is enabled, but some distributions may decide to change it).
+    # This is at least needed for RHEL 7.2 and earlier releases.
+    for proto in ip ip6; do
+        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
+    done
+}

+
+# Set a systemd system override
+#
+# This sets a system-side override in system.conf. A per-service
+# override would be /etc/systemd/system/${service}.service/override.conf
+function set_systemd_override {
+    local key="$1"
+    local value="$2"
+
+    local sysconf="/etc/systemd/system.conf"
+    iniset -sudo "${sysconf}" "Manager" "$key" "$value"
+    echo "Set systemd system override for ${key}=${value}"
+
+    sudo systemctl daemon-reload
+}

+# Get a random port from the local port range
+#
+# This function returns an available port in the local port range. The search
+# order is not truly random, but should be considered a random value by the
+# user because it depends on the state of your local system.
+function get_random_port {
+    read lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range
+    while true; do
+        for (( port = upper_port ; port >= lower_port ; port-- )); do
+            sudo lsof -i ":$port" &> /dev/null
+            if [[ $? > 0 ]] ; then
+                break 2
+            fi
+        done
+    done
+    echo $port
+}

+# Save some state information
+#
+# Write out various useful state information to /etc/devstack-version
+function write_devstack_version {
+    cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
+DevStack Version: ${DEVSTACK_SERIES}
+Change: $(git log --format="%H %s %ci" -1)
+OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
+EOF
+}

 # Restore xtrace
-$XTRACE
+$_XTRACE_FUNCTIONS

 # Local variables:
 # mode: shell-script
diff --git a/functions-common b/functions-common
index cc90c073cf..c2042c4fef 100644
--- a/functions-common
+++ b/functions-common
@@ -1,3 +1,5 @@
+#!/bin/bash
+#
 # functions-common - Common functions used by DevStack components
 #
 # The canonical copy of this file is maintained in the DevStack repo.
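# A usage sketch for set_systemd_override defined in functions above
# (the key and value are examples only, not part of this change):
#   set_systemd_override DefaultTimeoutStopSec 300
# writes DefaultTimeoutStopSec=300 under [Manager] in
# /etc/systemd/system.conf and runs "systemctl daemon-reload".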
@@ -13,7 +15,6 @@ # - OpenStack Functions # - Package Functions # - Process Functions -# - Python Functions # - Service Functions # - System Functions # @@ -23,179 +24,244 @@ # - ``ERROR_ON_CLONE`` # - ``FILES`` # - ``OFFLINE`` -# - ``PIP_DOWNLOAD_CACHE`` -# - ``PIP_USE_MIRRORS`` # - ``RECLONE`` # - ``REQUIREMENTS_DIR`` # - ``STACK_USER`` -# - ``TRACK_DEPENDS`` -# - ``UNDO_REQUIREMENTS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` +# # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_FUNCTIONS_COMMON=$(set +o | grep xtrace) set +o xtrace - -# Config Functions -# ================ - -# Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... -function iniadd { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values - $xtrace -} - -# Comment an option in an INI file -# inicomment config-file section option -function inicomment { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" - $xtrace +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0 +declare -r -g _DEVSTACK_FUNCTIONS_COMMON=1 + +# Global Config Variables +declare -A -g GITREPO +declare -A -g GITBRANCH +declare -A -g GITDIR + +# Systemd service file environment variables per service +declare -A -g SYSTEMD_ENV_VARS + +KILL_PATH="$(which kill)" + +# Save these variables to .stackenv +STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ + KEYSTONE_SERVICE_URI \ + LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ + HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION" + + +# Saves significant environment variables to .stackenv for later use +# Refers to a lot of globals, only TOP_DIR and STACK_ENV_VARS are required to +# function, the rest are simply saved and do not cause problems if they are undefined. +# save_stackenv [tag] +function save_stackenv { + local tag=${1:-""} + # Save some values we generated for later use + time_stamp=$(date "+$TIMESTAMP_FORMAT") + echo "# $time_stamp $tag" >$TOP_DIR/.stackenv + for i in $STACK_ENV_VARS; do + echo $i=${!i} >>$TOP_DIR/.stackenv + done } -# Get an option from an INI file -# iniget config-file section option -function iniget { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} - $xtrace -} +# Update/create user clouds.yaml file. +# clouds.yaml will have +# - A `devstack` entry for the `demo` user for the `demo` project. +# - A `devstack-admin` entry for the `admin` user for the `admin` project. +# write_clouds_yaml +function write_clouds_yaml { + # The location is a variable to allow for easier refactoring later to make it + # overridable. There is currently no usecase where doing so makes sense, so + # it's not currently configurable. 
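+    # The entries written below end up shaped roughly like this in
+    # /etc/openstack/clouds.yaml (the values are illustrative):
+    #   clouds:
+    #     devstack-admin:
+    #       auth:
+    #         auth_url: http://203.0.113.10/identity
+    #         username: admin
+    #         password: secret
+    #         project_name: admin
+    #       region_name: RegionOne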
-# Get a multiple line option from an INI file -# iniget_multiline config-file section option -function iniget_multiline { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local values - values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") - echo ${values} - $xtrace -} + CLOUDS_YAML=/etc/openstack/clouds.yaml -# Determinate is the given option present in the INI file -# ini_has_option config-file section option -function ini_has_option { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - $xtrace - [ -n "$line" ] -} + sudo mkdir -p $(dirname $CLOUDS_YAML) + sudo chown -R $STACK_USER /etc/openstack -# Set an option in an INI file -# iniset config-file section option value -function iniset { - local xtrace=$(set +o | grep xtrace) + CA_CERT_ARG='' + if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" + fi + # devstack: user with the member role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-admin: user with the admin role on the admin project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + + # devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-alt: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-alt-member: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_member \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # 
devstack-system-admin: user with the admin role on the system
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-system-admin \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username admin \
+        --os-password $ADMIN_PASSWORD \
+        --os-system-scope all
+
+    # devstack-system-member: user with the member role on the system
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-system-member \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username system_member \
+        --os-password $ADMIN_PASSWORD \
+        --os-system-scope all
+
+    # devstack-system-reader: user with the reader role on the system
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-system-reader \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username system_reader \
+        --os-password $ADMIN_PASSWORD \
+        --os-system-scope all
+
+    cat >> $CLOUDS_YAML <<EOF
+functional:
+  image_name: $DEFAULT_IMAGE_NAME
+EOF
+}

+# trueorfalse <True|False> <VAR>
+#
+# Normalize config-value provided in variable VAR to either "True" or
+# "False". If VAR is unset (i.e. $VAR evaluates as empty), the value
+# of the second argument will be used as the default value.
+#
+# Accepts as False: 0 no No NO false False FALSE
+# Accepts as True: 1 yes Yes YES true True TRUE
+#
+# usage:
+#  VAL=$(trueorfalse False VAL)
 function trueorfalse {
     local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-    [[ -z $section || -z $option ]] && return
+
     local default=$1
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    fi
-    if ! ini_has_option "$file" "$section" "$option"; then
-        # Add it
-        sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-    else
-        local sep=$(echo -ne "\x01")
-        # Replace it
-        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+    if [ -z $2 ]; then
+        die $LINENO "variable to normalize required"
     fi
+    local testval=${!2:-}
+
+    case "$testval" in
+        "1" | [yY]es | "YES" | [tT]rue | "TRUE" ) echo "True" ;;
+        "0" | [nN]o | "NO" | [fF]alse | "FALSE" ) echo "False" ;;
+        * )                                       echo "$default" ;;
+    esac
-

-# Set a multiple line option in an INI file
-# iniset_multiline config-file section option value1 value2 valu3 ...
-function iniset_multiline {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-    local values
-    for v in $@; do
-        # The later sed command inserts each new value in the line next to
-        # the section identifier, which causes the values to be inserted in
-        # the reverse order. Do a reverse here to keep the original order.
-        values="$v ${values}"
-    done
-    if ! grep -q "^\[$section\]" "$file"; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    else
-        # Remove old values
-        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
-    fi
-    # Add new ones
-    for v in $values; do
-        sed -i -e "/^\[$section\]/ a\\
-$option = $v
-" "$file"
-    done
     $xtrace
 }

-# Uncomment an option in an INI file
-# iniuncomment config-file section option
-function iniuncomment {
-    local xtrace=$(set +o | grep xtrace)
+# bool_to_int
+#
+# Convert True|False to int 1 or 0
+# This function can be used to convert the output of trueorfalse
+# to an int, following C conventions where false is 0 and true is 1.
+function bool_to_int {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+    if [ -z $1 ]; then
+        die $LINENO "Bool value required"
+    fi
+    if [[ $1 == "True" ]] ; then
+        echo '1'
+    else
+        echo '0'
+    fi
     $xtrace
 }

-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
-function trueorfalse {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local default=$1
-    local testval=$2
-    [[ -z "$testval" ]] && { echo "$default"; return; }
-    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
-    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
-    echo "$default"
-    $xtrace
+function isset {
+    [[ -v "$1" ]]
 }

@@ -207,7 +273,8 @@ function trueorfalse {
 # backtrace level
 function backtrace {
     local level=$1
-    local deep=$((${#BASH_SOURCE[@]} - 1))
+    local deep
+    deep=$((${#BASH_SOURCE[@]} - 1))
     echo "[Call Trace]"
     while [ $level -le $deep ]; do
         echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
@@ -237,28 +304,36 @@ function die {
 # die_if_not_set $LINENO env-var "message"
 function die_if_not_set {
     local exitcode=$?
-    FXTRACE=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
     if ! is_set $evar || [ $exitcode != 0 ]; then
         die $line "$*"
     fi
-    $FXTRACE
+    $xtrace
+}
+
+function deprecated {
+    local text=$1
+    DEPRECATED_TEXT+="\n$text"
+    echo "WARNING: $text" >&2
 }

 # Prints line number and "message" in error format
 # err $LINENO "message"
 function err {
     local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    echo "$msg" 1>&2;
+    if [[ -n ${LOGDIR} ]]; then
+        echo "$msg" >> "${LOGDIR}/error.log"
     fi
-    $errXTRACE
+    $xtrace
     return $exitcode
 }

@@ -268,14 +343,15 @@ function err {
 # err_if_not_set $LINENO env-var "message"
 function err_if_not_set {
     local exitcode=$?
-    errinsXTRACE=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
     if ! is_set $evar || [ $exitcode != 0 ]; then
         err $line "$*"
    fi
-    $errinsXTRACE
+    $xtrace
     return $exitcode
 }

@@ -304,14 +380,12 @@ function is_set {
 # warn $LINENO "message"
 function warn {
     local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
-    $errXTRACE
+    echo "$msg"
+    $xtrace
     return $exitcode
 }

@@ -320,162 +394,157 @@
 # ================

 # Determine OS Vendor, Release and Update
-# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
-# Returns results in global variables:
-# os_VENDOR - vendor name
-# os_RELEASE - release
-# os_UPDATE - update
-# os_PACKAGE - package type
-# os_CODENAME - vendor's codename for release
+
+#
+# NOTE: For portability, you almost certainly do not want to use
+# these variables directly!  The "is_*" functions defined below this
+# bundle up compatible platforms under larger umbrellas that we have
+# determined are compatible enough (e.g. is_ubuntu covers Ubuntu &
+# Debian, is_fedora covers RPM-based distros).  Higher-level functions
+# such as "install_package" further abstract things in better ways.
+#
+# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
+# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora)
+# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
+# ``os_CODENAME`` - vendor's codename for release: ``jammy``
+
+declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
+
+# Make a *best effort* attempt to install lsb_release packages for the
+# user if not available.  Note can't use generic install_package*
+# because they depend on this!
+function _ensure_lsb_release {
+    if [[ -x $(command -v lsb_release 2>/dev/null) ]]; then
+        return
+    fi
+
+    if [[ -x $(command -v apt-get 2>/dev/null) ]]; then
+        sudo apt-get install -y lsb-release
+    elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
+        sudo zypper -n install lsb-release
+    elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
+        sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb
+    else
+        die $LINENO "Unable to find or auto-install lsb_release"
+    fi
+}
+
 # GetOSVersion
+# Set the following variables:
+# - os_RELEASE
+# - os_CODENAME
+# - os_VENDOR
+# - os_PACKAGE
 function GetOSVersion {
-    # Figure out which vendor we are
-    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
-        # OS/X
-        os_VENDOR=`sw_vers -productName`
-        os_RELEASE=`sw_vers -productVersion`
-        os_UPDATE=${os_RELEASE##*.}
-        os_RELEASE=${os_RELEASE%.*}
-        os_PACKAGE=""
-        if [[ "$os_RELEASE" =~ "10.7" ]]; then
-            os_CODENAME="lion"
-        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
-            os_CODENAME="snow leopard"
-        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
-            os_CODENAME="leopard"
-        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
-            os_CODENAME="tiger"
-        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
-            os_CODENAME="panther"
-        else
-            os_CODENAME=""
-        fi
-    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
-        os_VENDOR=$(lsb_release -i -s)
+    # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release
+    source /etc/os-release
+    if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then
+        os_RELEASE=${VERSION_ID}
+        os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a')
+        os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+    else
+        _ensure_lsb_release
+
         os_RELEASE=$(lsb_release -r -s)
-        os_UPDATE=""
-        os_PACKAGE="rpm"
-        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
-            os_PACKAGE="deb"
-        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
-            lsb_release -d -s | grep -q openSUSE
-            if [[ $?
-eq 0 ]]; then - os_VENDOR="openSUSE" - fi - elif [[ $os_VENDOR == "openSUSE project" ]]; then - os_VENDOR="openSUSE" - elif [[ $os_VENDOR =~ Red.*Hat ]]; then - os_VENDOR="Red Hat" - fi os_CODENAME=$(lsb_release -c -s) - elif [[ -r /etc/redhat-release ]]; then - # Red Hat Enterprise Linux Server release 5.5 (Tikanga) - # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) - # CentOS release 5.5 (Final) - # CentOS Linux release 6.0 (Final) - # Fedora release 16 (Verne) - # XenServer release 6.2.0-70446c (xenenterprise) - os_CODENAME="" - for r in "Red Hat" CentOS Fedora XenServer; do - os_VENDOR=$r - if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` - os_CODENAME=${ver#*|} - os_RELEASE=${ver%|*} - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - elif [[ -r /etc/SuSE-release ]]; then - for r in openSUSE "SUSE Linux"; do - if [[ "$r" = "SUSE Linux" ]]; then - os_VENDOR="SUSE LINUX" - else - os_VENDOR=$r - fi + os_VENDOR=$(lsb_release -i -s) + fi - if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then - os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` - os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` - os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - # If lsb_release is not installed, we should be able to detect Debian OS - elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then - os_VENDOR="Debian" + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" - os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') - os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') + else + os_PACKAGE="rpm" fi - export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME + + typeset -xr os_VENDOR + typeset -xr os_RELEASE + typeset -xr os_PACKAGE + typeset -xr os_CODENAME } # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values +declare -g DISTRO + function GetDistro { GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then - # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective + # 'Everyone' refers to Ubuntu / Debian releases by + # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then - DISTRO="opensuse-$os_RELEASE" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then - # For SLE, also use the service pack - if [[ -z "$os_UPDATE" ]]; then - DISTRO="sle${os_RELEASE}" - else - DISTRO="sle${os_RELEASE}sp${os_UPDATE}" - fi - elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then - # Drop the . release as we assume it's compatible - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs$os_RELEASE" + elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ + "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ + "$os_VENDOR" =~ (Scientific) || \ + "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (RockyLinux) || \ + "$os_VENDOR" =~ (Virtuozzo) ]]; then + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. 
-f1) + DISTRO="rhel${MAJOR_VERSION}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" else - # Catch-all for now is Vendor + Release + Update - DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + # We can't make a good choice here. Setting a sensible DISTRO + # is part of the problem, but not the major issue -- we really + # only use DISTRO in the code as a fine-filter. + # + # The bigger problem is categorising the system into one of + # our two big categories as Ubuntu/Debian-ish or + # Fedora/CentOS-ish. + # + # The setting of os_PACKAGE above is only set to "deb" based + # on a hard-coded list of vendor names ... thus we will + # default to thinking unknown distros are RPM based + # (ie. is_ubuntu does not match). But the platform will then + # also not match in is_fedora, because that also has a list of + # names. + # + # So, if you are reading this, getting your distro supported + # is really about making sure it matches correctly in these + # functions. Then you can choose a sensible way to construct + # DISTRO based on your distros release approach. + die $LINENO "Unable to determine DISTRO, can not continue." fi - export DISTRO + typeset -xr DISTRO } # Utility function for checking machine architecture # is_arch arch-type function is_arch { - ARCH_TYPE=$1 - - [[ "$(uname -m)" == "$ARCH_TYPE" ]] + [[ "$(uname -m)" == "$1" ]] } -# Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). -# is_fedora -function is_fedora { +# Determine if current distribution is an Oracle distribution +# is_oraclelinux +function is_oraclelinux { if [[ -z "$os_VENDOR" ]]; then GetOSVersion fi - [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] + [ "$os_VENDOR" = "OracleServer" ] } -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS, Rocky, etc). +# is_fedora +function is_fedora { if [[ -z "$os_VENDOR" ]]; then GetOSVersion fi - [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ + [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ + [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } @@ -489,7 +558,14 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } - +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= @@ -497,7 +573,7 @@ function is_ubuntu { # ``get_release_name_from_branch branch-name`` function get_release_name_from_branch { local branch=$1 - if [[ $branch =~ "stable/" ]]; then + if [[ $branch =~ "stable/" || $branch =~ "proposed/" ]]; then echo ${branch#*/} else echo "master" @@ -507,76 +583,112 @@ function get_release_name_from_branch { # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. 
-# Set global RECLONE=yes to simulate a clone when dest-dir exists -# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists +# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo # does not exist (default is False, meaning the repo will be cloned). -# Uses global ``OFFLINE`` +# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE`` # git_clone remote dest-dir branch function git_clone { - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_REF=$3 - RECLONE=$(trueorfalse False $RECLONE) - local orig_dir=`pwd` + local git_remote=$1 + local git_dest=$2 + local git_ref=$3 + local orig_dir + orig_dir=$(pwd) + local git_clone_flags="" + + RECLONE=$(trueorfalse False RECLONE) + if [[ "${GIT_DEPTH}" -gt 0 ]]; then + git_clone_flags="$git_clone_flags --depth $GIT_DEPTH" + fi if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs - cd $GIT_DEST + cd $git_dest git show --oneline | head -1 cd $orig_dir return fi - if echo $GIT_REF | egrep -q "^refs"; then + if echo $git_ref | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... - if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git_timed clone $GIT_REMOTE $GIT_DEST + if [[ ! -d $git_dest ]]; then + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to 'required-projects' in the job definition." + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" + fi + git_timed clone $git_clone_flags $git_remote $git_dest fi - cd $GIT_DEST - git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + cd $git_dest + git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist - if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git_timed clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_REF + if [[ ! -d $git_dest ]]; then + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to the \$PROJECTS variable in the job definition." 
+ die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" + fi + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest + cd $git_dest + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST + cd $git_dest # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE + git remote set-url origin $git_remote git_timed fetch origin # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - - # handle GIT_REF accordingly to type (tag, branch) - if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then - git_update_tag $GIT_REF - elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then - git_update_branch $GIT_REF - elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then - git_update_remote_branch $GIT_REF + sudo find $git_dest -name '*.pyc' -delete + + # handle git_ref accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then + git_update_tag $git_ref + elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then + git_update_branch $git_ref + elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then + git_update_remote_branch $git_ref else - die $LINENO "$GIT_REF is neither branch nor tag" + die $LINENO "$git_ref is neither branch nor tag" fi fi fi + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. + # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. + # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --system --add safe.directory ${git_dest} + # print out the results so we know what change was used in the logs - cd $GIT_DEST + cd $git_dest git show --oneline | head -1 cd $orig_dir } +# A variation on git clone that lets us specify a project by it's +# actual name, like oslo.config. This is exceptionally useful in the +# library installation case +function git_clone_by_name { + local name=$1 + local repo=${GITREPO[$name]} + local dir=${GITDIR[$name]} + local branch=${GITBRANCH[$name]} + git_clone $repo $dir $branch +} + + # git can sometimes get itself infinitely stuck with transient network # errors or other issues with the remote end. This wraps git in a # timeout/retry loop and is intended to watch over non-local git @@ -592,6 +704,7 @@ function git_timed { timeout=${GIT_TIMEOUT} fi + time_start "git_timed" until timeout -s SIGINT ${timeout} git "$@"; do # 124 is timeout(1)'s special return code when it reached the # timeout; otherwise assume fatal failure @@ -600,46 +713,44 @@ function git_timed { fi count=$(($count + 1)) - warn "timeout ${count} for git call: [git $@]" + warn $LINENO "timeout ${count} for git call: [git $@]" if [ $count -eq 3 ]; then die $LINENO "Maximum of 3 git retries reached" fi sleep 5 done + time_stop "git_timed" } # git update using reference as a branch. 
# git_update_branch ref function git_update_branch { + local git_branch=$1 - GIT_BRANCH=$1 - - git checkout -f origin/$GIT_BRANCH + git checkout -f origin/$git_branch # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH + git branch -D $git_branch || true + git checkout -b $git_branch } # git update using reference as a branch. # git_update_remote_branch ref function git_update_remote_branch { + local git_branch=$1 - GIT_BRANCH=$1 - - git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH + git checkout -b $git_branch -t origin/$git_branch } # git update using reference as a tag. Be careful editing source at that repo # as working copy will be in a detached mode # git_update_tag ref function git_update_tag { + local git_tag=$1 - GIT_TAG=$1 - - git tag -d $GIT_TAG + git tag -d $git_tag # fetching given tag only - git_timed fetch origin tag $GIT_TAG - git checkout -f $GIT_TAG + git_timed fetch origin tag $git_tag + git checkout -f $git_tag } @@ -653,22 +764,29 @@ function get_default_host_ip { local floating_range=$2 local host_ip_iface=$3 local host_ip=$4 + local af=$5 - # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" - host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` - for IP in $host_ips; do + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} + local host_ips + host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') + local ip + for ip in $host_ips; do # Attempt to filter out IP addresses that are part of the fixed and # floating range. Note that this method only works if the ``netaddr`` # python library is installed. If it is not installed, an error # will be printed and the first IP from the interface will be used. # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct # address. - if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then - host_ip=$IP + if [[ "$af" == "inet6" ]]; then + host_ip=$ip + break; + fi + if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then + host_ip=$ip break; fi done @@ -676,11 +794,19 @@ function get_default_host_ip { echo $host_ip } +# Generates hex string from ``size`` byte of pseudo random data +# generate_hex_string size +function generate_hex_string { + local size=$1 + hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom +} + # Grab a numbered field from python prettytable output # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
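+# e.g. (a sketch; the table row is illustrative, not part of this change):
+#   echo "| 42 | cirros | ACTIVE |" | get_field 2     # prints "cirros"
+#   echo "| 42 | cirros | ACTIVE |" | get_field -1    # prints "ACTIVE"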
 # get_field field-number
 function get_field {
+    local data field
     while read data; do
         if [ "$1" -lt 0 ]; then
             field="(\$(NF$1))"
@@ -691,6 +817,29 @@ function get_field {
     done
 }
 
+# install default policy
+# copy over a default policy.json and policy.d for projects
+function install_default_policy {
+    local project=$1
+    local project_uc
+    project_uc=$(echo $1|tr a-z A-Z)
+    local conf_dir="${project_uc}_CONF_DIR"
+    # eval conf dir to get the variable
+    conf_dir="${!conf_dir}"
+    local project_dir="${project_uc}_DIR"
+    # eval project dir to get the variable
+    project_dir="${!project_dir}"
+    local sample_conf_dir="${project_dir}/etc/${project}"
+    local sample_policy_dir="${project_dir}/etc/${project}/policy.d"
+
+    # first copy any policy.json
+    cp -p $sample_conf_dir/policy.json $conf_dir
+    # then optionally copy over policy.d
+    if [[ -d $sample_policy_dir ]]; then
+        cp -r $sample_policy_dir $conf_dir/policy.d
+    fi
+}
+
 # Add a policy to a policy.json file
 # Do nothing if the policy already exists
 # ``policy_add policy_file policy_name policy_permissions``
@@ -706,7 +855,8 @@ function policy_add {
 
     # Add a terminating comma to policy lines without one
     # Remove the closing '}' and all lines following to the end-of-file
-    local tmpfile=$(mktemp)
+    local tmpfile
+    tmpfile=$(mktemp)
     uniq ${policy_file} | sed -e '
         s/]$/],/
         /^[}]/,$d
@@ -719,120 +869,363 @@ function policy_add {
     mv ${tmpfile} ${policy_file}
 }
 
+# Gets or creates a domain
+# Usage: get_or_create_domain <name> <description>
+function get_or_create_domain {
+    local domain_id
+    domain_id=$(
+        openstack --os-cloud devstack-system-admin domain create $1 \
+            --description "$2" --or-show \
+            -f value -c id
+    )
+    echo $domain_id
+}
+
+# Gets or creates group
+# Usage: get_or_create_group <groupname> <domain> [<description>]
+function get_or_create_group {
+    local desc="${3:-}"
+    local group_id
+    # Gets group id
+    group_id=$(
+        # Creates new group with --or-show
+        openstack --os-cloud devstack-system-admin group create $1 \
+            --domain $2 --description "$desc" --or-show \
+            -f value -c id
+    )
+    echo $group_id
+}
+
+# Gets or creates user
+# Usage: get_or_create_user <username> <password> <domain> [<email>]
+function get_or_create_user {
+    local user_id
+    if [[ ! -z "$4" ]]; then
+        local email="--email=$4"
+    else
+        local email=""
+    fi
+    # Gets user id
+    user_id=$(
+        # Creates new user with --or-show
+        openstack --os-cloud devstack-system-admin user create \
+            $1 \
+            --password "$2" \
+            --domain=$3 \
+            $email \
+            --or-show \
+            -f value -c id
+    )
+    echo $user_id
+}
+
+# Gets or creates project
+# Usage: get_or_create_project <name> <domain>
+function get_or_create_project {
+    local project_id
+    project_id=$(
+        # Creates new project with --or-show
+        openstack --os-cloud devstack-system-admin project create $1 \
+            --domain=$2 \
+            --or-show -f value -c id
+    )
+    echo $project_id
+}
+
+# Gets or creates role
+# Usage: get_or_create_role <name>
+function get_or_create_role {
+    local role_id
+    role_id=$(
+        # Creates role with --or-show
+        openstack --os-cloud devstack-system-admin role create $1 \
+            --or-show -f value -c id
+    )
+    echo $role_id
+}
+
+# Returns the domain parts of a function call if present
+# Usage: _get_domain_args [<user_domain> <project_domain>]
+function _get_domain_args {
+    local domain
+    domain=""
+
+    if [[ -n "$1" ]]; then
+        domain="$domain --user-domain $1"
+    fi
+    if [[ -n "$2" ]]; then
+        domain="$domain --project-domain $2"
+    fi
+
+    echo $domain
+}
+
+# Gets or adds user role to project
+# Usage: get_or_add_user_project_role <role> <user> <project> [<user_domain> <project_domain>]
+function get_or_add_user_project_role {
+    local user_role_id
+    local domain_args
+
+    domain_args=$(_get_domain_args $4 $5)
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --project $3 \
+        $domain_args
+    user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+        --role $1 \
+        --user $2 \
+        --project $3 \
+        $domain_args \
+        -c Role -f value)
+    echo $user_role_id
+}
+
+# Gets or adds user role to domain
+# Usage: get_or_add_user_domain_role <role> <user> <domain>
+function get_or_add_user_domain_role {
+    local user_role_id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --domain $3
+    user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+        --role $1 \
+        --user $2 \
+        --domain $3 \
+        -c Role -f value)
+
+    echo $user_role_id
+}
+
+# Gets or adds user role to system
+# Usage: get_or_add_user_system_role <role> <user> <system> [<user_domain>]
+function get_or_add_user_system_role {
+    local user_role_id
+    local domain_args
+
+    domain_args=$(_get_domain_args $4)
+
+    # Gets user role id
+    user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+        --role $1 \
+        --user $2 \
+        --system $3 \
+        $domain_args \
+        -f value -c Role)
+    if [[ -z "$user_role_id" ]]; then
+        # Adds role to user and get it
+        openstack --os-cloud devstack-system-admin role add $1 \
+            --user $2 \
+            --system $3 \
+            $domain_args
+        user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+            --role $1 \
+            --user $2 \
+            --system $3 \
+            $domain_args \
+            -f value -c Role)
+    fi
+    echo $user_role_id
+}
+
+# Gets or adds group role to project
+# Usage: get_or_add_group_project_role <role> <group> <project>
+function get_or_add_group_project_role {
+    local group_role_id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack role add $1 \
+        --group $2 \
+        --project $3
+    group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+        --role $1 \
+        --group $2 \
+        --project $3 \
+        -f value -c Role)
+
+    echo $group_role_id
+}
+
+# Gets or creates service
+# Usage: get_or_create_service <name> <type> <description>
+function get_or_create_service {
+    local service_id
+    # Gets service id
+    service_id=$(
+        # Gets service id
+        openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null ||
+        # Creates new service if not exists
+        openstack --os-cloud devstack-system-admin service create \
+            $2 \
+            --name $1 \
+            --description="$3" \
+            -f value -c id
+    )
+    echo $service_id
+}
+
+# Create an endpoint with a specific interface
+# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
+function _get_or_create_endpoint_with_interface {
+    local endpoint_id
+    endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \
+        --service $1 \
+        --interface $2 \
+        --region $4 \
+        -c ID -f value)
+    if [[ -z "$endpoint_id" ]]; then
+        # Creates new endpoint
+        endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \
+            $1 $2 $3 --region $4 -f value -c id)
+    fi
+
+    echo $endpoint_id
+}
+
+# Gets or creates endpoint
+# Usage: get_or_create_endpoint <service> <region> <publicurl> [adminurl] [internalurl]
+function get_or_create_endpoint {
+    # NOTE(jamielennnox): when converting to v3 endpoint creation we go from
+    # creating one endpoint with multiple urls to multiple endpoints each with
+    # a different interface. To maintain the existing function interface we
+    # create 3 endpoints and return the id of the public one. In reality
+    # returning the public id will not make a lot of difference as there are no
+    # scenarios currently that use the returned id. Ideally this behaviour
+    # should be pushed out to the service setups and let them create the
+    # endpoints they need.
+    local public_id
+    public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+    # only create admin/internal urls if provided content for them
+    if [[ -n "$4" ]]; then
+        _get_or_create_endpoint_with_interface $1 admin $4 $2
+    fi
+    if [[ -n "$5" ]]; then
+        _get_or_create_endpoint_with_interface $1 internal $5 $2
+    fi
+    # return the public id to indicate success, and this is the endpoint most likely wanted
+    echo $public_id
+}
+
+# Get a URL from the identity service
+# Usage: get_endpoint_url <service> <interface>
+function get_endpoint_url {
+    echo $(openstack --os-cloud devstack-system-admin endpoint list \
+        --service $1 --interface $2 \
+        -c URL -f value)
+}
+
+# check if we are using ironic with hardware
+# TODO(jroll) this is a kludge left behind when ripping ironic code
+# out of tree, as it is used by nova and neutron.
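#
# For example (an illustrative sketch), service plugins typically guard
# hardware-specific setup on this check:
#
#   if is_ironic_hardware; then
#       # ... configure real BMCs instead of virtual ones ...
#   fi
#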
+# figure out a way to refactor nova/neutron code to eliminate this +function is_ironic_hardware { + is_service_enabled ironic && [[ "$IRONIC_IS_HARDWARE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= # _get_package_dir function _get_package_dir { + local base_dir=$1 local pkg_dir + + if [[ -z "$base_dir" ]]; then + base_dir=$FILES + fi if is_ubuntu; then - pkg_dir=$FILES/apts + pkg_dir=$base_dir/debs elif is_fedora; then - pkg_dir=$FILES/rpms - elif is_suse; then - pkg_dir=$FILES/rpms-suse + pkg_dir=$base_dir/rpms else exit_distro_not_supported "list of packages" fi echo "$pkg_dir" } +# Wrapper for ``apt-get update`` to try multiple times on the update +# to address bad package mirrors (which happen all the time). +function apt_get_update { + # only do this once per run + if [[ "$REPOS_UPDATED" == "True" && "$RETRY_UPDATE" != "True" ]]; then + return + fi + + # bail if we are offline + [[ "$OFFLINE" = "True" ]] && return + + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get-update" + + local proxies="http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} no_proxy=${no_proxy:-} " + local update_cmd="$sudo $proxies apt-get update" + if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then + die $LINENO "Failed to update apt repos, we're dead now" + fi + + REPOS_UPDATED=True + # stop the clock + time_stop "apt-get-update" +} + # Wrapper for ``apt-get`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get { - local xtrace=$(set +o | grep xtrace) + local xtrace result + xtrace=$(set +o | grep xtrace) set +o xtrace [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + # time all the apt operations + time_start "apt-get" + $xtrace + $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" + http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \ + no_proxy=${no_proxy:-} \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" < /dev/null + result=$? + + # stop the clock + time_stop "apt-get" + return $result } -# get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|rpms}``. The list is intended -# to be passed to a package installer such as apt or yum. -# -# Only packages required for the services in 1st argument will be -# included. Two bits of metadata are recognized in the prerequisite files: -# -# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` -# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection -# of the package to the distros listed. The distro names are case insensitive. 
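#
# For example (an illustrative sketch; the package names are arbitrary),
# entries in a prerequisite file such as files/debs/nova can carry this
# metadata:
#
#   qemu-kvm            # NOPRIME
#   python3-libvirt     # dist:jammy,noble
#   some-legacy-pkg     # not:noble
#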
-function get_packages { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local services=$@ - local package_dir=$(_get_package_dir) - local file_to_parse - local service +function _parse_package_files { + local files_to_parse=$@ - if [[ -z "$package_dir" ]]; then - echo "No package directory supplied" - return 1 - fi if [[ -z "$DISTRO" ]]; then GetDistro - echo "Found Distro $DISTRO" fi - for service in ${services//,/ }; do - # Allow individual services to specify dependencies - if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - # NOTE(sdague) n-api needs glance for now because that's where - # glance client is - if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" - fi - elif [[ $service == ceilometer-* ]]; then - if [[ ! $file_to_parse =~ ceilometer ]]; then - file_to_parse="${file_to_parse} ceilometer" - fi - elif [[ $service == s-* ]]; then - if [[ ! $file_to_parse =~ swift ]]; then - file_to_parse="${file_to_parse} swift" - fi - elif [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ neutron ]]; then - file_to_parse="${file_to_parse} neutron" - fi - elif [[ $service == ir-* ]]; then - if [[ ! $file_to_parse =~ ironic ]]; then - file_to_parse="${file_to_parse} ironic" - fi - fi - done - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} + for fname in ${files_to_parse}; do local OIFS line package distros distro [[ -e $fname ]] || continue @@ -843,8 +1236,9 @@ function get_packages { continue fi - # Assume we want this package - package=${line%#*} + # Assume we want this package; free-form + # comments allowed after a # + package=${line%%#*} inst_pkg=1 # Look for # dist:xxx in comment @@ -852,7 +1246,7 @@ function get_packages { # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR + # In bash ${VAR,,} will lowercase VAR # Look for a match in the distro list if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then # If no match then skip this package @@ -860,12 +1254,15 @@ function get_packages { fi fi - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then + # Look for # not:xxx in comment + if [[ $line =~ (.*)#.*not:([^ ]*) ]]; then + # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} - # Are we installing test packages? (test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ${distros,,} =~ ${DISTRO,,} ]]; then + # If match then skip this package inst_pkg=0 fi fi @@ -876,148 +1273,388 @@ function get_packages { done IFS=$OIFS done - $xtrace } -# Distro-agnostic package installer -# install_package package [package ...] 
-function install_package { - local xtrace=$(set +o | grep xtrace) +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{debs|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. +# +# Only packages required for the services in 1st argument will be +# included. Two bits of metadata are recognized in the prerequisite files: +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +# - ``# not:DISTRO`` or ``not:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros not listed. The distro names are case insensitive. +function get_packages { + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace - if is_ubuntu; then - # if there are transient errors pulling the updates, that's fine. It may - # be secondary repositories that we don't really care about. - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true - NO_UPDATE_REPOS=True + local services=$@ + local package_dir + package_dir=$(_get_package_dir) + local file_to_parse="" + local service="" - $xtrace - apt_get install "$@" - elif is_fedora; then - $xtrace - yum_install "$@" - elif is_suse; then - $xtrace - zypper_install "$@" - else - $xtrace - exit_distro_not_supported "installing packages" + if [ $# -ne 1 ]; then + die $LINENO "get_packages takes a single, comma-separated argument" fi -} -# Distro-agnostic function to tell if a package is installed -# is_package_installed package [package ...] -function is_package_installed { - if [[ -z "$@" ]]; then + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" return 1 fi - - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -s "$@" > /dev/null 2> /dev/null - elif [[ "$os_PACKAGE" = "rpm" ]]; then - rpm --quiet -q "$@" - else - exit_distro_not_supported "finding if a package is installed" - fi -} - + for service in ${services//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} ${package_dir}/${service}" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then + file_to_parse="${file_to_parse} ${package_dir}/nova" + fi + if [[ ! $file_to_parse =~ $package_dir/glance ]]; then + file_to_parse="${file_to_parse} ${package_dir}/glance" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then + file_to_parse="${file_to_parse} ${package_dir}/cinder" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == s-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/swift ]]; then + file_to_parse="${file_to_parse} ${package_dir}/swift" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then + file_to_parse="${file_to_parse} ${package_dir}/nova" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == g-* ]]; then + if [[ ! 
$file_to_parse =~ $package_dir/glance ]]; then + file_to_parse="${file_to_parse} ${package_dir}/glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then + file_to_parse="${file_to_parse} ${package_dir}/keystone" + fi + elif [[ $service == q-* || $service == neutron-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then + file_to_parse="${file_to_parse} ${package_dir}/neutron-common" + fi + elif [[ $service == ir-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then + file_to_parse="${file_to_parse} ${package_dir}/ironic" + fi + fi + done + echo "$(_parse_package_files $file_to_parse)" + $xtrace +} + +# get_plugin_packages() collects a list of package names of any type from a +# plugin's prerequisite files in ``$PLUGIN/devstack/files/{debs|rpms}``. The +# list is intended to be passed to a package installer such as apt or yum. +# +# Only packages required for enabled and collected plugins will included. +# +# The same metadata used in the main DevStack prerequisite files may be used +# in these prerequisite files, see get_packages() for more info. +function get_plugin_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local files_to_parse="" + local package_dir="" + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)" + files_to_parse+=" $package_dir/$plugin" + done + echo "$(_parse_package_files $files_to_parse)" + $xtrace +} + +# Search plugins for a bindep.txt file +# +# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS`` +# +# Note this is only valid after BINDEP_CMD is setup in stack.sh, and +# is thus not really intended to be called externally. +function _get_plugin_bindep_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local bindep_file + local packages + + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt + if [[ -f ${bindep_file} ]]; then + packages+=$($BINDEP_CMD -b --file ${bindep_file} || true) + fi + done + echo "${packages}" + $xtrace +} + +# Distro-agnostic package installer +# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE`` +# install_package package [package ...] +function update_package_repo { + NO_UPDATE_REPOS=${NO_UPDATE_REPOS:-False} + REPOS_UPDATED=${REPOS_UPDATED:-False} + RETRY_UPDATE=${RETRY_UPDATE:-False} + + if [[ "$NO_UPDATE_REPOS" = "True" ]]; then + return 0 + fi + + if is_ubuntu; then + apt_get_update + fi +} + +function real_install_package { + if is_ubuntu; then + apt_get install "$@" + elif is_fedora; then + yum_install "$@" + else + exit_distro_not_supported "installing packages" + fi +} + +# Distro-agnostic package installer +# install_package package [package ...] +function install_package { + update_package_repo + if ! real_install_package "$@"; then + RETRY_UPDATE=True update_package_repo && real_install_package "$@" + fi +} + +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...] +function is_package_installed { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -s "$@" > /dev/null 2> /dev/null + elif [[ "$os_PACKAGE" = "rpm" ]]; then + rpm --quiet -q "$@" + else + exit_distro_not_supported "finding if a package is installed" + fi +} + # Distro-agnostic package uninstaller # uninstall_package package [package ...] 
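#
# For example (an illustrative sketch), the wrappers above pick apt or
# dnf for the current distro, refreshing the package index and retrying
# once on a transient failure:
#
#   install_package qemu-kvm
#   is_package_installed qemu-kvm && uninstall_package qemu-kvm
#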
 function uninstall_package {
     if is_ubuntu; then
         apt_get purge "$@"
     elif is_fedora; then
-        sudo yum remove -y "$@"
-    elif is_suse; then
-        sudo zypper rm "$@"
+        sudo dnf remove -y "$@" ||:
     else
         exit_distro_not_supported "uninstalling packages"
     fi
 }
 
-# Wrapper for ``yum`` to set proxy environment variables
+# Wrapper for ``dnf`` to set proxy environment variables
 # Uses globals ``OFFLINE``, ``*_proxy``
+# The name is kept for backwards compatibility with external
+# callers, despite none of our supported platforms using yum
+# any more.
 # yum_install package [package ...]
 function yum_install {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
+    local result parse_yum_result
 
-    # The manual check for missing packages is because yum -y assumes
-    # missing packages are OK. See
-    # https://bugzilla.redhat.com/show_bug.cgi?id=965567
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        no_proxy=$no_proxy \
-        yum install -y "$@" 2>&1 | \
-        awk '
-            BEGIN { fail=0 }
-            /No package/ { fail=1 }
-            { print }
-            END { exit fail }' || \
-        die $LINENO "Missing packages detected"
+    [[ "$OFFLINE" = "True" ]] && return
 
-    # also ensure we catch a yum failure
-    if [[ ${PIPESTATUS[0]} != 0 ]]; then
-        die $LINENO "Yum install failure"
-    fi
+    time_start "yum_install"
+    sudo_with_proxies dnf install -y "$@"
+    time_stop "yum_install"
 }
 
 # zypper wrapper to set arguments correctly
+# Uses globals ``OFFLINE``, ``*_proxy``
 # zypper_install package [package ...]
 function zypper_install {
     [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
+    $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
+        no_proxy="${no_proxy:-}" \
+        zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@"
 }
 
+# Run bindep and install packages it outputs
+#
+# Usage:
+#  install_bindep <path-to-bindep.txt> [profile,profile]
+#
+# Note unlike the bindep command itself, profile(s) specified should
+# be a single, comma-separated string, no spaces.
+function install_bindep {
+    local file=$1
+    local profiles=${2:-""}
+    local pkgs
 
-# Process Functions
-# =================
+    if [[ ! -f $file ]]; then
+        warn $LINENO "Can not find bindep file: $file"
+        return
+    fi
+
+    # converting here makes it much easier to work with passing
+    # arguments
+    profiles=${profiles/,/ /}
+
+    # Note bindep returns 1 when packages need to be installed, so we
+    # have to ignore its return for "-e"
+    pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true)
+
+    if [[ -n "${pkgs}" ]]; then
+        install_package ${pkgs}
+    fi
+}
 
-# _run_process() is designed to be backgrounded by run_process() to simulate a
-# fork. It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it().
The log filename is derived -# from the service name and global-and-now-misnamed SCREEN_LOGDIR -# _run_process service "command-line" -function _run_process { +function write_user_unit_file { local service=$1 local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + local extra="" + if [[ -n "$group" ]]; then + extra="Group=$group" + fi + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 +function write_uwsgi_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "Restart" "always" + iniset -sudo $unitfile "Service" "NotifyAccess" "all" + iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" - exec /bin/bash -c "$command" - die "$service exec failure: $command" + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload } -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# init_service_check -function init_service_check { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} +function _common_systemd_pitfalls { + local cmd=$1 + # do some sanity checks on $cmd to see things we don't expect to work + + if [[ "$cmd" =~ "sudo" ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +You are trying to use run_process with sudo, this is not going to work under systemd. 
+ +If you need to run a service as a user other than \$STACK_USER call it with: - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + run_process \$name \$cmd \$group \$user +EOF + die $LINENO "$msg" + fi + + if [[ ! "$cmd" =~ ^/ ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +The cmd="$cmd" does not start with an absolute path. It will fail to +start under systemd. + +Please update your run_process stanza to have an absolute path. +EOF + die $LINENO "$msg" + fi + +} + +# Helper function to build a basic unit file and run it under systemd. +function _run_under_systemd { + local service=$1 + local command="$2" + local cmd=$command + # sanity check the command + _common_systemd_pitfalls "$cmd" + + local systemd_service="devstack@$service.service" + local group=$3 + local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi + if [[ "$command" =~ "uwsgi" ]] ; then + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" + else + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service } # Find out if a process exists by partial name. @@ -1025,285 +1662,250 @@ function init_service_check { function is_running { local name=$1 ps auxw | grep -v grep | grep ${name} > /dev/null - RC=$? + local exitcode=$? # some times I really hate bash reverse binary logic - return $RC + return $exitcode } -# run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# $SERVICE_DIR/$SCREEN_NAME/$service.pid -# run_process service "command-line" +# Run a single service under screen or directly +# If the command includes shell metachatacters (;<>*) it must be run using a shell +# If an optional group is provided sg will be used to run the +# command as that group. +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" - # Spawn the child process - _run_process "$service" "$command" & - echo $! 
-} - -# Helper to launch a service in a named screen -# screen_it service "command-line" -function screen_it { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen -S $SCREEN_NAME -X screen -t $1 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi + local name=$service - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the service failure file - # The pid saved can be used in screen_stop() as a process group - # id to kill off all child processes - screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" - else - # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid - fi + time_start "run_process" + if is_service_enabled $service; then + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi + time_stop "run_process" } -# Screen rc file builder -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - -# Stop a service in screen +# Stop a service process # If a PID is available use it, kill the whole process group via TERM # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind -# screen_stop service -function screen_stop { - SCREEN_NAME=${SCREEN_NAME:-stack} +# Uses globals ``SERVICE_DIR`` +# stop_process service +function stop_process { + local service=$1 + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - if is_service_enabled $1; then - # Kill via pid if we have one available - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) - rm $SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $1 -X kill + if is_service_enabled $service; then + # Only do this for units which appear enabled, this also + # catches units that don't really exist for cases like + # keystone without a failure. + if $SYSTEMCTL is-enabled devstack@$service.service; then + $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service fi fi } -# Helper to get the status of each running service -# service_check +# use systemctl to check service status function service_check { local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - # make this -o errexit safe - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" + for service in ${ENABLED_SERVICES//,/ }; do + # because some things got renamed like key => keystone + if $SYSTEMCTL is-enabled devstack@$service.service; then + # no-pager is needed because otherwise status dumps to a + # pager when in interactive mode, which will stop a manual + # devstack run. + $SYSTEMCTL status devstack@$service.service --no-pager + fi done - - if [ -n "$failures" ]; then - die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh" - fi } -# Python Functions -# ================ +# Plugin Functions +# ================= -# Get the path to the pip command. -# get_pip_command -function get_pip_command { - which pip || which pip-python +DEVSTACK_PLUGINS=${DEVSTACK_PLUGINS:-""} - if [ $? -ne 0 ]; then - die $LINENO "Unable to find pip; cannot continue" +# enable_plugin [branch] +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``url`` is a git url +# ``branch`` is a gitref. 
If it's not set, defaults to master +function enable_plugin { + local name=$1 + local url=$2 + local branch=${3:-master} + if is_plugin_enabled $name; then + die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}" fi + DEVSTACK_PLUGINS+=",$name" + GITREPO[$name]=$url + GITDIR[$name]=$DEST/$name + GITBRANCH[$name]=$branch } -# Get the path to the direcotry where python executables are installed. -# get_python_exec_prefix -function get_python_exec_prefix { - if is_fedora || is_suse; then - echo "/usr/bin" - else - echo "/usr/local/bin" +# is_plugin_enabled +# +# Check if the plugin was enabled, e.g. using enable_plugin +# +# ``name`` The name with which the plugin was enabled +function is_plugin_enabled { + local name=$1 + if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then + return 0 fi + return 1 } -# Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy`` -# pip_install package [package ...] -function pip_install { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - if [[ "$OFFLINE" = "True" || -z "$@" ]]; then - $xtrace +# fetch_plugins +# +# clones all plugins +function fetch_plugins { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then return fi - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ $TRACK_DEPENDS = True ]]; then - source $DEST/.venv/bin/activate - CMD_PIP=$DEST/.venv/bin/pip - SUDO_PIP="env" - else - SUDO_PIP="sudo" - CMD_PIP=$(get_pip_command) - fi + echo "Fetching DevStack plugins" + for plugin in ${plugins//,/ }; do + git_clone_by_name $plugin + done +} - # Mirror option not needed anymore because pypi has CDN available, - # but it's useful in certain circumstances - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - if [[ "$PIP_USE_MIRRORS" != "False" ]]; then - PIP_MIRROR_OPT="--use-mirrors" - fi +# load_plugin_settings +# +# Load settings from plugins in the order that they were registered +function load_plugin_settings { + local plugins="${DEVSTACK_PLUGINS}" + local plugin - # pip < 1.4 has a bug where it will use an already existing build - # directory unconditionally. Say an earlier component installs - # foo v1.1; pip will have built foo's source in - # /tmp/$USER-pip-build. Even if a later component specifies foo < - # 1.1, the existing extracted build will be used and cause - # confusing errors. By creating unique build directories we avoid - # this problem. 
See https://github.com/pypa/pip/issues/709 - local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi - $xtrace - $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ - http_proxy=$http_proxy \ - https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - $CMD_PIP install --build=${pip_build_tmp} \ - $PIP_MIRROR_OPT $@ \ - && $SUDO_PIP rm -rf ${pip_build_tmp} + echo "Loading plugin settings" + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + # source any known settings + if [[ -f $dir/devstack/settings ]]; then + source $dir/devstack/settings + fi + done } -# this should be used if you want to install globally, all libraries should -# use this, especially *oslo* ones -function setup_install { - local project_dir=$1 - setup_package_with_req_sync $project_dir -} +# plugin_override_defaults +# +# Run an extremely early setting phase for plugins that allows default +# overriding of services. +function plugin_override_defaults { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi -# this should be used for projects which run services, like all services -function setup_develop { - local project_dir=$1 - setup_package_with_req_sync $project_dir -e + echo "Overriding Configuration Defaults" + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + # source any overrides + if [[ -f $dir/devstack/override-defaults ]]; then + # be really verbose that an override is happening, as it + # may not be obvious if things fail later. + echo "$plugin has overridden the following defaults" + cat $dir/devstack/override-defaults + source $dir/devstack/override-defaults + fi + done } -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# -# Updates the dependencies in project_dir from the -# openstack/requirements global list before installing anything. +# run_plugins # -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` -# setup_develop directory -function setup_package_with_req_sync { - local project_dir=$1 - local flags=$2 - - # Don't update repo if local changes exist - # Don't use buggy "git diff --quiet" - # ``errexit`` requires us to trap the exit code when the repo is changed - local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") +# Run the devstack/plugin.sh in all the plugin directories. These are +# run in registration order. +function run_plugins { + local mode=$1 + local phase=$2 + + local plugins="${DEVSTACK_PLUGINS}" + local plugin + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + if [[ -f $dir/devstack/plugin.sh ]]; then + source $dir/devstack/plugin.sh $mode $phase + fi + done +} - if [[ $update_requirements != "changed" ]]; then - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) +function run_phase { + local mode=$1 + local phase=$2 + if [[ -d $TOP_DIR/extras.d ]]; then + local extra_plugin_file_name + for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do + # NOTE(sdague): only process extras.d for the 3 explicitly + # white listed elements in tree. We want these to move out + # over time as well, but they are in tree, so we need to + # manage that. + local exceptions="80-tempest.sh" + local extra + extra=$(basename $extra_plugin_file_name) + if [[ ! 
( $exceptions =~ "$extra" ) ]]; then + warn "use of extras.d is no longer supported" + warn "processing of project $extra is skipped" + else + [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase + fi + done fi - - setup_package $project_dir $flags - - # We've just gone and possibly modified the user's source tree in an - # automated way, which is considered bad form if it's a development - # tree because we've screwed up their next git checkin. So undo it. - # - # However... there are some circumstances, like running in the gate - # where we really really want the overridden version to stick. So provide - # a variable that tells us whether or not we should UNDO the requirements - # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True" ]; then - if [[ $update_requirements != "changed" ]]; then - (cd $project_dir && git reset --hard) - fi + # the source phase corresponds to settings loading in plugins + if [[ "$mode" == "source" ]]; then + load_plugin_settings + verify_disabled_services + elif [[ "$mode" == "override_defaults" ]]; then + plugin_override_defaults + else + run_plugins $mode $phase fi } -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# Uses globals ``STACK_USER`` -# setup_develop_no_requirements_update directory -function setup_package { - local project_dir=$1 - local flags=$2 +# define_plugin +# +# This function is a no-op. It allows a plugin to define its name So +# that other plugins may reference it by name. It should generally be +# the last component of the canonical git repo name. E.g., +# openstack/devstack-foo should use "devstack-foo" as the name here. +# +# This function is currently a noop, but the value may still be used +# by external tools (as in plugin_requires) and may be used by +# devstack in the future. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +function define_plugin { + : +} - pip_install $flags $project_dir - # ensure that further actions can do things like setup.py sdist - if [[ "$flags" == "-e" ]]; then - safe_chown -R $STACK_USER $1/*.egg-info - fi +# plugin_requires +# +# This function is a no-op. It is currently used by external tools +# (such as the devstack module for Ansible) to automatically generate +# local.conf files. It is not currently used by devstack itself to +# resolve dependencies. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``other`` is the name of another plugin +function plugin_requires { + : } @@ -1313,11 +1915,17 @@ function setup_package { # remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) # _cleanup_service_list service-list function _cleanup_service_list { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + echo "$1" | sed -e ' s/,,/,/g; s/^,//; s/,$// ' + + $xtrace } # disable_all_services() removes all current services @@ -1335,56 +1943,89 @@ function disable_all_services { # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services { - local tmpsvcs="${ENABLED_SERVICES}" + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local to_remove="" + local remaining="" local service - for service in ${tmpsvcs//,/ }; do + + # build up list of services that should be removed; i.e. 
they + # begin with "-" + for service in ${ENABLED_SERVICES//,/ }; do if [[ ${service} == -* ]]; then - tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + to_remove+=",${service#-}" + else + remaining+=",${service}" fi done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + + # go through the service list. if this service appears in the "to + # be removed" list, drop it + ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove") + + $xtrace } -# disable_service() removes the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are present. +# disable_service() prepares the services passed as argument to be +# removed from the ``ENABLED_SERVICES`` list, if they are present. # # For example: # disable_service rabbit # -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` +# Uses global ``DISABLED_SERVICES`` # disable_service service [service ...] function disable_service { - local tmpsvcs=",${ENABLED_SERVICES}," + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local disabled_svcs="${DISABLED_SERVICES}" + local enabled_svcs=",${ENABLED_SERVICES}," local service for service in $@; do + disabled_svcs+=",$service" if is_service_enabled $service; then - tmpsvcs=${tmpsvcs//,$service,/,} + enabled_svcs=${enabled_svcs//,$service,/,} fi done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs") + ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs") + + $xtrace } # enable_service() adds the services passed as argument to the # ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# enable_service qpid +# enable_service q-svc # # This function does not know about the special cases # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] function enable_service { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local tmpsvcs="${ENABLED_SERVICES}" + local service for service in $@; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + warn $LINENO "Attempt to enable_service ${service} when it has been disabled" + continue + fi if ! is_service_enabled $service; then tmpsvcs+=",$service" fi done ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") disable_negated_services + + $xtrace } # is_service_enabled() checks if the service(s) specified as arguments are @@ -1396,7 +2037,6 @@ function enable_service { # There are special cases for some 'catch-all' services:: # **nova** returns true if any service enabled start with **n-** # **cinder** returns true if any service enabled start with **c-** -# **ceilometer** returns true if any service enabled start with **ceilometer** # **glance** returns true if any service enabled start with **g-** # **neutron** returns true if any service enabled start with **q-** # **swift** returns true if any service enabled start with **s-** @@ -1404,45 +2044,72 @@ function enable_service { # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. # -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
function is_service_enabled { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace + local enabled=1 - services=$@ + local services=$@ + local service for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0 # Look for top-level 'enabled' function for this service if type is_${service}_enabled >/dev/null 2>&1; then # A function exists for this service, use it - is_${service}_enabled - enabled=$? + is_${service}_enabled && enabled=0 fi # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0 + [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0 + [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0 + [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0 + [[ ${service} == "neutron" && ,${ENABLED_SERVICES} =~ ,"q-" ]] && enabled=0 + [[ ${service} == "trove" && ,${ENABLED_SERVICES} =~ ,"tr-" ]] && enabled=0 + [[ ${service} == "swift" && ,${ENABLED_SERVICES} =~ ,"s-" ]] && enabled=0 + [[ ${service} == s-* && ,${ENABLED_SERVICES} =~ ,"swift" ]] && enabled=0 done + $xtrace return $enabled } +# remove specified list from the input string +# remove_disabled_services service-list remove-list +function remove_disabled_services { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local service_list=$1 + local remove_list=$2 + local service + local enabled="" + + for service in ${service_list//,/ }; do + local remove + local add=1 + for remove in ${remove_list//,/ }; do + if [[ ${remove} == ${service} ]]; then + add=0 + break + fi + done + if [[ $add == 1 ]]; then + enabled="${enabled},$service" + fi + done + + $xtrace + + _cleanup_service_list "$enabled" +} + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which to store the enabled service's name @@ -1450,8 +2117,9 @@ function is_service_enabled { function use_exclusive_service { local options=${!1} local selection=$3 - out=$2 + local out=$2 [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 + local opt for opt in $options;do [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt done @@ -1459,6 +2127,18 @@ function use_exclusive_service { return 0 } +# Make sure that nothing has manipulated ENABLED_SERVICES in a way +# that conflicts with prior calls to disable_service. 
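#
# For example (an illustrative sketch), this is the case it guards
# against:
#
#   disable_service n-novnc
#   ENABLED_SERVICES+=",n-novnc"   # direct manipulation, bypassing enable_service
#   verify_disabled_services       # dies with an explanatory message
#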
+# Uses global ``ENABLED_SERVICES`` +function verify_disabled_services { + local service + for service in ${ENABLED_SERVICES//,/ }; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'" + fi + done +} + # System Functions # ================ @@ -1466,7 +2146,8 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. function _safe_permission_operation { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local args=( $@ ) local last @@ -1475,7 +2156,7 @@ function _safe_permission_operation { let last="${#args[*]} - 1" - dir_to_check=${args[$last]} + local dir_to_check=${args[$last]} if [ ! -d "$dir_to_check" ]; then dir_to_check=`dirname "$dir_to_check"` fi @@ -1485,11 +2166,7 @@ function _safe_permission_operation { return 0 fi - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi + sudo_cmd="sudo" $xtrace $sudo_cmd $@ @@ -1502,8 +2179,10 @@ function address_in_net { local ip=$1 local range=$2 local masklen=${range#*/} - local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) - local subnet=$(maskip $ip $(cidr2netmask $masklen)) + local network + network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet + subnet=$(maskip $ip $(cidr2netmask $masklen)) [[ $network == $subnet ]] } @@ -1513,16 +2192,7 @@ function add_user_to_group { local user=$1 local group=$2 - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - # SLE11 and openSUSE 12.2 don't have the usual usermod - if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then - sudo usermod -a -G "$group" "$user" - else - sudo usermod -A "$group" "$user" - fi + sudo usermod -a -G "$group" "$user" } # Convert CIDR notation to a IPv4 netmask @@ -1534,6 +2204,23 @@ function cidr2netmask { echo ${1-0}.${2-0}.${3-0}.${4-0} } +# Check if this is a valid ipv4 address string +function is_ipv4_address { + local address=$1 + local regex='([0-9]{1,3}\.){3}[0-9]{1,3}' + # TODO(clarkb) make this more robust + if [[ "$address" =~ $regex ]] ; then + return 0 + else + return 1 + fi +} + +# Remove "[]" around urlquoted IPv6 addresses +function ipv6_unquote { + echo $1 | tr -d [] +} + # Gracefully cp only if source file/dir exists # cp_it source destination function cp_it { @@ -1551,20 +2238,21 @@ function cp_it { # http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh function export_proxy_variables { - if [[ -n "$http_proxy" ]]; then + if isset http_proxy ; then export http_proxy=$http_proxy fi - if [[ -n "$https_proxy" ]]; then + if isset https_proxy ; then export https_proxy=$https_proxy fi - if [[ -n "$no_proxy" ]]; then + if isset no_proxy ; then export no_proxy=$no_proxy fi } # Returns true if the directory is on a filesystem mounted via NFS. 
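#
# For example (an illustrative sketch of the address helpers above):
#
#   cidr2netmask 24                            # -> 255.255.255.0
#   address_in_net 10.0.0.5 10.0.0.0/24 && echo "in range"
#   ipv6_unquote "[fe80::1]"                   # -> fe80::1
#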
function is_nfs_directory { - local mount_type=`stat -f -L -c %T $1` + local mount_type + mount_type=`stat -f -L -c %T $1` test "$mount_type" == "nfs" } @@ -1575,18 +2263,53 @@ function maskip { local ip=$1 local mask=$2 local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" - local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + local subnet + subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) echo $subnet } +function is_provider_network { + if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then + return 0 + fi + return 1 +} + + +# Return just the . for the given python interpreter +function _get_python_version { + local interp=$1 + local version + # disable erroring out here, otherwise if python 3 doesn't exist we fail hard. + if [[ -x $(which $interp 2> /dev/null) ]]; then + version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + fi + echo ${version} +} + +# Return the current python as "python." +function python_version { + local python_version + python_version=$(_get_python_version python2) + echo "python${python_version}" +} + +function python3_version { + local python3_version + python3_version=$(_get_python_version python3) + echo "python${python3_version}" +} + + # Service wrapper to restart services # restart_service service-name function restart_service { - if is_ubuntu; then - sudo /usr/sbin/service $1 restart + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl restart $1 else - sudo /sbin/service $1 restart + sudo service $1 restart fi + } # Only change permissions of a file or directory if it is not on an @@ -1604,26 +2327,226 @@ function safe_chown { # Service wrapper to start services # start_service service-name function start_service { - if is_ubuntu; then - sudo /usr/sbin/service $1 start + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl start $1 else - sudo /sbin/service $1 start + sudo service $1 start fi } # Service wrapper to stop services # stop_service service-name function stop_service { - if is_ubuntu; then - sudo /usr/sbin/service $1 stop + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl stop $1 + else + sudo service $1 stop + fi +} + +# Service wrapper to reload services +# If the service was not in running state it will start it +# reload_service service-name +function reload_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl reload-or-restart $1 else - sudo /sbin/service $1 stop + sudo service $1 reload fi } +# Test with a finite retry loop. +# +function test_with_retry { + local testcmd=$1 + local failmsg=$2 + local until=${3:-10} + local sleep=${4:-0.5} + + time_start "test_with_retry" + if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then + die $LINENO "$failmsg" + fi + time_stop "test_with_retry" +} + +# Like sudo but forwarding http_proxy https_proxy no_proxy environment vars. +# If it is run as superuser then sudo is replaced by env. +# +function sudo_with_proxies { + local sudo + + [[ "$(id -u)" = "0" ]] && sudo="env" || sudo="sudo" + + $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}"\ + no_proxy="${no_proxy:-}" "$@" +} + +# Timing infrastructure - figure out where large blocks of time are +# used in DevStack +# +# The timing infrastructure for DevStack is about collecting buckets +# of time that are spend in some subtask. For instance, that might be +# 'apt', 'pip', 'osc', even database migrations. 
+# We do this by a pair of functions: time_start / time_stop.
+#
+# These take a single parameter: $name - which specifies the name of
+# the bucket to be accounted against. The time_totals function spits out
+# the results.
+#
+# Resolution is only in whole seconds, so it should be used for long
+# running activities.

+declare -A -g _TIME_TOTAL
+declare -A -g _TIME_START
+declare -r -g _TIME_BEGIN=$(date +%s)

+# time_start $name
+#
+# starts the clock for a timer by name. Errors if that clock is
+# already started.
+function time_start {
+    local name=$1
+    local start_time=${_TIME_START[$name]}
+    if [[ -n "$start_time" ]]; then
+        die $LINENO "Trying to start the clock on $name, but it's already been started"
+    fi
+    _TIME_START[$name]=$(date +%s%3N)
+}

+# time_stop $name
+#
+# stops the clock for a timer by name, and accumulates that time in the
+# global counter for that name. Errors if that clock had not
+# previously been started.
+function time_stop {
+    local name
+    local end_time
+    local elapsed_time
+    local total
+    local start_time
+
+    name=$1
+    start_time=${_TIME_START[$name]}
+
+    if [[ -z "$start_time" ]]; then
+        die $LINENO "Trying to stop the clock on $name, but it was never started"
+    fi
+    end_time=$(date +%s%3N)
+    elapsed_time=$(($end_time - $start_time))
+    total=${_TIME_TOTAL[$name]:-0}
+    # reset the clock so we can start it in the future
+    _TIME_START[$name]=""
+    _TIME_TOTAL[$name]=$(($total + $elapsed_time))
+}

+function install_openstack_cli_server {
+    export PATH=$TOP_DIR/files/openstack-cli-server:$PATH
+    run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server"
+}

+function oscwrap {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local out
+    local rc
+    local start
+    local end
+    # Cannot use time_start and time_stop as we run in subshells
+    # and those rely on modifying vars in the same process (which cannot
+    # happen from a subshell).
+    start=$(date +%s%3N)
+    out=$(command openstack "$@")
+    rc=$?
+    end=$(date +%s%3N)
+    echo $((end - start)) >> $OSCWRAP_TIMER_FILE
+
+    echo "$out"
+    $xtrace
+    return $rc
+}

+function install_oscwrap {
+    # File to accumulate our timing data
+    OSCWRAP_TIMER_FILE=$(mktemp)
+    # Bash by default doesn't expand aliases, allow it for the aliases
+    # we want to whitelist.
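+    # (non-interactive shells keep expand_aliases off by default, so the
+    # 'openstack' alias defined below would otherwise never be expanded)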
+ shopt -s expand_aliases + # Remove all aliases that might be expanded to preserve old unexpanded + # behavior + unalias -a + # Add only the alias we want for openstack + alias openstack=oscwrap +} + +function cleanup_oscwrap { + local total=0 + total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))") + _TIME_TOTAL["osc"]=$total + rm $OSCWRAP_TIMER_FILE +} + +# time_totals +# Print out total time summary +function time_totals { + local elapsed_time + local end_time + local len=20 + local xtrace + local unaccounted_time + + end_time=$(date +%s) + elapsed_time=$(($end_time - $_TIME_BEGIN)) + unaccounted_time=$elapsed_time + + # pad 1st column this far + for t in ${!_TIME_TOTAL[*]}; do + if [[ ${#t} -gt $len ]]; then + len=${#t} + fi + done + + cleanup_oscwrap + + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo + echo "=========================" + echo "DevStack Component Timing" + echo " (times are in seconds) " + echo "=========================" + for t in ${!_TIME_TOTAL[*]}; do + local v=${_TIME_TOTAL[$t]} + # because we're recording in milliseconds + v=$(($v / 1000)) + printf "%-${len}s %3d\n" "$t" "$v" + unaccounted_time=$(($unaccounted_time - $v)) + done + echo "-------------------------" + printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time" + echo "=========================" + printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" + + $xtrace +} + +function clean_pyc_files { + # Clean up all *.pyc files + if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then + sudo find $DEST -name "*.pyc" -delete + fi +} + +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} # Restore xtrace -$XTRACE +$_XTRACE_FUNCTIONS_COMMON # Local variables: # mode: shell-script diff --git a/gate/updown.sh b/gate/updown.sh new file mode 100755 index 0000000000..f46385cfe1 --- /dev/null +++ b/gate/updown.sh @@ -0,0 +1,24 @@ +#!/bin/bash -xe +# +# An up / down test for gate functional testing +# +# Note: this is expected to start running as jenkins + +# Step 1: give back sudoers permissions to DevStack +TEMPFILE=`mktemp` +echo "stack ALL=(root) NOPASSWD:ALL" >$TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/51_stack_sh + +# TODO: do something to start a guest to create crud that should +# disappear + +# Step 2: unstack +echo "Running unstack.sh" +sudo -H -u stack stdbuf -oL -eL bash -ex ./unstack.sh + +# Step 3: clean +echo "Running clean.sh" +sudo -H -u stack stdbuf -oL -eL bash -ex ./clean.sh + diff --git a/inc/async b/inc/async new file mode 100644 index 0000000000..56338f5343 --- /dev/null +++ b/inc/async @@ -0,0 +1,256 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... +# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. 
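+# Each line is tagged "[<pid> Async <name>:<jobpid>]" so individual
+# jobs can be grepped out of the combined log.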
+# If the message contains "%command" then the
+# full command line of the job will be substituted in the output.
+function async_log {
+    local name="$1"
+    shift
+    local message="$*"
+    local inifile=${DEST}/async/${name}.ini
+    local pid
+    local command
+
+    pid=$(iniget $inifile job pid)
+    command=$(iniget $inifile job command | tr '#' '-')
+    message=$(echo "$message" | sed "s#%command#$command#g")
+
+    echo "[$BASHPID Async ${name}:${pid}]: $message"
+}

+# Inner function that actually runs the requested task. We wrap it like this
+# just so we can emit a finish message as soon as the work is done, to make
+# it easier to find the tracking just before an error.
+function async_inner {
+    local name="$1"
+    local rc
+    local fifo="${DEST}/async/${name}.fifo"
+    shift
+    set -o xtrace
+    if $* >${DEST}/async/${name}.log 2>&1; then
+        rc=0
+        set +o xtrace
+        async_log "$name" "finished successfully"
+    else
+        rc=$?
+        set +o xtrace
+        async_log "$name" "FAILED with rc $rc"
+    fi
+    iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N")
+    # Block on the fifo until we are signaled to exit by the main process
+    cat $fifo
+    return $rc
+}

+# Run something async. Takes a symbolic name and a list of arguments of
+# what to run. Ideally this would be rarely used and async_runfunc() would
+# be used everywhere for readability.
+#
+# This spawns the work in a background worker, records a "future" to be
+# collected by a later call to async_wait()
+function async_run {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local name="$1"
+    shift
+    local inifile=${DEST}/async/${name}.ini
+    local fifo=${DEST}/async/${name}.fifo
+
+    touch $inifile
+    iniset $inifile job command "$*"
+    iniset $inifile job start_time $(date +%s%3N)
+
+    if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then
+        mkfifo $fifo
+        async_inner $name $* &
+        iniset $inifile job pid $!
+        async_log "$name" "running: %command"
+        $xtrace
+    else
+        iniset $inifile job pid "self"
+        async_log "$name" "Running synchronously: %command"
+        $xtrace
+        $*
+        return $?
+    fi
+}

+# Shortcut for running a shell function async. Uses the function name as the
+# async name.
+function async_runfunc {
+    async_run $1 $*
+}

+# Dump some information to help debug a failed wait
+function async_wait_dump {
+    local failpid=$1
+
+    echo "=== Wait failure dump from $BASHPID ==="
+    echo "Processes:"
+    ps -f
+    echo "Waiting jobs:"
+    for name in $(ls ${DEST}/async/*.ini); do
+        echo "Job $name :"
+        cat "$name"
+    done
+    echo "Failed PID status:"
+    sudo cat /proc/$failpid/status
+    sudo cat /proc/$failpid/cmdline
+    echo "=== End wait failure dump ==="
+}

+# Wait for an async future to complete. May return immediately if already
+# complete, or if the future has already been waited on (avoid this). May
+# block until the future completes.
+function async_wait {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local pid rc running inifile runtime fifo
+    rc=0
+    for name in $*; do
+        running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l)
+        inifile="${DEST}/async/${name}.ini"
+        fifo="${DEST}/async/${name}.fifo"
+
+        if pid=$(async_pidof "$name"); then
+            async_log "$name" "Waiting for completion of %command" \
+                "running on PID $pid ($running other jobs running)"
+            time_start async_wait
+            if [[ "$pid" != "self" ]]; then
+                # Signal the child to go ahead and exit since we are about to
+                # wait for it to collect its status.
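+                # (async_inner blocks on 'cat $fifo', so this write is
+                # what releases the child)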
+                async_log "$name" "Signaling child to exit"
+                echo WAKEUP > $fifo
+                async_log "$name" "Signaled"
+                # Do not actually call wait if we ran synchronously
+                if wait $pid; then
+                    rc=0
+                else
+                    rc=$?
+                fi
+                cat ${DEST}/async/${name}.log
+                rm -f $fifo
+            fi
+            time_stop async_wait
+            local start_time
+            local end_time
+            start_time=$(iniget $inifile job start_time)
+            end_time=$(iniget $inifile job end_time)
+            _async_incr_bg_time $(($end_time - $start_time))
+            runtime=$((($end_time - $start_time) / 1000))
+            async_log "$name" "finished %command with result" \
+                "$rc in $runtime seconds"
+            rm -f $inifile
+            if [ $rc -ne 0 ]; then
+                async_wait_dump $pid
+                echo Stopping async wait due to error: $*
+                break
+            fi
+        else
+            # This could probably be removed - it is really just here
+            # to help notice if you wait for something by the wrong
+            # name, but it also shows up for things we didn't start
+            # because they were not enabled.
+            echo Not waiting for async task $name that we never started or \
+                has already been waited for
+        fi
+    done
+
+    $xtrace
+    return $rc
+}

+# Check for uncollected futures and wait on them
+function async_cleanup {
+    local name
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    for inifile in $(find ${DEST}/async -name '*.ini'); do
+        name=$(basename $inifile .ini)
+        echo "WARNING: uncollected async future $name"
+        async_wait $name || true
+    done
+}

+# Make sure our async dir is created and clean
+function async_init {
+    local async_dir=${DEST}/async
+
+    # Clean any residue if present from previous runs
+    rm -Rf $async_dir
+
+    # Make sure we have a state directory
+    mkdir -p $async_dir
+}

+function async_print_timing {
+    local bg_time_minus_wait
+    local elapsed_time
+    local serial_time
+    local speedup
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    # The logic here is: All the background task time would be
+    # serialized if we did not do them in the background. So we can
+    # add that to the elapsed time for the whole run. However, time we
+    # spend waiting for async things to finish adds to the elapsed
+    # time, but is time where we're not doing anything useful. Thus,
+    # we subtract that from the would-be-serialized time.
+
+    bg_time_minus_wait=$((\
+        ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000))
+    elapsed_time=$(($(date "+%s") - $_TIME_BEGIN))
+    serial_time=$(($elapsed_time + $bg_time_minus_wait))
+
+    echo
+    echo "================="
+    echo " Async summary"
+    echo "================="
+    echo " Time spent in the background minus waits: $bg_time_minus_wait sec"
+    echo " Elapsed time: $elapsed_time sec"
+    echo " Time if we did everything serially: $serial_time sec"
+    echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}")
+}
diff --git a/inc/ini-config b/inc/ini-config
new file mode 100644
index 0000000000..920d4775fa
--- /dev/null
+++ b/inc/ini-config
@@ -0,0 +1,458 @@
+#!/bin/bash
+#
+# **inc/ini-config** - Configuration/INI functions
+#
+# Support for manipulating INI-style configuration files
+#
+# These functions have no external dependencies and no side-effects

+# Save trace setting
+INC_CONF_TRACE=$(set +o | grep xtrace)
+set +o xtrace

+# Config Functions
+# ================

+# Append a new option in an ini file without replacing the old value
+# iniadd [-sudo] config-file section option value1 value2 value3 ...
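+# e.g. "iniadd x.conf DEFAULT opt c d" on a file already holding
+# "opt = a" and "opt = b" leaves four opt lines: a, b, c and d
+# (x.conf here is just an illustrative file name).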
+function iniadd {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="-sudo "
+        shift
+    fi
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+
+    local values
+    values="$(iniget_multiline $file $section $option) $@"
+    iniset_multiline $sudo $file $section $option $values
+    $xtrace
+}

+# Comment an option in an INI file
+# inicomment [-sudo] config-file section option
+function inicomment {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
+    local file=$1
+    local section=$2
+    local option=$3
+
+    $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+    $xtrace
+}

+# Get an option from an INI file
+# iniget config-file section option
+function iniget {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    echo ${line#*=}
+    $xtrace
+}

+# Get a multiple line option from an INI file
+# iniget_multiline config-file section option
+function iniget_multiline {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local values
+
+    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
+    echo ${values}
+    $xtrace
+}

+# Determine if the given option is present in the INI file
+# ini_has_option [-sudo] config-file section option
+function ini_has_option {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+
+    line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    $xtrace
+    [ -n "$line" ]
+}

+# Add another config line for a multi-line option.
+# It's normally called after iniset of the same option and assumes
+# that the section already exists.
+#
+# Note that iniset_multiline requires all the 'lines' to be supplied
+# in the argument list. Doing that will cause incorrect configuration
+# if spaces are used in the config values.
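+# iniadd_literal, by contrast, appends exactly one literal line, so
+# embedded spaces in the value survive intact.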
+# +# iniadd_literal [-sudo] config-file section option value +function iniadd_literal { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + # Add it + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + + $xtrace +} + +# Remove an option from an INI file +# inidelete [-sudo] config-file section option +function inidelete { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + # Remove old values + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + + $xtrace +} + +# Set an option in an INI file +# iniset [-sudo] config-file section option value +# - if the file does not exist, it is created +function iniset { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + local sudo_option="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + sudo_option="-sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null + fi + if ! ini_has_option $sudo_option "$file" "$section" "$option"; then + # Add it + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('"${option}"'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi + $xtrace +} + +# Set a multiple line option in an INI file +# iniset_multiline [-sudo] config-file section option value1 value2 value3 ... +function iniset_multiline { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! 
$sudo grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null + else + # Remove old values + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done + $xtrace +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace +} + +# Get list of sections from an INI file +# iniget_sections config-file +function iniget_sections { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + + echo $(sed -ne "s/^\[\(.*\)\]/\1/p" "$file") + $xtrace +} + +# Set a localrc var +function localrc_set { + local file=$1 + local group="local" + local conf="localrc" + local section="" + local option=$2 + local value=$3 + localconf_set "$file" "$group" "$conf" "$section" "$option" "$value" +} + +# Check if local.conf has section. +function localconf_has_section { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local sep + sep=$(echo -ne "\x01") + local line + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/p + }" "$file") + [ -n "$line" ] +} + +# Check if local.conf has option. +function localconf_has_option { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local sep + sep=$(echo -ne "\x01") + local line + if [[ -z "$section" ]]; then + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /${option}[ \t]*=.*$/p + }" "$file") + else + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/{ + /${option}[ \t]*=.*$/p} + }" "$file") + fi + [ -n "$line" ] +} + +# Update option in local.conf. +function localconf_update_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + fi +} + +# Add option in local.conf. +function localconf_add_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} a $option=$value" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/ a $option=$value + }" "$file" + fi +} + +# Add section and option in local.conf. 
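+# e.g. for group "post-config" and conf "$NOVA_CONF" this appends the
+# [section] header and its option=value line directly under the
+# existing [[post-config|$NOVA_CONF]] block.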
+function localconf_add_section_and_option {
+    local sudo=$1
+    local file=$2
+    local group=$3
+    local conf=$4
+    local section=$5
+    local option=$6
+    local value=$7
+    local sep
+    sep=$(echo -ne "\x01")
+    $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} {
+        a [$section]
+        a $option=$value
+    }" "$file"
+}

+# Set an option in a local.conf file.
+# localconf_set [-sudo] config-file group conf-name section option value
+# - if the file does not exist, it is created
+function localconf_set {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local sep
+    sep=$(echo -ne "\x01")
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
+    local file=$1
+    local group=$2
+    local conf=$3
+    local section=$4
+    local option=$5
+    local value=$6
+
+    if [[ -z $group || -z $conf || -z $option || -z $value ]]; then
+        $xtrace
+        return
+    fi
+
+    if ! grep -q "^\[\[${group}|${conf}\]\]" "$file" 2>/dev/null; then
+        # Add meta section at the end if it does not exist
+        echo -e "\n[[${group}|${conf}]]" | $sudo tee --append "$file" > /dev/null
+        # Add section at the end
+        if [[ -n "$section" ]]; then
+            echo -e "[$section]" | $sudo tee --append "$file" > /dev/null
+        fi
+        # Add option at the end
+        echo -e "$option=$value" | $sudo tee --append "$file" > /dev/null
+    elif [[ -z "$section" ]]; then
+        if ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then
+            # Add option
+            localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+        else
+            # Replace it
+            localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+        fi
+    elif ! localconf_has_section "$file" "$group" "$conf" "$section"; then
+        # Add section and option in specified meta section
+        localconf_add_section_and_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    elif ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then
+        # Add option
+        localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    else
+        # Replace it
+        localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    fi
+    $xtrace
+}

+# Restore xtrace
+$INC_CONF_TRACE

+# Local variables:
+# mode: shell-script
+# End:
diff --git a/inc/meta-config b/inc/meta-config
new file mode 100644
index 0000000000..1215bb8307
--- /dev/null
+++ b/inc/meta-config
@@ -0,0 +1,226 @@
+#!/bin/bash
+#
+# **inc/meta-config** - Configuration file manipulation functions
+#
+# Support for DevStack's local.conf meta-config sections
+#
+# These functions have no external dependencies and the following side-effects:
+#
+# CONFIG_AWK_CMD is defined, default is ``awk``

+# Meta-config files contain multiple INI-style configuration files
+# using a specific new section header to delimit them:
+#
+#   [[group-name|file-name]]
+#
+# group-name refers to the group of configuration file changes to be processed
+# at a particular time. These are called phases in ``stack.sh`` but are
+# called groups here, as these functions are not DevStack-specific.
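+# For example, a header of
+#
+#   [[post-config|$NOVA_CONF]]
+#
+# selects the "post-config" group and the file $NOVA_CONF expands to.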
+#
+# file-name is the destination of the config file

+# Save trace setting
+_XTRACE_INC_META=$(set +o | grep xtrace)
+set +o xtrace

+# Allow the awk command to be overridden on legacy platforms
+CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk}

+# Get the section for the specific group and config file
+# get_meta_section infile group configfile
+function get_meta_section {
+    local file=$1
+    local matchgroup=$2
+    local configfile=$3
+
+    [[ -r $file ]] || return 0
+    [[ -z $configfile ]] && return 0
+
+    $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile '
+        BEGIN { group = "" }
+        /^\[\[.+\|.*\]\]/ {
+            gsub("[][]", "", $1);
+            split($1, a, "|");
+            if (a[1] == matchgroup && a[2] == configfile) {
+                group=a[1]
+            } else {
+                group=""
+            }
+            next
+        }
+        {
+            if (group != "")
+                print $0
+        }
+    ' $file
+}

+# Get a list of config files for a specific group
+# get_meta_section_files infile group
+function get_meta_section_files {
+    local file=$1
+    local matchgroup=$2
+
+    [[ -r $file ]] || return 0
+
+    $CONFIG_AWK_CMD -v matchgroup=$matchgroup '
+        /^\[\[.+\|.*\]\]/ {
+            gsub("[][]", "", $1);
+            split($1, a, "|");
+            if (a[1] == matchgroup)
+                print a[2]
+        }
+    ' $file
+}

+# Merge the contents of a meta-config file into its destination config file
+# If configfile does not exist it will be created.
+# merge_config_file infile group configfile
+function merge_config_file {
+    local file=$1
+    local matchgroup=$2
+    local configfile=$3
+
+    # note, configfile might be a variable (the iniset etc. commands
+    # created in the mega-awk below are "eval"ed too), so we just leave
+    # it alone.
+    local real_configfile
+    real_configfile=$(eval echo $configfile)
+    if [ ! -f $real_configfile ]; then
+        touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)"
+    fi
+
+    get_meta_section $file $matchgroup $configfile | \
+        $CONFIG_AWK_CMD -v configfile=$configfile '
+        BEGIN {
+            section = ""
+            last_section = ""
+            section_count = 0
+        }
+        /^\[.+\]/ {
+            gsub("[][]", "", $1);
+            section=$1
+            next
+        }
+        /^ *\#/ {
+            next
+        }
+        /^[^ \t]+/ {
+            # get offset of first '=' in $0
+            eq_idx = index($0, "=")
+            # extract attr & value from $0
+            attr = substr($0, 1, eq_idx - 1)
+            value = substr($0, eq_idx + 1)
+            # only need to strip trailing whitespace from attr
+            sub(/[ \t]*$/, "", attr)
+            # need to strip leading & trailing whitespace from value
+            sub(/^[ \t]*/, "", value)
+            sub(/[ \t]*$/, "", value)
+
+            # cfg_attr_count: number of config lines per [section, attr]
+            # cfg_attr: three dimensional array to keep all the config lines per [section, attr]
+            # cfg_section: keep the section names in the same order as they appear in local.conf
+            # cfg_sec_attr_name: keep the attr names in the same order as they appear in local.conf
+            if (! (section, attr) in cfg_attr_count) {
+                if (section != last_section) {
+                    cfg_section[section_count++] = section
+                    last_section = section
+                }
+                attr_count = cfg_sec_attr_count[section_count - 1]++
+                cfg_sec_attr_name[section_count - 1, attr_count] = attr
+
+                cfg_attr[section, attr, 0] = value
+                cfg_attr_count[section, attr] = 1
+            } else {
+                lno = cfg_attr_count[section, attr]++
+                cfg_attr[section, attr, lno] = value
+            }
+        }
+        END {
+            # Process each section in order
+            for (sno = 0; sno < section_count; sno++) {
+                section = cfg_section[sno]
+                # The ini routines simply append a config item immediately
+                # after the section header.
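+                # (e.g. iniset-ing a and then b would leave b above a
+                # in the resulting file)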
+                # To keep the same order as defined
+                # in local.conf, invoke the ini routines in the reverse order
+                for (attr_no = cfg_sec_attr_count[sno] - 1; attr_no >=0; attr_no--) {
+                    attr = cfg_sec_attr_name[sno, attr_no]
+                    if (cfg_attr_count[section, attr] == 1)
+                        print "iniset " configfile " " section " " attr " \"" cfg_attr[section, attr, 0] "\""
+                    else {
+                        # For multiline, invoke the ini routines in the reverse order
+                        count = cfg_attr_count[section, attr]
+                        print "inidelete " configfile " " section " " attr
+                        print "iniset " configfile " " section " " attr " \"" cfg_attr[section, attr, count - 1] "\""
+                        for (l = count -2; l >= 0; l--)
+                            print "iniadd_literal " configfile " " section " " attr " \"" cfg_attr[section, attr, l] "\""
+                    }
+                }
+            }
+        }
+    ' | while read a; do eval "$a"; done
+}

+# Merge all of the files specified by group
+# merge_config_group infile group [group ...]
+function merge_config_group {
+    local localfile=$1; shift
+    local matchgroups=$@
+
+    [[ -r $localfile ]] || return 0
+
+    local configfile group
+    for group in $matchgroups; do
+        for configfile in $(get_meta_section_files $localfile $group); do
+            local realconfigfile
+            local dir
+
+            realconfigfile=$(eval "echo $configfile")
+            if [[ -z $realconfigfile ]]; then
+                warn $LINENO "unknown config file specification: $configfile is undefined"
+                break
+            fi
+            dir=$(dirname $realconfigfile)
+
+            test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)"
+
+            if ! [[ -e $dir ]] ; then
+                sudo mkdir -p $dir || die $LINENO "could not create the directory of $realconfigfile ($configfile)"
+                sudo chown ${STACK_USER} $dir
+            fi
+
+            merge_config_file $localfile $group $configfile
+        done
+    done
+}

+function extract_localrc_section {
+    local configfile=$1    # top_dir/local.conf
+    local localrcfile=$2   # top_dir/localrc
+    local localautofile=$3 # top_dir/.localrc.auto
+
+    if [[ -r $configfile ]]; then
+        LRC=$(get_meta_section_files $configfile local)
+        for lfile in $LRC; do
+            if [[ "$lfile" == "localrc" ]]; then
+                if [[ -r $localrcfile ]]; then
+                    echo "localrc and local.conf:[[local]] both exist, using localrc"
+                else
+                    echo "# Generated file, do not edit" >$localautofile
+                    get_meta_section $configfile local $lfile >>$localautofile
+                fi
+            fi
+        done
+    fi
+}

+# Restore xtrace
+$_XTRACE_INC_META

+# Local variables:
+# mode: shell-script
+# End:
diff --git a/inc/python b/inc/python
new file mode 100644
index 0000000000..3969c1fa82
--- /dev/null
+++ b/inc/python
@@ -0,0 +1,507 @@
+#!/bin/bash
+#
+# **inc/python** - Python-related functions
+#
+# Support for pip/setuptools interfaces and virtual environments
+#
+# External functions used:
+# - GetOSVersion
+# - is_fedora
+# - safe_chown

+# Save trace setting
+INC_PY_TRACE=$(set +o | grep xtrace)
+set +o xtrace

+# Global Config Variables

+# PROJECT_VENV contains the name of the virtual environment for each
+# project. A null value installs to the system Python directories.
+declare -A -g PROJECT_VENV

+# Utility Functions
+# =================

+# Joins bash array of extras with commas as expected by other functions
+function join_extras {
+    local IFS=","
+    echo "$*"
+}

+# Python Functions
+# ================

+# Setup the global devstack virtualenvs and the associated environment
+# updates.
+function setup_devstack_virtualenv {
+    # We run devstack out of a global virtualenv.
+    if [[ ! -d $DEVSTACK_VENV ]] ; then
+        # Using system site packages to enable nova to use libguestfs.
+ # This package is currently installed via the distro and not + # available on pypi. + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + +# Get the path to the pip command. +# get_pip_command +function get_pip_command { + local version="$1" + if [ -z "$version" ]; then + die $LINENO "pip python version is not set." + fi + + # NOTE(dhellmann): I don't know if we actually get a pip3.4-python + # under any circumstances. + which pip${version} || which pip${version}-python + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip${version}; cannot continue" + fi +} + +# Get the path to the directory where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + $xtrace + + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi +} + +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr packagename +function pip_install_gr { + local name=$1 + local clean_name + clean_name=$(get_from_global_requirements $name) + pip_install $clean_name +} + +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification with extras. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr_extras packagename extra1,extra2,... +function pip_install_gr_extras { + local name=$1 + local extras=$2 + local version_constraints + version_constraints=$(get_version_constraints_from_global_requirements $name) + pip_install $name[$extras]$version_constraints +} + +# enable_python3_package() -- no-op for backwards compatibility +# +# enable_python3_package dir [dir ...] +function enable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer necessary to call enable_python3_package()." + + $xtrace +} + +# disable_python3_package() -- no-op for backwards compatibility +# +# disable_python3_package dir [dir ...] +function disable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer possible to call disable_python3_package()." + + $xtrace +} + +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, +# ``PIP_UPGRADE``, ``*_proxy``, +# Usage: +# pip_install pip_arguments +function pip_install { + local xtrace result + xtrace=$(set +o | grep xtrace) + set +o xtrace + local upgrade="" + local offline=${OFFLINE:-False} + if [[ "$offline" == "True" || -z "$@" ]]; then + $xtrace + return + fi + + time_start "pip_install" + + PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE) + if [[ "$PIP_UPGRADE" = "True" ]] ; then + upgrade="--upgrade" + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + # Try to extract the path of the package we are installing into + # package_dir. We need this to check for test-requirements.txt, + # at least. + # + # ${!#} expands to the last positional argument to this function. 
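+    # (indirect expansion: $# holds the number of arguments, so ${!#}
+    # reads the variable named by $#, i.e. the last argument itself)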
+    # With "extras" syntax included, our arguments might be something
+    # like:
+    #   -e /path/to/fooproject[extra]
+    # Thus this magic line grabs just the path without extras
+    #
+    # Note that this makes no sense if this is a pypi (rather than
+    # local path) install; ergo you must check this path exists before
+    # use. Also, if we had multiple or mixed installs, we would also
+    # likely break. But for historical reasons, it's basically only
+    # the other wrapper functions in here calling this to install
+    # local packages, and they do so with single call per install. So
+    # this works (for now...)
+    local package_dir=${!#%\[*\]}
+
+    if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+        local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
+        local sudo_pip="env"
+    elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then
+        # We have to check that the DEVSTACK_VENV exists because early
+        # devstack bootstrapping needs to operate in a system context
+        # to bootstrap pip. Once pip is bootstrapped we create the
+        # global venv and can start to use it.
+        local cmd_pip=$DEVSTACK_VENV/bin/pip
+        local sudo_pip="env"
+        echo "Using python $PYTHON3_VERSION to install $package_dir"
+    else
+        local cmd_pip="python$PYTHON3_VERSION -m pip"
+        local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
+        echo "Using python $PYTHON3_VERSION to install $package_dir"
+    fi
+
+    cmd_pip="$cmd_pip install"
+    # Always apply constraints
+    cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
+
+    $xtrace
+
+    $sudo_pip \
+        http_proxy="${http_proxy:-}" \
+        https_proxy="${https_proxy:-}" \
+        no_proxy="${no_proxy:-}" \
+        PIP_FIND_LINKS=$PIP_FIND_LINKS \
+        $cmd_pip $upgrade \
+        $@
+    result=$?
+
+    time_stop "pip_install"
+    return $result
+}

+function pip_uninstall {
+    # Skip uninstall if offline
+    [[ "${OFFLINE}" = "True" ]] && return
+
+    local name=$1
+    if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+        local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
+        local sudo_pip="env"
+    else
+        local cmd_pip="python$PYTHON3_VERSION -m pip"
+        local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
+    fi
+    # don't error if we can't uninstall, it might not be there
+    $sudo_pip $cmd_pip uninstall -y $name || /bin/true
+}

+# get version of a package from global requirements file
+# get_from_global_requirements <package>
+function get_from_global_requirements {
+    local package=$1
+    local required_pkg
+    required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    if [[ $required_pkg == "" ]]; then
+        die $LINENO "Can't find package $package in requirements"
+    fi
+    echo $required_pkg
+}

+# get only version constraints of a package from global requirements file
+# get_version_constraints_from_global_requirements <package>
+function get_version_constraints_from_global_requirements {
+    local package=$1
+    local required_pkg_version_constraint
+    # drop the package name from output (\K)
+    required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    if [[ $required_pkg_version_constraint == "" ]]; then
+        die $LINENO "Can't find package $package in requirements"
+    fi
+    echo $required_pkg_version_constraint
+}

+# should we use this library from their git repo, or should we let it
+# get pulled in via pip dependencies.
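+# e.g. with LIBS_FROM_GIT=oslo.messaging in local.conf,
+# use_library_from_git "oslo.messaging" returns 0 and the library gets
+# installed from its git checkout instead of from PyPI.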
+function use_library_from_git {
+    local name=$1
+    local enabled=1
+    [[ ${LIBS_FROM_GIT} = 'ALL' ]] || [[ ,${LIBS_FROM_GIT}, =~ ,${name}, ]] && enabled=0
+    return $enabled
+}

+# determine if a package was installed from git
+function lib_installed_from_git {
+    local name=$1
+    local safe_name
+    safe_name=$(python -c "from packaging.utils import canonicalize_name; print(canonicalize_name('${name}'))")
+    # Note "pip freeze" doesn't always work here, because it tries to
+    # be smart about finding the remote of the git repo the package
+    # was installed from. This doesn't work with zuul which clones
+    # repos with no remote.
+    #
+    # The best option seems to be to use "pip list" which will tell
+    # you the path an editable install was installed from; for example
+    # in response to something like
+    #  pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate'
+    # pip list --format columns shows
+    #  bashate 0.5.2.dev19 /tmp/env/src/bashate
+    # Thus we check the third column to see if we're installed from
+    # some local place.
+    [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]]
+}

+# setup a library by name. If we are trying to use the library from
+# git, we'll do a git based install, otherwise we'll punt and the
+# library should be installed by a requirements pull from another
+# project.
+function setup_lib {
+    local name=$1
+    local dir=${GITDIR[$name]}
+    setup_install $dir
+}

+# setup a library by name in editable mode. If we are trying to use
+# the library from git, we'll do a git based install, otherwise we'll
+# punt and the library should be installed by a requirements pull from
+# another project.
+#
+# use this for non namespaced libraries
+#
+# setup_dev_lib [-bindep] <name> [<extras>]
+function setup_dev_lib {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
+    local name=$1
+    local dir=${GITDIR[$name]}
+    local extras=$2
+    setup_develop $bindep $dir $extras
+}

+# this should be used if you want to install globally, all libraries should
+# use this, especially *oslo* ones
+#
+# setup_install project_dir [extras]
+#   project_dir: directory of project repo (e.g., /opt/stack/keystone)
+#   extras: comma-separated list of optional dependencies to install
+#           (e.g., ldap,memcache).
+#           See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+#   bindep: Set "-bindep" as first argument to install bindep.txt packages
+# The command is like "pip install <project_dir>[<extras>]"
+function setup_install {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
+    local project_dir=$1
+    local extras=$2
+    _setup_package_with_constraints_edit $bindep $project_dir "" $extras
+}

+# this should be used for projects which run services, like all services
+#
+# setup_develop project_dir [extras]
+#   project_dir: directory of project repo (e.g., /opt/stack/keystone)
+#   extras: comma-separated list of optional dependencies to install
+#           (e.g., ldap,memcache).
+#           See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+# The command is like "pip install -e <project_dir>[<extras>]"
+function setup_develop {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
+    local project_dir=$1
+    local extras=$2
+    _setup_package_with_constraints_edit $bindep $project_dir -e $extras
+}

+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`
+#
+# Updates the constraints from REQUIREMENTS_DIR to reflect the
+# future installed state of this package.
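+# (the package's entry is dropped from upper-constraints.txt via the
+# edit-constraints helper below)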
+# This ensures when we
+# install this package we get the from-source version.
+#
+# Uses globals ``REQUIREMENTS_DIR``
+# _setup_package_with_constraints_edit project_dir flags [extras]
+#   project_dir: directory of project repo (e.g., /opt/stack/keystone)
+#   flags: pip CLI options/flags
+#   extras: comma-separated list of optional dependencies to install
+#           (e.g., ldap,memcache).
+#           See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+# The command is like "pip install <flags> <project_dir>[<extras>]"
+function _setup_package_with_constraints_edit {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
+    local project_dir=$1
+    local flags=$2
+    local extras=$3
+
+    # Normalize the directory name to avoid
+    # "installation from path or url cannot be constrained to a version"
+    # error.
+    # REVISIT(yamamoto): Remove this when fixed in pip.
+    # https://github.com/pypa/pip/pull/3582
+    project_dir=$(cd $project_dir && pwd)
+
+    if [ -n "$REQUIREMENTS_DIR" ]; then
+        # Remove this package from constraints before we install it.
+        # That way, later installs won't "downgrade" the install from
+        # source we are about to do.
+        local name
+        name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        if [ -z $name ]; then
+            name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml)
+        fi
+        $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+            $REQUIREMENTS_DIR/upper-constraints.txt -- $name
+    fi
+
+    setup_package $bindep $project_dir "$flags" $extras
+
+    # If this project is in LIBS_FROM_GIT, verify it was actually installed
+    # correctly. This helps catch errors caused by constraints mismatches.
+    if use_library_from_git "$project_dir"; then
+        if ! lib_installed_from_git "$project_dir"; then
+            die $LINENO "The following LIBS_FROM_GIT was not installed correctly: $project_dir"
+        fi
+    fi
+}

+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`. The command is like
+# "pip install <flags> <project_dir>[<extras>]"
+#
+# Uses globals ``STACK_USER``
+#
+# Usage:
+#  setup_package [-bindep[=profile,profile]] <project_dir> <flags> [extras]
+#
+#  -bindep     : Use bindep to install dependencies; select extra profiles
+#                as comma separated arguments after "="
+#  project_dir : directory of project repo (e.g., /opt/stack/keystone)
+#  flags       : pip CLI options/flags
+#  extras      : comma-separated list of optional dependencies to install
+#                (e.g., ldap,memcache).
+#                See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+function setup_package {
+    local bindep=0
+    local bindep_flag=""
+    local bindep_profiles=""
+    if [[ $1 == -bindep* ]]; then
+        bindep=1
+        IFS="=" read bindep_flag bindep_profiles <<< ${1}
+        shift
+    fi
+    local project_dir=$1
+    local flags=$2
+    local extras=$3
+
+    # if the flags variable exists, and it doesn't look like a flag,
+    # assume it's actually the extras list.
+    if [[ -n "$flags" && -z "$extras" && ! "$flags" =~ ^-.* ]]; then
+        extras=$flags
+        flags=""
+    fi
+
+    if [[ ! -z "$extras" ]]; then
-z "$extras" ]]; then + extras="[$extras]" + fi + + # install any bindep packages + if [[ $bindep == 1 ]]; then + install_bindep $project_dir/bindep.txt $bindep_profiles + fi + + pip_install $flags "$project_dir$extras" + # ensure that further actions can do things like setup.py sdist + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi + fi +} + +# Report whether python 3 should be used +# TODO(frickler): drop this once all legacy uses are removed +function python3_enabled { + return 0 +} + +# Provide requested python version and sets PYTHON variable +function install_python { + install_python3 + export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null) +} + +# Install python3 packages +function install_python3 { + if is_ubuntu; then + apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev + elif is_fedora; then + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip + fi +} + +function install_devstack_tools { + # intentionally old to ensure devstack-gate has control + local dstools_version=${DSTOOLS_VERSION:-0.1.2} + install_python3 + sudo pip3 install -U devstack-tools==${dstools_version} +} + +# Restore xtrace +$INC_PY_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/inc/rootwrap b/inc/rootwrap new file mode 100644 index 0000000000..4c65440a4e --- /dev/null +++ b/inc/rootwrap @@ -0,0 +1,94 @@ +#!/bin/bash +# +# **inc/rootwrap** - Rootwrap functions +# +# Handle rootwrap's foibles + +# Uses: ``STACK_USER`` +# Defines: ``SUDO_SECURE_PATH_FILE`` + +# Save trace setting +INC_ROOT_TRACE=$(set +o | grep xtrace) +set +o xtrace + +# Accumulate all additions to sudo's ``secure_path`` in one file read last +# so they all work in a venv configuration +SUDO_SECURE_PATH_FILE=${SUDO_SECURE_PATH_FILE:-/etc/sudoers.d/zz-secure-path} + +# Add a directory to the common sudo ``secure_path`` +# add_sudo_secure_path dir +function add_sudo_secure_path { + local dir=$1 + local line + + # This is pretty simplistic for now - assume only the first line is used + if [[ -r $SUDO_SECURE_PATH_FILE ]]; then + line=$(head -1 $SUDO_SECURE_PATH_FILE) + else + line="Defaults:$STACK_USER secure_path=/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin" + fi + + # Only add ``dir`` if it is not already present + if [[ ! 
$line =~ $dir ]]; then + echo "${line}:$dir" | sudo tee $SUDO_SECURE_PATH_FILE + sudo chmod 400 $SUDO_SECURE_PATH_FILE + sudo chown root:root $SUDO_SECURE_PATH_FILE + fi +} + +# Configure rootwrap +# Make a load of assumptions otherwise we'll have 6 arguments +# configure_rootwrap project +function configure_rootwrap { + local project=$1 + local project_uc + project_uc=$(echo $1|tr a-z A-Z) + local bin_dir="${project_uc}_BIN_DIR" + bin_dir="${!bin_dir}" + local project_dir="${project_uc}_DIR" + project_dir="${!project_dir}" + + local rootwrap_conf_src_dir="${project_dir}/etc/${project}" + local rootwrap_bin="${bin_dir}/${project}-rootwrap" + + # Start fresh with rootwrap filters + sudo rm -rf /etc/${project}/rootwrap.d + sudo install -d -o root -g root -m 755 /etc/${project}/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.d/*.filters /etc/${project}/rootwrap.d + + # Set up rootwrap.conf, pointing to /etc/*/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf + sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + + # Set up the rootwrap sudoers + local tempfile + tempfile=$(mktemp) + # Specify rootwrap.conf as first parameter to rootwrap + rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *" + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile + if [ -f ${bin_dir}/${project}-rootwrap-daemon ]; then + # rootwrap daemon does not need any parameters + rootwrap_sudo_cmd="${rootwrap_bin}-daemon /etc/${project}/rootwrap.conf" + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >>$tempfile + fi + chmod 0440 $tempfile + sudo chown root:root $tempfile + sudo mv $tempfile /etc/sudoers.d/${project}-rootwrap + + # Add bin dir to sudo's secure_path because rootwrap is being called + # without a path because BROKEN. + add_sudo_secure_path $(dirname $rootwrap_bin) +} + + +# Restore xtrace +$INC_ROOT_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/apache b/lib/apache index 2d5e39a65d..b3379a7cde 100644 --- a/lib/apache +++ b/lib/apache @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/apache # Functions to control configuration and operation of apache web server @@ -8,9 +10,8 @@ # # lib/apache exports the following functions: # -# - is_apache_enabled_service # - install_apache_wsgi -# - config_apache_wsgi +# - apache_site_config_for # - enable_apache_site # - disable_apache_site # - start_apache_server @@ -18,7 +19,7 @@ # - restart_apache_server # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_APACHE=$(set +o | grep xtrace) set +o xtrace # Allow overriding the default Apache user and group, default to @@ -26,36 +27,102 @@ set +o xtrace APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} +APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST] +fi + # Set up apache name and configuration directory +# Note that APACHE_CONF_DIR is really more accurately apache's vhost +# configuration dir but we can't just change this because public interfaces. 
if is_ubuntu; then APACHE_NAME=apache2 - APACHE_CONF_DIR=sites-available + APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled} elif is_fedora; then APACHE_NAME=httpd - APACHE_CONF_DIR=conf.d -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=vhosts.d + APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi +APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Functions # --------- -# is_apache_enabled_service() checks if the service(s) specified as arguments are -# apache enabled by the user in ``APACHE_ENABLED_SERVICES`` as web front end. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# Uses global ``APACHE_ENABLED_SERVICES`` -# APACHE_ENABLED_SERVICES service [service ...] -function is_apache_enabled_service { - services=$@ - for service in ${services}; do - [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - done - return 1 +# Enable apache mod and restart apache if it isn't already enabled. +function enable_apache_mod { + local mod=$1 + local should_restart=$2 + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Skip mod_version as it is not a valid mod to enable + # on debuntu, instead it is built in. + if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then + sudo a2enmod $mod + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi + fi + elif is_fedora; then + # pass + true + else + exit_distro_not_supported "apache enable mod" + fi +} + +# NOTE(sdague): Install uwsgi including apache module, we need to get +# to 2.0.6+ to get a working mod_proxy_uwsgi. We can probably build a +# check for that and do it differently for different platforms. +function install_apache_uwsgi { + local apxs="apxs2" + if is_fedora; then + apxs="apxs" + fi + + if is_ubuntu; then + local pkg_list="uwsgi uwsgi-plugin-python3" + install_package ${pkg_list} + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then + # Note httpd comes with mod_proxy_uwsgi and it is loaded by + # default; the mod_proxy_uwsgi package actually conflicts now. + # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1574335 + # + # Thus there is nothing else to do after this install + install_package uwsgi \ + uwsgi-plugin-python3 + else + # Compile uwsgi from source. + local dir + dir=$(mktemp -d) + pushd $dir + pip_install uwsgi + pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt + local uwsgi + uwsgi=$(ls uwsgi*) + tar xvf $uwsgi + cd uwsgi*/apache2 + sudo $apxs -i -c mod_proxy_uwsgi.c + popd + # delete the temp directory + sudo rm -rf $dir + fi + + if is_ubuntu; then + if ! a2query -m proxy || ! 
a2query -m proxy_uwsgi ; then
+            # we've got to enable proxy and proxy_uwsgi for this to work
+            sudo a2enmod proxy
+            sudo a2enmod proxy_uwsgi
+            restart_apache_server
+        fi
+    fi
+}

 # install_apache_wsgi() - Install Apache server and wsgi module
@@ -63,29 +130,75 @@ function install_apache_wsgi {
     # Apache installation, because we mark it NOPRIME
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
-        install_package apache2 libapache2-mod-wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
+        install_package apache2
+        if is_package_installed libapache2-mod-wsgi; then
+            uninstall_package libapache2-mod-wsgi
+        fi
+        install_package libapache2-mod-wsgi-py3
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
-        install_package httpd mod_wsgi
-    elif is_suse; then
-        install_package apache2 apache2-mod_wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
+        install_package httpd python${PYTHON3_VERSION}-mod_wsgi
+        # rpm distros don't enable httpd by default so enable it to support reboots.
+        sudo systemctl enable httpd
+        # For consistency with Ubuntu, switch to the worker mpm, as
+        # the default is event
+        sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+        sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+        sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf
     else
-        exit_distro_not_supported "apache installation"
+        exit_distro_not_supported "apache wsgi installation"
+    fi
+    # WSGI isn't enabled by default, enable it
+    enable_apache_mod wsgi
+}

+# apache_site_config_for() - The filename of the site's configuration file.
+# This function uses the global variables APACHE_NAME and APACHE_CONF_DIR.
+#
+# On Ubuntu 14.04+, the site configuration file must have a .conf suffix for a2ensite and a2dissite to
+# recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites'
+# files are 000-default.conf and default-ssl.conf.
+#
+# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled.
+#
+# On RHEL and CentOS, things should hopefully work as in Fedora.
+#
+# The table below summarizes what should happen on each distribution:
+# +----------------------+--------------------+--------------------------+--------------------------+
+# | Distribution         | File name          | Site enabling command    | Site disabling command   |
+# +----------------------+--------------------+--------------------------+--------------------------+
+# | Ubuntu 14.04         | site.conf          | a2ensite site            | a2dissite site           |
+# | Fedora, RHEL, CentOS | site.conf.disabled | mv site.conf{.disabled,} | mv site.conf{,.disabled} |
+# +----------------------+--------------------+--------------------------+--------------------------+
+function apache_site_config_for {
+    local site=$@
+    if is_ubuntu; then
+        # Ubuntu 14.04 - Apache 2.4
+        echo $APACHE_CONF_DIR/${site}.conf
+    elif is_fedora; then
+        # fedora conf.d is only imported if it ends with .conf so this is approx the same
+        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
+        if [ -f $enabled_site_file ]; then
+            echo ${enabled_site_file}
+        else
+            echo ${enabled_site_file}.disabled
+        fi
     fi
 }

 # enable_apache_site() - Enable a particular apache site
 function enable_apache_site {
     local site=$@
+    # Many of our sites use mod version. Just enable it.
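+    # (mod_version provides the <IfVersion> directive the site
+    # templates rely on)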
+    enable_apache_mod version
     if is_ubuntu; then
         sudo a2ensite ${site}
     elif is_fedora; then
-        # fedora conf.d is only imported if it ends with .conf so this is approx the same
-        sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site} /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf
+        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
+        # Do nothing if site already enabled or no site config exists
+        if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then
+            sudo mv ${enabled_site_file}.disabled ${enabled_site_file}
+        fi
     fi
 }
@@ -93,9 +206,13 @@ function enable_apache_site {
 function disable_apache_site {
     local site=$@
     if is_ubuntu; then
-        sudo a2dissite ${site}
+        sudo a2dissite ${site} || true
     elif is_fedora; then
-        sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}
+        local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
+        # Do nothing if no site config exists
+        if [[ -f ${enabled_site_file} ]]; then
+            sudo mv ${enabled_site_file} ${enabled_site_file}.disabled
+        fi
     fi
 }
@@ -115,11 +232,193 @@ function stop_apache_server {
 # restart_apache_server
 function restart_apache_server {
+    # Apache can be slow to stop, doing an explicit stop, sleep, start helps
+    # to mitigate issues where apache will claim a port it's listening on is
+    # still in use and fail to start.
     restart_service $APACHE_NAME
 }

+# write_uwsgi_config() - Create a new uWSGI config file
+function write_uwsgi_config {
+    local conf=$1
+    local wsgi=$2
+    local url=$3
+    local http=$4
+    local name=$5
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
+
+    # create a home for the sockets; note don't use /tmp -- apache has
+    # a private view of it on some platforms.
+    local socket_dir='/var/run/uwsgi'
+
+    # /var/run will be empty on ubuntu after reboot, so we can use systemd-tmpfiles
+    # to automatically create $socket_dir.
+    sudo mkdir -p /etc/tmpfiles.d/
+    echo "d $socket_dir 0755 $STACK_USER root" | sudo tee /etc/tmpfiles.d/uwsgi.conf
+    sudo systemd-tmpfiles --create /etc/tmpfiles.d/uwsgi.conf
+
+    local socket="$socket_dir/${name}.socket"
+
+    # always cleanup given that we are using iniset here
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given. Note that the regex isn't exhaustive - neither Python modules
+    # nor Python variables can start with a number - but it's "good enough"
+    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+        iniset "$conf" uwsgi module "$wsgi"
+    else
+        deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+        iniset "$conf" uwsgi wsgi-file "$wsgi"
+    fi
+    iniset "$conf" uwsgi processes $API_WORKERS
+    # This is running standalone
+    iniset "$conf" uwsgi master true
+    # Set die-on-term & exit-on-reload so that uwsgi shuts down
+    iniset "$conf" uwsgi die-on-term true
+    iniset "$conf" uwsgi exit-on-reload false
+    # Set worker-reload-mercy so that a worker will not exit until the
+    # configured time after a graceful shutdown
+    iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi enable-threads true
+    iniset "$conf" uwsgi plugins http,python3
+    # uwsgi recommends this to prevent thundering herd on accept.
+    iniset "$conf" uwsgi thunder-lock true
+    # Set hook to trigger graceful shutdown on SIGTERM
+    iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    # Override the default size for headers from the 4k default.
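+    # (large keystone tokens and service catalogs can push request
+    # headers well past 4k)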
+ iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. + iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + # If we said bind directly to http, then do that and don't start the apache proxy + if [[ -n "$http" ]]; then + iniset "$conf" uwsgi http $http + else + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server + fi +} + +# For services using chunked encoding, the only services known to use this +# currently are Glance and Swift, we need to use an http proxy instead of +# mod_proxy_uwsgi because the chunked encoding gets dropped. See: +# https://github.com/unbit/uwsgi/issues/1540. +function write_local_uwsgi_http_config { + local conf=$1 + local wsgi=$2 + local url=$3 + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. + + # always cleanup given that we are using iniset here + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + port=$(get_random_port) + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$conf" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Override the default size for headers from the 4k default. + iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. 
+ iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false + # Increase socket timeout for slow chunked uploads + iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + enable_apache_mod proxy + enable_apache_mod proxy_http + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +# Write a straight-through proxy for a service that runs locally and just needs +# to be reachable via the main http proxy at $loc +function write_local_proxy_http_config { + local name=$1 + local url=$2 + local loc=$3 + local apache_conf + apache_conf=$(apache_site_config_for $name) + + enable_apache_mod proxy + enable_apache_mod proxy_http + + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +function remove_uwsgi_config { + local conf=$1 + local wsgi=$2 + local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration + name=$(basename $wsgi) + + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + + rm -rf $conf + disable_apache_site $name +} + # Restore xtrace -$XTRACE +$_XTRACE_LIB_APACHE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..25c8e9a83f --- /dev/null +++ b/lib/atop @@ -0,0 +1,49 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + mkdir -p $LOGDIR/atop + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/lib/baremetal b/lib/baremetal deleted file mode 100644 index adcbe4ccad..0000000000 --- a/lib/baremetal +++ /dev/null @@ -1,438 +0,0 @@ -## vim: tabstop=4 shiftwidth=4 softtabstop=4 - -## Copyright (c) 2012 Hewlett-Packard Development Company, L.P. -## All Rights Reserved. -## -## Licensed under the Apache License, Version 2.0 (the "License"); you may -## not use this file except in compliance with the License. 
You may obtain -## a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -## License for the specific language governing permissions and limitations -## under the License. - - -# This file provides devstack with the environment and utilities to -# control nova-compute's baremetal driver. -# It sets reasonable defaults to run within a single host, -# using virtual machines in place of physical hardware. -# However, by changing just a few options, devstack+baremetal can in fact -# control physical hardware resources on the same network, if you know -# the MAC address(es) and IPMI credentials. -# -# At a minimum, to enable the baremetal driver, you must set these in localrc: -# -# VIRT_DRIVER=baremetal -# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" -# -# -# We utilize diskimage-builder to create a ramdisk, and then -# baremetal driver uses that to push a disk image onto the node(s). -# -# Below we define various defaults which control the behavior of the -# baremetal compute service, and inform it of the hardware it will control. -# -# Below that, various functions are defined, which are called by devstack -# in the following order: -# -# before nova-cpu starts: -# -# - prepare_baremetal_toolchain -# - configure_baremetal_nova_dirs -# -# after nova and glance have started: -# -# - build_and_upload_baremetal_deploy_k_and_r $token -# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID -# - upload_baremetal_image $url $token -# - add_baremetal_node - - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Sub-driver settings -# ------------------- - -# sub-driver to use for kernel deployment -# -# - nova.virt.baremetal.pxe.PXE -# - nova.virt.baremetal.tilera.TILERA -BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} - -# sub-driver to use for remote power management -# -# - nova.virt.baremetal.fake.FakePowerManager, for manual power control -# - nova.virt.baremetal.ipmi.IPMI, for remote IPMI -# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware -BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} - - -# These should be customized to your environment and hardware -# ----------------------------------------------------------- - -# To provide PXE, configure nova-network's dnsmasq rather than run the one -# dedicated to baremetal. When enable this, make sure these conditions are -# fulfilled: -# -# 1) nova-compute and nova-network runs on the same host -# 2) nova-network uses FlatDHCPManager -# -# NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option -# is enabled. -BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` - -# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE -BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} -# if testing on a physical network, -# BM_DNSMASQ_RANGE must be changed to suit your network -BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} - -# BM_DNSMASQ_DNS provide dns server to bootstrap clients -BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} - -# BM_FIRST_MAC *must* be set to the MAC address of the node you will -# boot. This is passed to dnsmasq along with the kernel/ramdisk to -# deploy via PXE. -BM_FIRST_MAC=${BM_FIRST_MAC:-} - -# BM_SECOND_MAC is only important if the host has >1 NIC. 
-BM_SECOND_MAC=${BM_SECOND_MAC:-} - -# Hostname for the baremetal nova-compute node, if not run on this host -BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)} - -# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI -BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} -BM_PM_USER=${BM_PM_USER:-user} -BM_PM_PASS=${BM_PM_PASS:-pass} - -# BM_FLAVOR_* options are arbitrary and not necessarily related to -# physical hardware capacity. These can be changed if you are testing -# BaremetalHostManager with multiple nodes and different flavors. -BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} -BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} -BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} -BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10} -BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0} -BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1} -BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small} -BM_FLAVOR_ID=${BM_FLAVOR_ID:-11} -BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} - - -# Below this, we set some path and filenames. -# Defaults are probably sufficient. -DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder} - -# Use DIB to create deploy ramdisk and kernel. -BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` -# If not use DIB, these files are used as deploy ramdisk/kernel. -# (The value must be a relative path from $TOP_DIR/files/) -BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-} -BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} - -# If you need to add any extra flavors to the deploy ramdisk image -# eg, specific network drivers, specify them here -# -# NOTE(deva): this will be moved to lib/ironic in a future patch -# for now, set the default to a suitable value for Ironic's needs -BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic} - -# set URL and version for google shell-in-a-box -BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} - - -# Functions -# --------- - -# Check if baremetal is properly enabled -# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES -# does not contain "baremetal" -function is_baremetal { - if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then - return 0 - fi - return 1 -} - -# Install diskimage-builder and shell-in-a-box -# so that we can build the deployment kernel & ramdisk -function prepare_baremetal_toolchain { - git_clone $DIB_REPO $DIB_DIR $DIB_BUILD_BRANCH - - local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) - if [[ ! -e $DEST/$shellinabox_basename ]]; then - cd $DEST - wget $BM_SHELL_IN_A_BOX - fi - if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then - cd $DEST - tar xzf $shellinabox_basename - fi - if [[ ! $(which shellinaboxd) ]]; then - cd $DEST/${shellinabox_basename%%.tar.gz} - ./configure - make - sudo make install - fi -} - -# prepare various directories needed by baremetal hypervisor -function configure_baremetal_nova_dirs { - # ensure /tftpboot is prepared - sudo mkdir -p /tftpboot - sudo mkdir -p /tftpboot/pxelinux.cfg - - PXEBIN=/usr/share/syslinux/pxelinux.0 - if [ ! -f $PXEBIN ]; then - PXEBIN=/usr/lib/syslinux/pxelinux.0 - if [ ! -f $PXEBIN ]; then - die $LINENO "pxelinux.0 (from SYSLINUX) not found." 
- fi - fi - - sudo cp $PXEBIN /tftpboot/ - sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot - - # ensure $NOVA_STATE_PATH/baremetal is prepared - sudo mkdir -p $NOVA_STATE_PATH/baremetal - sudo mkdir -p $NOVA_STATE_PATH/baremetal/console - sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq - sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host - sudo chown -R $STACK_USER $NOVA_STATE_PATH/baremetal - - # ensure dnsmasq is installed but not running - # because baremetal driver will reconfigure and restart this as needed - is_package_installed dnsmasq || install_package dnsmasq - stop_service dnsmasq -} - -# build deploy kernel+ramdisk, then upload them to glance -# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID -function upload_baremetal_deploy { - token=$1 - - if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then - BM_DEPLOY_KERNEL=bm-deploy.kernel - BM_DEPLOY_RAMDISK=bm-deploy.initramfs - if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then - $DIB_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \ - -o $TOP_DIR/files/bm-deploy - fi - fi - - # load them into glance - BM_DEPLOY_KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_KERNEL \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) - BM_DEPLOY_RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_RAMDISK \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) -} - -# create a basic baremetal flavor, associated with deploy kernel & ramdisk -# -# Usage: create_baremetal_flavor -function create_baremetal_flavor { - aki=$1 - ari=$2 - nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ - $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU - nova flavor-key $BM_FLAVOR_NAME set \ - "cpu_arch"="$BM_FLAVOR_ARCH" \ - "baremetal:deploy_kernel_id"="$aki" \ - "baremetal:deploy_ramdisk_id"="$ari" - -} - -# Pull run-time kernel/ramdisk out of disk image and load into glance. -# Note that $file is currently expected to be in qcow2 format. -# Sets KERNEL_ID and RAMDISK_ID -# -# Usage: extract_and_upload_k_and_r_from_image $token $file -function extract_and_upload_k_and_r_from_image { - token=$1 - file=$2 - image_name=$(basename "$file" ".qcow2") - - # this call returns the file names as "$kernel,$ramdisk" - out=$($DIB_DIR/bin/disk-image-get-kernel \ - -x -d $TOP_DIR/files -o bm-deploy -i $file) - if [ $? 
-ne 0 ]; then - die $LINENO "Failed to get kernel and ramdisk from $file" - fi - XTRACE=$(set +o | grep xtrace) - set +o xtrace - out=$(echo "$out" | tail -1) - $XTRACE - OUT_KERNEL=${out%%,*} - OUT_RAMDISK=${out##*,} - - # load them into glance - KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-kernel \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) - RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-initrd \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) -} - - -# Re-implementation of devstack's "upload_image" function -# -# Takes the same parameters, but has some peculiarities which made it -# easier to create a separate method, rather than complicate the logic -# of the existing function. -function upload_baremetal_image { - local image_url=$1 - local token=$2 - - # Create a directory for the downloaded image tarballs. - mkdir -p $FILES/images - - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - if [[ ! -f $FILES/$IMAGE_FNAME || \ - "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi - fi - - local KERNEL="" - local RAMDISK="" - local DISK_FORMAT="" - local CONTAINER_FORMAT="" - case "$IMAGE_FNAME" in - *.tar.gz|*.tgz) - # Extract ami and aki files - [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && - IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || - IMAGE_NAME="${IMAGE_FNAME%.tgz}" - xdir="$FILES/images/$IMAGE_NAME" - rm -Rf "$xdir"; - mkdir "$xdir" - tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" - KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - if [[ -z "$IMAGE_NAME" ]]; then - IMAGE_NAME=$(basename "$IMAGE" ".img") - fi - DISK_FORMAT=ami - CONTAINER_FORMAT=ami - ;; - *.qcow2) - IMAGE="$FILES/${IMAGE_FNAME}" - IMAGE_NAME=$(basename "$IMAGE" ".qcow2") - DISK_FORMAT=qcow2 - CONTAINER_FORMAT=bare - ;; - *) echo "Do not know what to do with $IMAGE_FNAME"; false;; - esac - - if [ "$CONTAINER_FORMAT" = "bare" ]; then - extract_and_upload_k_and_r_from_image $token $IMAGE - elif [ "$CONTAINER_FORMAT" = "ami" ]; then - KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "$IMAGE_NAME-kernel" --is-public True \ - --container-format aki \ - --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) - RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "$IMAGE_NAME-ramdisk" --is-public True \ - --container-format ari \ - --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) - else - # TODO(deva): add support for other image types - return - fi - - glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "${IMAGE_NAME%.img}" --is-public True \ - --container-format $CONTAINER_FORMAT \ - --disk-format $DISK_FORMAT \ - ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ - ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" - - 
# override DEFAULT_IMAGE_NAME so that tempest can find the image - # that we just uploaded in glance - DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" -} - -function clear_baremetal_of_all_nodes { - list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) - for node in $list; do - nova baremetal-node-delete $node - done -} - -# Inform nova-baremetal about nodes, MACs, etc. -# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified -# -# Usage: add_baremetal_node -function add_baremetal_node { - mac_1=${1:-$BM_FIRST_MAC} - mac_2=${2:-$BM_SECOND_MAC} - - id=$(nova baremetal-node-create \ - --pm_address="$BM_PM_ADDR" \ - --pm_user="$BM_PM_USER" \ - --pm_password="$BM_PM_PASS" \ - "$BM_HOSTNAME" \ - "$BM_FLAVOR_CPU" \ - "$BM_FLAVOR_RAM" \ - "$BM_FLAVOR_ROOT_DISK" \ - "$mac_1" \ - | grep ' id ' | get_field 2 ) - [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" - if [ -n "$mac_2" ]; then - id2=$(nova baremetal-interface-add "$id" "$mac_2" ) - [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" - fi -} - - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/ceilometer b/lib/ceilometer deleted file mode 100644 index a4be7af480..0000000000 --- a/lib/ceilometer +++ /dev/null @@ -1,280 +0,0 @@ -# lib/ceilometer -# Install and start **Ceilometer** service - -# To enable a minimal set of Ceilometer services, add the following to localrc: -# -# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api -# -# To ensure Ceilometer alarming services are enabled also, further add to the localrc: -# -# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator - -# Dependencies: -# -# - functions -# - OS_AUTH_URL for auth in api -# - DEST set to the destination directory -# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api -# - STACK_USER service user - -# stack.sh -# --------- -# - install_ceilometer -# - configure_ceilometer -# - init_ceilometer -# - start_ceilometer -# - stop_ceilometer -# - cleanup_ceilometer - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -CEILOMETER_DIR=$DEST/ceilometer -CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient -CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf -CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api -CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} - -# Support potential entry-points console scripts -CEILOMETER_BIN_DIR=$(get_python_exec_prefix) - -# Set up database backend -CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} - -# Ceilometer connection info. 
-CEILOMETER_SERVICE_PROTOCOL=http -CEILOMETER_SERVICE_HOST=$SERVICE_HOST -CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,ceilometer - - -# Functions -# --------- - -# Test if any Ceilometer services are enabled -# is_ceilometer_enabled -function is_ceilometer_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 - return 1 -} - -# create_ceilometer_accounts() - Set up common required ceilometer accounts - -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME ceilometer admin -# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled) - -create_ceilometer_accounts() { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - # Ceilometer - if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then - CEILOMETER_USER=$(openstack user create \ - ceilometer \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email ceilometer@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $CEILOMETER_USER - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(openstack service create \ - ceilometer \ - --type=metering \ - --description="OpenStack Telemetry Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $CEILOMETER_SERVICE \ - --region RegionOne \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" - fi - if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access swift account stats. - openstack role add \ - --project $SERVICE_TENANT_NAME \ - --user ceilometer \ - ResellerAdmin - fi - fi -} - - -# cleanup_ceilometer() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_ceilometer { - if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then - mongo ceilometer --eval "db.dropDatabase();" - fi -} - -# configure_ceilometerclient() - Set config files, create data dirs, etc -function configure_ceilometerclient { - setup_develop $CEILOMETERCLIENT_DIR - sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion -} - -# configure_ceilometer() - Set config files, create data dirs, etc -function configure_ceilometer { - setup_develop $CEILOMETER_DIR - - [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR - sudo chown $STACK_USER $CEILOMETER_CONF_DIR - - [ ! 
-d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR - sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR - - iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT - - iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications' - iniset $CEILOMETER_CONF DEFAULT verbose True - iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - - # Install the policy file for the API server - cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR - iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json - - cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR - - if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then - sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml - fi - - # the compute and central agents need these credentials in order to - # call out to other services' public APIs - # the alarm evaluator needs these options to call ceilometer APIs - iniset $CEILOMETER_CONF service_credentials os_username ceilometer - iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD - iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME - - iniset $CEILOMETER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $CEILOMETER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $CEILOMETER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer - iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR - - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer` - else - iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer - configure_mongodb - cleanup_ceilometer - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere - iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" - fi -} - -function configure_mongodb { - # server package is the same on all - local packages=mongodb-server - - if is_fedora; then - # mongodb client + python bindings - packages="${packages} mongodb pymongo" - else - packages="${packages} python-pymongo" - fi - - install_package ${packages} - - if is_fedora; then - # ensure smallfiles selected to minimize freespace requirements - sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod - - restart_service mongod - fi - - # give mongodb time to start-up - sleep 5 -} - -# init_ceilometer() - Initialize etc. 
-function init_ceilometer { - # Create cache dir - sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR - rm -f $CEILOMETER_AUTH_CACHE_DIR/* - - if is_service_enabled mysql postgresql; then - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - recreate_database ceilometer utf8 - $CEILOMETER_BIN_DIR/ceilometer-dbsync - fi - fi -} - -# install_ceilometer() - Collect source and prepare -function install_ceilometer { - git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH -} - -# install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient { - git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH -} - -# start_ceilometer() - Start running processes, including screen -function start_ceilometer { - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'" - fi - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF" - fi - screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" - screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF" - screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" - screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - - # only die on API if it was actually intended to be turned on - if is_service_enabled ceilometer-api; then - echo "Waiting for ceilometer-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then - die $LINENO "ceilometer-api did not start" - fi - fi - - screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" -} - -# stop_ceilometer() - Stop running processes -function stop_ceilometer { - # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do - screen_stop $serv - done -} - - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/cinder b/lib/cinder index dadbe40a3b..02056c20f4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/cinder # Install and start **Cinder** volume service @@ -18,7 +20,7 @@ # - cleanup_cinder # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER=$(set +o | grep xtrace) set +o xtrace @@ -28,6 +30,8 @@ set +o xtrace # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins +CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups # grab plugin config if specified via cinder_driver if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -35,59 +39,176 @@ if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then fi # set up default directories +GITDIR["python-cinderclient"]=$DEST/python-cinderclient +GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder -CINDERCLIENT_DIR=$DEST/python-cinderclient + +if [[ 
$SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + +# Cinder virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["cinder"]=${CINDER_DIR}.venv + CINDER_BIN_DIR=${PROJECT_VENV["cinder"]}/bin +else + CINDER_BIN_DIR=$(get_python_exec_prefix) +fi + CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf +CINDER_UWSGI=cinder.wsgi.api:application +CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Public facing bits +if is_service_enabled tls-proxy; then + CINDER_SERVICE_PROTOCOL="https" +fi CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} + +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database, which are already taxed. +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} + +# What type of LVM device should Cinder use for the LVM backend. +# Defaults to auto, which will do thin provisioning if it's a fresh +# volume group, otherwise it will do thick. The other valid choices are +# default, which is thick, or thin, which as the name implies utilizes lvm +# thin provisioning. +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} + +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where the service token is passed along +# with the user token when communicating with external REST APIs like Glance. +CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + +# Default backends +# The backend format is type:name where type is one of the supported backend +# types (lvm, nfs, etc) and name is the identifier used in the Cinder +# configuration and for the volume type name. Multiple backends are +# comma-separated.
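+# For example, a hypothetical two-backend setup:
+#   CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,nfs:nfsdriver-1
+# is split by the sourcing loop below into be_type=${be%%:*} ("lvm",
+# "nfs"), selecting the file sourced from lib/cinder_backends, and
+# be_name=${be##*:} ("lvmdriver-1", "nfsdriver-1"), naming the config
+# section and volume type.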
+# The old ``CINDER_MULTI_LVM_BACKEND=True`` setting had a default of: +# CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2} +CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} + +CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} +CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') + +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} + +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' + fi +fi CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} -# Support entry points installation of console scripts -if [[ -d $CINDER_DIR/bin ]]; then - CINDER_BIN_DIR=$CINDER_DIR/bin +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} else - CINDER_BIN_DIR=$(get_python_exec_prefix) + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + + +# EL should only use lioadm or nvmet +if is_fedora; then + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" + fi +fi + +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma-separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS in glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi + +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, the ceph backup driver was +# configured along with the ceph backend driver.
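+# For instance, a hypothetical legacy local.conf that set only
+#   CINDER_ENABLED_BACKENDS=ceph:ceph
+# got the ceph backup driver implicitly; the check below preserves that
+# behaviour rather than silently falling back to swift.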
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi + +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} + +# Source the enabled backends +if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if [[ -r $CINDER_BACKENDS/${be_type} ]]; then + source $CINDER_BACKENDS/${be_type} + fi + done fi -# Support for multi lvm backend configuration (default is no support) -CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi -# Should cinder perform secure deletion of volumes? -# Defaults to true, can be set to False to avoid this bug when testing: -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 -CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` +# Environment variables to configure the image-volume cache +CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} -# Cinder reports allocations back to the scheduler on periodic intervals -# it turns out we can get an "out of space" issue when we run tests too -# quickly just because cinder didn't realize we'd freed up resources. -# Make this configurable so that devstack-gate/tempest can set it to -# less than the 60 second default -# https://bugs.launchpad.net/cinder/+bug/1180976 -CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} -# Name of the lvm volume groups to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} -VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} -VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} -# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True -VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2} -VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file} -VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-} +# For limits, if left unset, it will use cinder defaults of 0 for unlimited +CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} +CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} -VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# Configure which cinder backends will have the image-volume cache, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,cinder +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Flag to set the oslo_policy.enforce_scope. 
This is used to switch +# the Volume API policies to start checking the scope of the token. By default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) # Functions # --------- @@ -95,43 +216,14 @@ TEMPEST_SERVICES+=,cinder # Test if any Cinder services are enabled # is_cinder_enabled function is_cinder_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"cinder" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 return 1 } -# _clean_lvm_lv removes all cinder LVM volumes -# -# Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _clean_lvm_lv { - local vg=$1 - local lv_prefix=$2 - - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $vg`; do - # lv_prefix prefixes the LVs we want - if [[ "${lv#$lv_prefix}" != "$lv" ]]; then - sudo lvremove -f $vg/$lv - fi - done -} - -# _clean_lvm_backing_file() removes the backing file of the -# volume group used by cinder -# -# Usage: _clean_lvm_backing_file() $VOLUME_GROUP -function _clean_lvm_backing_file { - local vg=$1 - - # if there is no logical volume left, it's safe to attempt a cleanup - # of the backing file - if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then - # if the backing physical device is a loop device, it was probably setup by devstack - if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then - VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') - sudo losetup -d $VG_DEV - rm -f $DATA_DIR/${vg}-backing-file - fi - fi +# _cinder_cleanup_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cinder_cleanup_apache_wsgi { + sudo rm -f $(apache_site_config_for osapi-volume) } # cleanup_cinder() - Remove residual data files, anything left over from previous @@ -139,88 +231,76 @@ function _clean_lvm_backing_file { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - TARGETS=$(sudo tgtadm --op show --mode target) - if [ $? -ne 0 ]; then - # If tgt driver isn't running this won't work obviously - # So check the response and restart if need be - echo "tgtd seems to be in a bad state, restarting..." + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + local targets + targets=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..."
+ if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + targets=$(sudo tgtadm --op show --mode target) + fi + + if [[ -n "$targets" ]]; then + local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + if is_ubuntu; then - restart_service tgt + stop_service tgt else - restart_service tgtd + stop_service tgtd fi - TARGETS=$(sudo tgtadm --op show --mode target) + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi - if [[ -n "$TARGETS" ]]; then - iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') ) - for i in "${iqn_list[@]}"; do - echo removing iSCSI target: $i - sudo tgt-admin --delete $i + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type cleanup_cinder_backend_${be_type} >/dev/null 2>&1; then + cleanup_cinder_backend_${be_type} ${be_name} + fi done fi - if is_service_enabled cinder; then - sudo rm -rf $CINDER_STATE_PATH/volumes/* - fi - - if is_ubuntu; then - stop_service tgt - else - stop_service tgtd + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi fi - # Campsite rule: leave behind a volume group at least as clean as we found it - _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX - _clean_lvm_backing_file $VOLUME_GROUP - - if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - _clean_lvm_lv $VOLUME_GROUP2 $VOLUME_NAME_PREFIX - _clean_lvm_backing_file $VOLUME_GROUP2 - fi -} - -# configure_cinder_rootwrap() - configure Cinder's rootwrap -function configure_cinder_rootwrap { - # Set the paths of certain binaries - CINDER_ROOTWRAP=$(get_rootwrap_location cinder) - - # Deploy new rootwrap filters files (owned by root).
- # Wipe any existing rootwrap.d files first - if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $CINDER_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/cinder/rootwrap.d - sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d - sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ - sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf - sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to rootwrap - ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *" - - # Set up the rootwrap sudoers for cinder - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap + stop_process "c-api" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc function configure_cinder { - if [[ ! -d $CINDER_CONF_DIR ]]; then - sudo mkdir -p $CINDER_CONF_DIR - fi - sudo chown $STACK_USER $CINDER_CONF_DIR + sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR + + rm -f $CINDER_CONF - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + configure_rootwrap cinder - configure_cinder_rootwrap + if [[ -f "$CINDER_DIR/etc/cinder/resource_filters.json" ]]; then + cp -p "$CINDER_DIR/etc/cinder/resource_filters.json" "$CINDER_CONF_DIR/resource_filters.json" + fi cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI @@ -233,321 +313,448 @@ function configure_cinder { inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF - - iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CINDER_CONF keystone_authtoken admin_user cinder - iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $CINDER_CONF keystone_authtoken signing_dir $CINDER_AUTH_CACHE_DIR + configure_keystone_authtoken_middleware $CINDER_CONF cinder - iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT verbose True - if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2 - iniset $CINDER_CONF lvmdriver-1 volume_group $VOLUME_GROUP - iniset $CINDER_CONF lvmdriver-1 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver - iniset $CINDER_CONF lvmdriver-1 volume_backend_name LVM_iSCSI - iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2 - iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver - iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2 - # NOTE(mriedem): Work around Cinder "wishlist" bug 1255593 - if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then - 
iniset $CINDER_CONF lvmdriver-1 volume_clear none - iniset $CINDER_CONF lvmdriver-2 volume_clear none - fi - else - iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP - iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s - fi - iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST" - iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm - iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder` + + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" + iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager + iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi + + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} + + # Avoid RPC timeouts in slow CI and test environments by doubling the + # default response timeout set by RPC clients. See bug #1873234 for more + # details and example failures. + iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local enabled_backends="" + local default_name="" + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then + configure_cinder_backend_${be_type} ${be_name} + fi + if [[ -z "$default_name" ]]; then + default_name=$be_name + fi + enabled_backends+=$be_name, + done + iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} + if [[ -n "$default_name" ]]; then + iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} + fi + configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from the source volume, hence it can only be + # performed when glance is using cinder as its backend.
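+ # (Illustrative local.conf combination, hypothetical values:
+ #   USE_CINDER_FOR_GLANCE=True
+ #   CINDER_UPLOAD_OPTIMIZED=True
+ # together enable this clone-based upload path.)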
+ if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi + fi - if is_service_enabled swift; then - iniset $CINDER_CONF DEFAULT backup_swift_url "http://$SERVICE_HOST:8080/v1/AUTH_" + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi fi if is_service_enabled ceilometer; then - iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" + iniset $CINDER_CONF oslo_messaging_notifications driver "messagingv2" fi if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True + fi fi if [ "$SYSLOG" != "False" ]; then iniset $CINDER_CONF DEFAULT use_syslog True fi - iniset_rpc_backend cinder $CINDER_CONF DEFAULT - - if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then - iniset $CINDER_CONF DEFAULT secure_delete False - iniset $CINDER_CONF DEFAULT volume_clear none - fi + iniset_rpc_backend cinder $CINDER_CONF # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" + setup_logging $CINDER_CONF + + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver fi - if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # Cinder clones are slightly larger due to some extra - # metadata. RHEL6 will not allow auto-extending of LV's - # without this, leading to clones giving hard-to-track disk - # I/O errors. - # see https://bugzilla.redhat.com/show_bug.cgi?id=975052 - sudo sed -i~ \ - -e 's/snapshot_autoextend_threshold =.*/snapshot_autoextend_threshold = 80/' \ - -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ - /etc/lvm/lvm.conf + iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" + + iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT glance_protocol https + iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE + fi + + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + + # Set nova credentials (used for os-assisted-snapshots) + configure_keystone_authtoken_middleware $CINDER_CONF nova nova + iniset $CINDER_CONF nova region_name "$REGION_NAME" + iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + if [[ ! 
-z "$CINDER_COORDINATION_URL" ]]; then + iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" + elif is_service_enabled etcd3; then + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" fi - configure_API_version $CINDER_CONF $IDENTITY_API_VERSION - iniset $CINDER_CONF keystone_authtoken admin_user cinder - iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false + fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf + fi } # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(openstack user create \ - cinder \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email cinder@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $CINDER_USER - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(openstack service create \ - cinder \ - --type=volume \ - --description="Cinder Volume Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $CINDER_SERVICE \ - --region RegionOne \ - --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - CINDER_V2_SERVICE=$(openstack service create \ - cinderv2 \ - --type=volumev2 \ - --description="Cinder Volume Service V2" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $CINDER_V2_SERVICE \ - --region RegionOne \ - --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ - --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ - --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" - fi - fi -} -# create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir { - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* -} + local extra_role="" -function create_cinder_volume_group { - # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes - # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume - # 
service if it (they) does (do) not yet exist. If you don't wish to use a - # file backed volume group, create your own volume group called ``stack-volumes`` - # and ``stack-volumes2`` before invoking ``stack.sh``. - # - # The two backing files are ``VOLUME_BACKING_FILE_SIZE`` in size, and they are stored in - # the ``DATA_DIR``. - - if ! sudo vgs $VOLUME_GROUP; then - if [ -z "$VOLUME_BACKING_DEVICE" ]; then - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP; then - sudo vgcreate $VOLUME_GROUP $DEV - fi - else - sudo vgcreate $VOLUME_GROUP $VOLUME_BACKING_DEVICE + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") fi - fi - if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - #set up the second volume if CINDER_MULTI_LVM_BACKEND is enabled - if ! sudo vgs $VOLUME_GROUP2; then - if [ -z "$VOLUME_BACKING_DEVICE2" ]; then - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2 + create_service_user "cinder" $extra_role - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2` + local cinder_api_url + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP2; then - sudo vgcreate $VOLUME_GROUP2 $DEV - fi - else - sudo vgcreate $VOLUME_GROUP2 $VOLUME_BACKING_DEVICE2 - fi - fi + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" + configure_cinder_internal_tenant fi - - mkdir -p $CINDER_STATE_PATH/volumes } # init_cinder() - Initialize database and volume group function init_cinder { - # Force nova volumes off - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") - if is_service_enabled $DATABASE_BACKENDS; then # (Re)create cinder database - recreate_database cinder utf8 + recreate_database cinder + time_start "dbsync" # Migrate cinder database - $CINDER_BIN_DIR/cinder-manage db sync + $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync + time_stop "dbsync" fi - if is_service_enabled c-vol; then - - create_cinder_volume_group - - if sudo vgs $VOLUME_GROUP; then - if is_fedora || is_suse; then - # service is not started by default - start_service tgtd + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type init_cinder_backend_${be_type} >/dev/null 2>&1; then + init_cinder_backend_${be_type} ${be_name} fi + done + fi - # Remove iscsi targets - sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Start with a clean volume group - _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX - if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - _clean_lvm_lv $VOLUME_GROUP2 $VOLUME_NAME_PREFIX - fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + 
init_cinder_backup_$CINDER_BACKUP_DRIVER fi fi - create_cinder_cache_dir + mkdir -p $CINDER_STATE_PATH/volumes +} + + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi } # install_cinder() - Collect source and prepare function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then + install_package tgt + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then + if is_ubuntu; then + # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 + sudo mkdir -p /etc/target + + install_package targetcli-fb + else + install_package targetcli + fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi } # install_cinderclient() - Collect source and prepare function install_cinderclient { - git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH - setup_develop $CINDERCLIENT_DIR - sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion + if use_library_from_git "python-brick-cinderclient-ext"; then + git_clone_by_name "python-brick-cinderclient-ext" + setup_dev_lib "python-brick-cinderclient-ext" + fi + + if use_library_from_git "python-cinderclient"; then + git_clone_by_name "python-cinderclient" + setup_dev_lib "python-cinderclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-cinderclient"]}/tools/,/etc/bash_completion.d/}cinder.bash_completion + fi } # apply config.d approach for cinder volumes directory function _configure_tgt_for_config_d { if [[ ! -d /etc/tgt/stack.d/ ]]; then sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d + fi + if ! 
grep -q "include /etc/tgt/stack.d/*" /etc/tgt/targets.conf; then echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf fi } -# start_cinder() - Start running processes, including screen +# start_cinder() - Start running processes function start_cinder { - if is_service_enabled c-vol; then - # Delete any old stack.conf - sudo rm -f /etc/tgt/conf.d/stack.conf - _configure_tgt_for_config_d - if is_ubuntu; then - sudo service tgt restart - elif is_fedora; then - if [[ $DISTRO =~ (rhel6) ]]; then - sudo /sbin/service tgtd restart + local service_port=$CINDER_SERVICE_PORT + local service_protocol=$CINDER_SERVICE_PROTOCOL + local cinder_url + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + if is_service_enabled c-vol; then + # Delete any old stack.conf + sudo rm -f /etc/tgt/conf.d/stack.conf + _configure_tgt_for_config_d + if is_ubuntu; then + sudo service tgt restart else - # bypass redirection to systemctl during restart - sudo /sbin/service --skip-redirect tgtd restart + restart_service tgtd fi - elif is_suse; then - restart_service tgtd - else - # note for other distros: unstack.sh also uses the tgt/tgtd service - # name, and would need to be adjusted too - exit_distro_not_supported "restarting tgt" + # NOTE(gfidente): ensure tgtd is running in debug mode + sudo tgtadm --mode system --op update --name debug --value on fi - # NOTE(gfidente): ensure tgtd is running in debug mode - sudo tgtadm --mode system --op update --name debug --value on fi - screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - screen_it c-bak "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 + fi + + echo "Waiting for Cinder API to start..." + if ! wait_for_service $SERVICE_TIMEOUT $cinder_url; then + die $LINENO "c-api did not start" + fi + + run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler # has started. 
This is a quick fix for lp bug/1189595 - - # Start proxies if enabled - if is_service_enabled c-api && is_service_enabled tls-proxy; then - start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & - fi } # stop_cinder() - Stop running processes function stop_cinder { - # Kill the cinder screen windows - for serv in c-api c-bak c-sch c-vol; do - screen_stop $serv - done + stop_process c-api + stop_process c-bak + stop_process c-sch + stop_process c-vol +} - if is_service_enabled c-vol; then - if is_ubuntu; then - stop_service tgt - else - stop_service tgtd +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it supports cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" + fi +} + +# create_volume_types() - Create Cinder's configured volume types +function create_volume_types { + # Create volume types + if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_name=${be##*:} + create_one_type $be_name "volume_backend_name" $be_name + done + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH "<is> True" + fi + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" fi fi } +# Compatibility for Grenade + +function create_cinder_volume_group { + # During a transition period Grenade needs to have this function defined + # It is effectively a no-op in the Grenade 'target' use case + : +} + +function configure_cinder_internal_tenant { + # Re-use the Cinder service account for simplicity. + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME) + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder") +} + +function configure_cinder_image_volume_cache { + # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with TYPE:NAME where NAME will + # be the backend specific configuration stanza in cinder.conf.
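+ # For example (illustrative value, not a default): + # CINDER_CACHE_ENABLED_FOR_BACKENDS=lvm:lvmdriver-1 + # enables the image volume cache on the [lvmdriver-1] stanza.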
+ for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do + local be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED + + if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB + fi + + if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT + fi + done +} + +function configure_cinder_volume_upload { + # Expect CINDER_UPLOAD_OPTIMIZED_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with TYPE:NAME where NAME will + # be the backend specific configuration stanza in cinder.conf. + local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} + +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True +} # Restore xtrace -$XTRACE +$_XTRACE_CINDER # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph new file mode 100644 index 0000000000..0b465730c0 --- /dev/null +++ b/lib/cinder_backends/ceph @@ -0,0 +1,51 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph +# Configure the ceph backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph:ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_backend_ceph - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph - Set config files, create data dirs, etc +# configure_cinder_backend_ceph $name +function configure_cinder_backend_ceph { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional parameters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name
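+# If CEPH_ISCSI_API_URL is unset, it is derived below from CEPH_ISCSI_API_HOST +# and CEPH_ISCSI_API_PORT, e.g. http://192.0.2.10:5000 (illustrative address).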
+function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake b/lib/cinder_backends/fake new file mode 100644 index 0000000000..4749aced69 --- /dev/null +++ b/lib/cinder_backends/fake @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/fake +# Configure the Fake backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake:fake + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF + +# cleanup_cinder_backend_fake - called from cleanup_cinder() +# configure_cinder_backend_fake - called from configure_cinder() +# init_cinder_backend_fake - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_FAKE=$(set +o | grep xtrace) +set +o xtrace + + +function cleanup_cinder_backend_fake { + local be_name=$1 +} + +function configure_cinder_backend_fake { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeLoggingVolumeDriver" + +} + +function init_cinder_backend_fake { + local be_name=$1 +} + +# Restore xtrace +$_XTRACE_CINDER_FAKE + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate new file mode 100644 index 0000000000..3b9f1d1164 --- /dev/null +++ b/lib/cinder_backends/fake_gate @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/fake_gate +# Configure the LVM-backed fake gate backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake_gate:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# cleanup_cinder_backend_lvm - called from cleanup_cinder() +# configure_cinder_backend_lvm - called from configure_cinder() +# init_cinder_backend_lvm - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) +set +o xtrace + + +# TODO: resurrect backing device...need to know how to set values +#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_lvm - Delete volume group and remove backing file +# cleanup_cinder_backend_lvm $be_name +function cleanup_cinder_backend_lvm { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_lvm - Set config files, create data dirs, etc +# configure_cinder_backend_lvm $be_name
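+# NOTE: the option names below mirror the LVM backend, but the driver is +# FakeGateDriver, presumably so gate jobs exercise the LVM setup paths +# without touching a real storage backend.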
+function configure_cinder_backend_lvm { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + + if [[ "$CINDER_VOLUME_CLEAR" == "none" ]]; then + iniset $CINDER_CONF $be_name volume_clear none + fi +} + +# init_cinder_backend_lvm - Initialize volume group +# init_cinder_backend_lvm $be_name +function init_cinder_backend_lvm { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_LVM + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/glusterfs b/lib/cinder_backends/glusterfs new file mode 100644 index 0000000000..4e34f8ef6c --- /dev/null +++ b/lib/cinder_backends/glusterfs @@ -0,0 +1,48 @@ +#!/bin/bash +# +# lib/cinder_backends/glusterfs +# Configure the glusterfs backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,glusterfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# CINDER_CONF_DIR +# CINDER_GLUSTERFS_SHARES - Contents of glusterfs shares config file + +# configure_cinder_backend_glusterfs - Configure Cinder for GlusterFS backends + +# Save trace setting +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_glusterfs - Set config files, create data dirs, etc +function configure_cinder_backend_glusterfs { + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF $be_name glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf" + + if [[ -n "$CINDER_GLUSTERFS_SHARES" ]]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" | tee "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf" + fi +} + + +# Restore xtrace +$_XTRACE_CINDER_GLUSTERFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm new file mode 100644 index 0000000000..42865119da --- /dev/null +++ b/lib/cinder_backends/lvm @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/lvm +# Configure the LVM backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,lvm:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# cleanup_cinder_backend_lvm - called from cleanup_cinder() +# configure_cinder_backend_lvm - called from configure_cinder() +# init_cinder_backend_lvm - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) +set +o xtrace + + +# TODO: resurrect backing device...need to know how to set values +#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_lvm - Delete volume group and remove backing file +# cleanup_cinder_backend_lvm $be_name +function cleanup_cinder_backend_lvm { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_lvm - Set config files,
create data dirs, etc +# configure_cinder_backend_lvm $be_name +function configure_cinder_backend_lvm { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" +} + +# init_cinder_backend_lvm - Initialize volume group +# init_cinder_backend_lvm $be_name +function init_cinder_backend_lvm { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_LVM + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/netapp_iscsi b/lib/cinder_backends/netapp_iscsi new file mode 100644 index 0000000000..5cce30a6d3 --- /dev/null +++ b/lib/cinder_backends/netapp_iscsi @@ -0,0 +1,66 @@ +#!/bin/bash +# +# lib/cinder_backends/netapp_iscsi +# Configure the NetApp iSCSI driver + +# Enable with: +# +# iSCSI: +# CINDER_ENABLED_BACKENDS+=,netapp_iscsi: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# ``CINDER_CONF`` +# ``CINDER_CONF_DIR`` +# ``CINDER_ENABLED_BACKENDS`` + +# configure_cinder_backend_netapp_iscsi - configure iSCSI + +# Save trace setting +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_netapp_iscsi - Set config files, create data dirs, etc +function configure_cinder_backend_netapp_iscsi { + # To use NetApp, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,netapp_iscsi: + # NETAPP_MODE=ontap_7mode|ontap_cluster + # NETAPP_IP= + # NETAPP_LOGIN= + # NETAPP_PASSWORD= + # NETAPP_ISCSI_VOLUME_LIST= + + # In ontap_cluster mode, the following also needs to be defined: + # NETAPP_ISCSI_VSERVER= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.netapp.common.NetAppDriver" + iniset $CINDER_CONF $be_name netapp_storage_family ${NETAPP_MODE:-ontap_7mode} + iniset $CINDER_CONF $be_name netapp_server_hostname $NETAPP_IP + iniset $CINDER_CONF $be_name netapp_login $NETAPP_LOGIN + iniset $CINDER_CONF $be_name netapp_password $NETAPP_PASSWORD + iniset $CINDER_CONF $be_name netapp_volume_list $NETAPP_ISCSI_VOLUME_LIST + + iniset $CINDER_CONF $be_name netapp_storage_protocol iscsi + iniset $CINDER_CONF $be_name netapp_transport_type https + + if [[ "$NETAPP_MODE" == "ontap_cluster" ]]; then + iniset $CINDER_CONF $be_name netapp_vserver $NETAPP_ISCSI_VSERVER + fi +} + + +# Restore xtrace +$_XTRACE_CINDER_NETAPP + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/netapp_nfs b/lib/cinder_backends/netapp_nfs new file mode 100644 index 0000000000..7ba36d2a3b --- /dev/null +++ b/lib/cinder_backends/netapp_nfs @@ -0,0 +1,77 @@ +#!/bin/bash +# +# lib/cinder_backends/netapp_nfs +# Configure the NetApp NFS driver + +# Enable with: +# +# NFS: +# CINDER_ENABLED_BACKENDS+=,netapp_nfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# 
``CINDER_CONF`` +# ``CINDER_CONF_DIR`` +# ``CINDER_ENABLED_BACKENDS`` + +# configure_cinder_backend_netapp_nfs - configure NFS + +# Save trace setting +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_netapp_nfs - Set config files, create data dirs, etc +function configure_cinder_backend_netapp_nfs { + # To use NetApp, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,netapp_nfs: + # NETAPP_MODE=ontap_7mode|ontap_cluster + # NETAPP_IP= + # NETAPP_LOGIN= + # NETAPP_PASSWORD= + # NETAPP_NFS_VOLUME_LIST= + + # In ontap_cluster mode, the following also needs to be defined: + # NETAPP_NFS_VSERVER= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.netapp.common.NetAppDriver" + iniset $CINDER_CONF $be_name netapp_storage_family ${NETAPP_MODE:-ontap_7mode} + iniset $CINDER_CONF $be_name netapp_server_hostname $NETAPP_IP + iniset $CINDER_CONF $be_name netapp_login $NETAPP_LOGIN + iniset $CINDER_CONF $be_name netapp_password $NETAPP_PASSWORD + + iniset $CINDER_CONF $be_name netapp_storage_protocol nfs + iniset $CINDER_CONF $be_name netapp_transport_type https + iniset $CINDER_CONF $be_name nfs_shares_config $CINDER_CONF_DIR/netapp_shares.conf + + echo "$NETAPP_NFS_VOLUME_LIST" | tee "$CINDER_CONF_DIR/netapp_shares.conf" + + if [[ "$NETAPP_MODE" == "ontap_cluster" ]]; then + iniset $CINDER_CONF $be_name netapp_vserver $NETAPP_NFS_VSERVER + fi +} + +function cleanup_cinder_backend_netapp_nfs { + # Clean up remaining NFS mounts + # Be blunt and do them all + local m + for m in $CINDER_STATE_PATH/mnt/*; do + sudo umount $m + done +} + + +# Restore xtrace +$_XTRACE_CINDER_NETAPP + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs new file mode 100644 index 0000000000..f3fcbeff19 --- /dev/null +++ b/lib/cinder_backends/nfs @@ -0,0 +1,54 @@ +#!/bin/bash +# +# lib/cinder_backends/nfs +# Configure the nfs backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,nfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# CINDER_CONF_DIR +# CINDER_NFS_SERVERPATH - contents of nfs shares config file + +# configure_cinder_backend_nfs - Configure Cinder for NFS backends + +# Save trace setting +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_nfs - Set config files, create data dirs, etc +function configure_cinder_backend_nfs { + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. 
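+ # Opting in here assumes the export configured above can hold the + # snapshot files the generic NFS driver creates.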
+ iniset $CINDER_CONF $be_name nfs_snapshot_support True + + echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" +} + + +# Restore xtrace +$_XTRACE_CINDER_NFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/vmdk b/lib/cinder_backends/vmdk new file mode 100644 index 0000000000..3a6a5cf2ff --- /dev/null +++ b/lib/cinder_backends/vmdk @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/vmdk +# Configure the VMware vmdk backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,vmdk: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_backend_vmdk - Configure Cinder for VMware vmdk backends + +# Save trace setting +_XTRACE_CINDER_VMDK=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_vmdk - Set config files, create data dirs, etc +function configure_cinder_backend_vmdk { + # To use VMware vmdk backend, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,vmdk: + # VMWAREAPI_IP= + # VMWAREAPI_USER= + # VMWAREAPI_PASSWORD= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + iniset $CINDER_CONF $be_name vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF $be_name vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF $be_name vmware_host_password "$VMWAREAPI_PASSWORD" +} + + +# Restore xtrace +$_XTRACE_CINDER_VMDK + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..e4d6b96407 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + fi + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user 
"$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..c7ec306246 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,41 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. 
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS deleted file mode 100644 index fa10715bdf..0000000000 --- a/lib/cinder_plugins/XenAPINFS +++ /dev/null @@ -1,44 +0,0 @@ -# lib/cinder_plugins/XenAPINFS -# Configure the XenAPINFS driver - -# Enable with: -# -# CINDER_DRIVER=XenAPINFS - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" -} - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs index b4196e4738..329dd6c649 100644 --- a/lib/cinder_plugins/glusterfs +++ b/lib/cinder_plugins/glusterfs @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/cinder_plugins/glusterfs # Configure the glusterfs driver @@ -13,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) set +o xtrace @@ -43,7 +45,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_GLUSTERFS # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs index 5f4cc5369a..6e4ffe068e 100644 --- a/lib/cinder_plugins/nfs +++ b/lib/cinder_plugins/nfs @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/cinder_plugins/nfs # Configure the nfs driver @@ -13,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) set +o xtrace @@ -34,7 +36,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_NFS # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog deleted file mode 100644 index 30c60c6efe..0000000000 --- a/lib/cinder_plugins/sheepdog +++ /dev/null @@ -1,39 +0,0 @@ -# lib/cinder_plugins/sheepdog -# Configure the sheepdog driver - -# Enable with: -# -# CINDER_DRIVER=sheepdog - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# 
configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" -} - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire deleted file mode 100644 index 2c970b5adf..0000000000 --- a/lib/cinder_plugins/solidfire +++ /dev/null @@ -1,48 +0,0 @@ -# lib/cinder_plugins/solidfire -# Configure the solidfire driver - -# Enable with: -# -# CINDER_DRIVER=solidfire - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - # To use solidfire, set the following in localrc: - # CINDER_DRIVER=solidfire - # SAN_IP= - # SAN_LOGIN= - # SAN_PASSWORD= - - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver" - iniset $CINDER_CONF DEFAULT san_ip $SAN_IP - iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN - iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD -} - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere index 436b060377..1b28ffe602 100644 --- a/lib/cinder_plugins/vsphere +++ b/lib/cinder_plugins/vsphere @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/cinder_plugins/vsphere # Configure the vsphere driver @@ -13,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_VSPHERE=$(set +o | grep xtrace) set +o xtrace @@ -35,7 +37,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_VSPHERE # Local variables: # mode: shell-script diff --git a/lib/config b/lib/config deleted file mode 100644 index 552aeb0ad1..0000000000 --- a/lib/config +++ /dev/null @@ -1,130 +0,0 @@ -# lib/config - Configuration file manipulation functions - -# These functions have no external dependencies and the following side-effects: -# -# CONFIG_AWK_CMD is defined, default is ``awk`` - -# Meta-config files contain multiple INI-style configuration files -# using a specific new section header to delimit them: -# -# [[group-name|file-name]] -# -# group-name refers to the group of configuration file changes to be processed -# at a particular time. These are called phases in ``stack.sh`` but -# group here as these functions are not DevStack-specific. 
-# -# file-name is the destination of the config file - -# Save trace setting -C_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Allow the awk command to be overridden on legacy platforms -CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} - -# Get the section for the specific group and config file -# get_meta_section infile group configfile -function get_meta_section { - local file=$1 - local matchgroup=$2 - local configfile=$3 - - [[ -r $file ]] || return 0 - [[ -z $configfile ]] && return 0 - - $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' - BEGIN { group = "" } - /^\[\[.+\|.*\]\]/ { - if (group == "") { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup && a[2] == configfile) { - group=a[1] - } - } else { - group="" - } - next - } - { - if (group != "") - print $0 - } - ' $file -} - - -# Get a list of config files for a specific group -# get_meta_section_files infile group -function get_meta_section_files { - local file=$1 - local matchgroup=$2 - - [[ -r $file ]] || return 0 - - $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' - /^\[\[.+\|.*\]\]/ { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup) - print a[2] - } - ' $file -} - - -# Merge the contents of a meta-config file into its destination config file -# If configfile does not exist it will be created. -# merge_config_file infile group configfile -function merge_config_file { - local file=$1 - local matchgroup=$2 - local configfile=$3 - - [[ -r $configfile ]] || touch $configfile - - get_meta_section $file $matchgroup $configfile | \ - $CONFIG_AWK_CMD -v configfile=$configfile ' - BEGIN { section = "" } - /^\[.+\]/ { - gsub("[][]", "", $1); - section=$1 - next - } - /^ *\#/ { - next - } - /^[^ \t]+/ { - split($0, d, " *= *") - print "iniset " configfile " " section " " d[1] " \"" d[2] "\"" - } - ' | while read a; do eval "$a"; done - -} - - -# Merge all of the files specified by group -# merge_config_group infile group [group ...] -function merge_config_group { - local localfile=$1; shift - local matchgroups=$@ - - [[ -r $localfile ]] || return 0 - - for group in $matchgroups; do - for configfile in $(get_meta_section_files $localfile $group); do - if [[ -d $(dirname $configfile) ]]; then - merge_config_file $localfile $group $configfile - fi - done - done -} - - -# Restore xtrace -$C_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/database b/lib/database index 0661049e70..78563f6f6d 100644 --- a/lib/database +++ b/lib/database @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/database # Interface for interacting with different database backends @@ -18,9 +20,10 @@ # and call register_database $DATABASE_TYPE # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_DB=$(set +o | grep xtrace) set +o xtrace +DATABASE_BACKENDS="" # Register a database backend # @@ -28,7 +31,7 @@ set +o xtrace # # This is required to be defined before the specific database scripts are sourced function register_database { - [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" + DATABASE_BACKENDS+=" $1" } # Sourcing the database libs sets DATABASE_BACKENDS with the available list @@ -67,18 +70,29 @@ function initialize_database_backends { # For backward-compatibility, read in the MYSQL_HOST/USER variables and use # them as the default values for the DATABASE_HOST/USER variables. 
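+ # For example, MYSQL_HOST=203.0.113.10 in local.conf (illustrative address) + # becomes the DATABASE_HOST default below.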
- MYSQL_HOST=${MYSQL_HOST:-127.0.0.1} + MYSQL_HOST=${MYSQL_HOST:-$SERVICE_LOCAL_HOST} MYSQL_USER=${MYSQL_USER:-root} - DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + # Set DATABASE_HOST equal to MYSQL_HOST. If SERVICE_IP_VERSION is equal to 6, + # set DATABASE_HOST equal to [MYSQL_HOST]. MYSQL_HOST cannot use brackets due + # to mysql not using bracketing for IPv6 addresses. DATABASE_HOST must have brackets + # due to sqlalchemy only reading IPv6 addresses with brackets. + if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + DATABASE_HOST=${DATABASE_HOST:-[$MYSQL_HOST]} + else + DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + fi + DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}} if [ -n "$MYSQL_PASSWORD" ]; then DATABASE_PASSWORD=$MYSQL_PASSWORD - else - read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." fi + return 0 +} + +function define_database_baseurl { # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own # database and tables. @@ -89,18 +103,14 @@ function initialize_database_backends { # a multi-node DevStack installation. # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services - BASE_SQL_CONN=${BASE_SQL_CONN:-${DATABASE_TYPE}://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} - - return 0 + BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} } # Recreate a given database # $1 The name of the database -# $2 The character set/encoding of the database function recreate_database { local db=$1 - local charset=$2 - recreate_database_$DATABASE_TYPE $db $charset + recreate_database_$DATABASE_TYPE $db } # Install the database @@ -108,6 +118,11 @@ function install_database { install_database_$DATABASE_TYPE } +# Install the database Python packages +function install_database_python { + install_database_python_$DATABASE_TYPE +} + # Configure and start the database function configure_database { configure_database_$DATABASE_TYPE @@ -122,7 +137,7 @@ function database_connection_url { # Restore xtrace -$XTRACE +$_XTRACE_LIB_DB # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/databases/mysql b/lib/databases/mysql index ea22d14ac6..a47580ca3d 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/databases/mysql # Functions to control the configuration and operation of the **MySQL** database backend @@ -6,94 +8,153 @@ # - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace +MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database mysql +if [[ -z "$MYSQL_SERVICE_NAME" ]]; then + MYSQL_SERVICE_NAME=mysql + if is_fedora && ! 
is_oraclelinux; then + MYSQL_SERVICE_NAME=mariadb + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then + MYSQL_SERVICE_NAME=mariadb + fi +fi # Functions # --------- +function get_database_type_mysql { + if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + echo mysql+pymysql + else + echo mysql + fi +} + # Get rid of everything enough to cleanly change database backends function cleanup_database_mysql { + stop_service $MYSQL_SERVICE_NAME if is_ubuntu; then # Get ruthless with mysql - stop_service $MYSQL - apt_get purge -y mysql* + apt_get purge -y mysql* mariadb* sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql return + elif is_oraclelinux; then + uninstall_package mysql-community-server + sudo rm -rf /var/lib/mysql elif is_fedora; then - if [[ $DISTRO =~ (rhel7) ]]; then - MYSQL=mariadb - else - MYSQL=mysqld - fi - elif is_suse; then - MYSQL=mysql + uninstall_package mariadb-server + sudo rm -rf /var/lib/mysql else return fi - stop_service $MYSQL } function recreate_database_mysql { local db=$1 - local charset=$2 mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "DROP DATABASE IF EXISTS $db;" - mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE $db CHARACTER SET $charset;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE $db CHARACTER SET utf8;" } function configure_database_mysql { - local slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then - MY_CONF=/etc/mysql/my.cnf - MYSQL=mysql + my_conf=/etc/mysql/my.cnf + elif is_oraclelinux; then + my_conf=/etc/my.cnf elif is_fedora; then - if [[ $DISTRO =~ (rhel7) ]]; then - MYSQL=mariadb - else - MYSQL=mysqld + my_conf=/etc/my.cnf + local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf + if [ -f "$cracklib_conf" ]; then + inicomment -sudo "$cracklib_conf" "mariadb" "plugin-load-add" fi - MY_CONF=/etc/my.cnf - elif is_suse; then - MY_CONF=/etc/my.cnf - MYSQL=mysql else exit_distro_not_supported "mysql configuration" fi - # Start mysql-server - if is_fedora || is_suse; then + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server + if is_fedora; then # service is not started by default - start_service $MYSQL + start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already - # did that with debconf before installing the package. - if ! is_ubuntu; then + # did that with debconf before installing the package, but we still try, + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04+ because the authorization model change in + # version 10.4 of mariadb. See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi - # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: - sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # In case of Mariadb, giving hostname in arguments causes permission + # problems as it expects connection through socket + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + local cmd_args="-uroot -p$DATABASE_PASSWORD " + else + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " + fi - # Now update ``my.cnf`` for some local needs and restart the mysql service + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + + # In mariadb e.g. on Ubuntu socket plugin is used for authentication + # as root so it works only as sudo. To restore old "mysql like" behaviour, + # we need to change auth plugin for root user + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + fi + if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" + fi - # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and - # set default db type to InnoDB - sudo bash -c "source $TOP_DIR/functions && \ - iniset $MY_CONF mysqld bind-address 0.0.0.0 && \ - iniset $MY_CONF mysqld default-storage-engine InnoDB" + # Now update ``my.cnf`` for some local needs and restart the mysql service + # Set default db type to InnoDB + iniset -sudo $my_conf mysqld sql_mode TRADITIONAL + iniset -sudo $my_conf mysqld default-storage-engine InnoDB + iniset -sudo $my_conf mysqld max_connections 1024 if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" - if is_fedora && ! 
[[ $DISTRO =~ (rhel6) ]]; then + if is_fedora; then slow_log=/var/log/mariadb/mariadb-slow.log else slow_log=/var/log/mysql/mysql-slow.log @@ -101,29 +162,50 @@ function configure_database_mysql { sudo sed -e '/log.slow.queries/d' \ -e '/long.query.time/d' \ -e '/log.queries.not.using.indexes/d' \ - -i $MY_CONF + -i $my_conf # Turn on slow query log, log all queries (any query taking longer than # 0 seconds) and log all non-indexed queries - sudo bash -c "source $TOP_DIR/functions && \ - iniset $MY_CONF mysqld slow-query-log 1 && \ - iniset $MY_CONF mysqld slow-query-log-file $slow_log && \ - iniset $MY_CONF mysqld long-query-time 0 && \ - iniset $MY_CONF mysqld log-queries-not-using-indexes 1" + iniset -sudo $my_conf mysqld slow-query-log 1 + iniset -sudo $my_conf mysqld slow-query-log-file $slow_log + iniset -sudo $my_conf mysqld long-query-time 0 + iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 + fi + + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats fi - restart_service $MYSQL + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + + restart_service $MYSQL_SERVICE_NAME } function install_database_mysql { if is_ubuntu; then # Seed configuration with mysql password so that apt-get install doesn't # prompt us for a password upon install. - cat <> $HOME/.my.cnf + fi chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_ubuntu || is_fedora; then - if [[ $DISTRO =~ (rhel7) ]]; then - install_package mariadb-server - else - install_package mysql-server - fi - elif is_suse; then - if ! 
is_package_installed mariadb; then + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" fi - else - exit_distro_not_supported "mysql installation" + fi +} + +function install_database_python_mysql { + # Install Python client module + pip_install_gr $MYSQL_DRIVER + if [[ "$MYSQL_DRIVER" == "MySQL-python" ]]; then + ADDITIONAL_VENV_PACKAGES+=",MySQL-python" + elif [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + ADDITIONAL_VENV_PACKAGES+=",PyMySQL" fi } function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } # Restore xtrace -$MY_XTRACE +$_XTRACE_DB_MYSQL # Local variables: # mode: shell-script diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 96a5947a60..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/databases/postgresql # Functions to control the configuration and operation of the **PostgreSQL** database backend @@ -6,16 +8,23 @@ # - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -PG_XTRACE=$(set +o | grep xtrace) +_XTRACE_PG=$(set +o | grep xtrace) set +o xtrace +MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) + register_database postgresql # Functions # --------- +function get_database_type_postgresql { + echo postgresql +} + # Get rid of everything enough to cleanly change database backends function cleanup_database_postgresql { stop_service postgresql @@ -32,47 +41,53 @@ function cleanup_database_postgresql { function recreate_database_postgresql { local db=$1 - local charset=$2 # Avoid unsightly error when calling dropdb when the database doesn't exist psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db" - createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E $charset $db + createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db +} + +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log } function configure_database_postgresql { + local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and starting PostgreSQL" if is_fedora; then - PG_HBA=/var/lib/pgsql/data/pg_hba.conf - PG_CONF=/var/lib/pgsql/data/postgresql.conf - if ! sudo [ -e $PG_HBA ]; then - if ! [[ $DISTRO =~ (rhel6) ]]; then - sudo postgresql-setup initdb - else - sudo service postgresql initdb - fi + pg_hba=/var/lib/pgsql/data/pg_hba.conf + pg_conf=/var/lib/pgsql/data/postgresql.conf + if ! 
sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT + sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then - PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` - PG_HBA=$PG_DIR/pg_hba.conf - PG_CONF=$PG_DIR/postgresql.conf - elif is_suse; then - PG_HBA=/var/lib/pgsql/data/pg_hba.conf - PG_CONF=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $PG_HBA ] || start_service postgresql + version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` + if vercmp $version '>=' 9.3; then + if [ -z "`pg_lsclusters -h`" ]; then + echo 'No PostgreSQL clusters exist; will create one' + sudo pg_createcluster $version main --start + fi + fi + pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` + pg_hba=$pg_dir/pg_hba.conf + pg_conf=$pg_dir/postgresql.conf else exit_distro_not_supported "postgresql configuration" fi # Listen on all addresses - sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF + sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf + # Set max_connections + sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf # Do password auth from all IPv4 clients - sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA + sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba # Do password auth for all IPv6 clients - sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA + sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba restart_service postgresql # Create the role if it's not here or else alter it. - root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'") - if [[ ${root_roles} == *HERE ]];then + check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'") + if [[ ${check_role} == *HERE ]];then sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" else sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" @@ -81,24 +96,35 @@ function configure_database_postgresql { function install_database_postgresql { echo_summary "Installing postgresql" - PGPASS=$HOME/.pgpass - if [[ ! -e $PGPASS ]]; then - cat < $PGPASS + local pgpass=$HOME/.pgpass + if [[ ! 
-e $pgpass ]]; then + cat <<EOF > $pgpass *:*:*:$DATABASE_USER:$DATABASE_PASSWORD EOF - chmod 0600 $PGPASS + chmod 0600 $pgpass else - sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS + sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - else - exit_distro_not_supported "postgresql installation" + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora; then + install_package postgresql-server + sudo systemctl enable postgresql + else + exit_distro_not_supported "postgresql installation" + fi fi } +function install_database_python_postgresql { + # Install Python client module + pip_install_gr psycopg2 + ADDITIONAL_VENV_PACKAGES+=",psycopg2" +} + function database_connection_url_postgresql { local db=$1 echo "$BASE_SQL_CONN/$db?client_encoding=utf8" @@ -106,7 +132,7 @@ function database_connection_url_postgresql { # Restore xtrace -$PG_XTRACE +$_XTRACE_PG # Local variables: # mode: shell-script diff --git a/lib/dstat b/lib/dstat new file mode 100644 index 0000000000..9bd0370847 --- /dev/null +++ b/lib/dstat @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/dstat +# Functions to start and stop dstat + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - install_dstat +# - start_dstat +# - stop_dstat + +# Save trace setting +_XTRACE_DSTAT=$(set +o | grep xtrace) +set +o xtrace + +# install_dstat() - Install prerequisites for dstat services +function install_dstat { + if is_service_enabled memory_tracker; then + # Install python libraries required by tools/mlock_report.py + pip_install_gr psutil + fi +} + +# start_dstat() - Start running processes +function start_dstat { + # A better kind of sysstat, with the top process per time slice + run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" + + # To enable memory_tracker add: + # enable_service memory_tracker + # to your localrc + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" + + # TODO(jh): Fail when using the old service name otherwise consumers might + # never notice that it has been removed. + if is_service_enabled peakmem_tracker; then + die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" + fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" +} + +# stop_dstat() - Stop dstat processes +function stop_dstat { + stop_process dstat + stop_process memory_tracker + stop_process file_tracker +} + +# Restore xtrace +$_XTRACE_DSTAT diff --git a/lib/etcd3 b/lib/etcd3 new file mode 100644 index 0000000000..0d22de8c73 --- /dev/null +++ b/lib/etcd3 @@ -0,0 +1,136 @@ +#!/bin/bash +# +# lib/etcd3 +# +# Functions to control the installation and configuration of etcd 3.x +# that provides a key-value store (and possibly other functions).
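For orientation, here is a sketch of the systemd unit that start_etcd3 below ends up writing. This is illustrative only, not generated verbatim: the ExecStart line is assembled from the $cmd string in that function, and the paths assume the defaults above with $DEST at its typical /opt/stack location.

    [Unit]
    After = network.target
    [Service]
    ExecStart = /opt/stack/bin/etcd --name $HOSTNAME --data-dir /opt/stack/data/etcd ...
    User = root
    Type = notify
    Restart = on-failure
    LimitNOFILE = 65536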
+ +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_etcd3 +# - stop_etcd3 +# - cleanup_etcd3 + +# Save trace setting +_XTRACE_ETCD3=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default values for etcd +ETCD_DATA_DIR="$DATA_DIR/etcd" +ETCD_SYSTEMD_SERVICE="devstack@etcd.service" +ETCD_BIN_DIR="$DEST/bin" +# Option below will mount ETCD_DATA_DIR as ramdisk, which is useful to run +# etcd-heavy services in the gate VMs, e.g. Kubernetes. +ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK) +ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512} + +if is_ubuntu ; then + UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` +fi + +# start_etcd3() - Start the etcd process +function start_etcd3 { + local cmd="$ETCD_BIN_DIR/etcd" + cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" + cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" + cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then + cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT " + else + cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT " + fi + cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then + cmd+=" --log-level=debug" + fi + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" + + iniset -sudo $unitfile "Unit" "After" "network.target" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "Restart" "on-failure" + iniset -sudo $unitfile "Service" "LimitNOFILE" "65536" + if is_arch "aarch64"; then + iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64" + fi + + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE + $SYSTEMCTL start $ETCD_SYSTEMD_SERVICE +} + +# stop_etcd3() - Stop the etcd3 process +function stop_etcd3 { + # Do nothing on subnodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE +} + +function cleanup_etcd3 { + # Do nothing on subnodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL disable $ETCD_SYSTEMD_SERVICE + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + sudo rm -f $unitfile + + $SYSTEMCTL daemon-reload + + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo umount $ETCD_DATA_DIR + fi + sudo rm -rf $ETCD_DATA_DIR +} + +function install_etcd3 { + echo "Installing etcd" + + # Create the necessary directories + sudo mkdir -p $ETCD_BIN_DIR + sudo mkdir -p $ETCD_DATA_DIR + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR + fi + + # Download and cache the etcd tgz for subsequent use + local etcd_file + etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)" + if [ !
-f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum + # NOTE(yuanke wei): rm the damaged file when checksum fails + sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1) + + tar xzvf $etcd_file -C $FILES + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi + if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi +} + +# Restore xtrace +$_XTRACE_ETCD3 + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/gantt b/lib/gantt deleted file mode 100644 index 8db2ca1406..0000000000 --- a/lib/gantt +++ /dev/null @@ -1,96 +0,0 @@ -# lib/gantt -# Install and start **Gantt** scheduler service - -# Dependencies: -# -# - functions -# - DEST, DATA_DIR, STACK_USER must be defined - -# stack.sh -# --------- -# - install_gantt -# - configure_gantt -# - init_gantt -# - start_gantt -# - stop_gantt -# - cleanup_gantt - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- - -# set up default directories -GANTT_DIR=$DEST/gantt -GANTT_STATE_PATH=${GANTT_STATE_PATH:=$DATA_DIR/gantt} -GANTT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/gantt.git} -GANTT_BRANCH=${GANTT_BRANCH:-master} - -GANTTCLIENT_DIR=$DEST/python-ganttclient -GANTTCLIENT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/python-ganttclient.git} -GANTTCLIENT_BRANCH=${GANTT_BRANCH:-master} - -# eventually we will have a separate gantt config -# file but for compatibility reasone stick with -# nova.conf for now -GANTT_CONF_DIR=${GANTT_CONF_DIR:-/etc/nova} -GANTT_CONF=$GANTT_CONF_DIR/nova.conf - -# Support entry points installation of console scripts -GANTT_BIN_DIR=$(get_python_exec_prefix) - - -# Functions -# --------- - -# cleanup_gantt() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_gantt { - echo "Cleanup Gantt" -} - -# configure_gantt() - Set config files, create data dirs, etc -function configure_gantt { - echo "Configure Gantt" -} - -# init_gantt() - Initialize database and volume group -function init_gantt { - echo "Initialize Gantt" -} - -# install_gantt() - Collect source and prepare -function install_gantt { - git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH - setup_develop $GANTT_DIR -} - -# install_ganttclient() - Collect source and prepare -function install_ganttclient { - echo "Install Gantt Client" -# git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH -# setup_develop $GANTTCLIENT_DIR -} - -# start_gantt() - Start running processes, including screen -function start_gantt { - if is_service_enabled gantt; then - screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" - fi -} - -# stop_gantt() - Stop running processes -function stop_gantt { - echo "Stop Gantt" - screen_stop gantt -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/glance b/lib/glance index 51e4399388..9422c22141 100644 --- a/lib/glance +++ b/lib/glance @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/glance # Functions to control the configuration and operation of the **Glance** service @@ -19,7 +21,7 @@ # - cleanup_glance # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_GLANCE=$(set +o | grep xtrace) set +o xtrace @@ -27,34 
+29,110 @@ set +o xtrace # -------- # Set up default directories +GITDIR["python-glanceclient"]=$DEST/python-glanceclient +GITDIR["glance_store"]=$DEST/glance_store GLANCE_DIR=$DEST/glance -GLANCECLIENT_DIR=$DEST/python-glanceclient + +# Glance virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + GLANCE_BIN_DIR=${PROJECT_VENV["glance"]}/bin +else + GLANCE_BIN_DIR=$(get_python_exec_prefix) +fi + +# S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + +# Cinder for Glance +USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) +# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values +# from CINDER_ENABLED_BACKENDS +GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} +GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance +fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + +# Glance multi-store configuration +# Boolean flag to enable multiple store configuration for glance +GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) + +# Comma separated list for configuring multiple file stores of glance, +# for example: GLANCE_MULTIPLE_FILE_STORES=fast,cheap,slow +GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} + +# Default store/backend for glance, must be one of the stores specified +# in the GLANCE_MULTIPLE_FILE_STORES option. +GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} + GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} + +# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES; the store +# identifier is appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES +# is set to fast,cheap, then file paths like $DATA_DIR/glance/fast +# and $DATA_DIR/glance/cheap will be generated.
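As a usage sketch, the multi-store knobs above combine in a local.conf like this (illustrative values; "fast" and "cheap" are arbitrary store names):

    [[local|localrc]]
    GLANCE_ENABLE_MULTIPLE_STORES=True
    GLANCE_MULTIPLE_FILE_STORES=fast,cheap
    GLANCE_DEFAULT_BACKEND=fast

With those settings, image data lands under $DATA_DIR/glance/fast and $DATA_DIR/glance/cheap, per the comment above.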
+GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} -GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} +GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt +GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} +GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} +GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} + +GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) + +# Flag to set oslo_policy.enforce_scope. This is used to enable or disable +# the Image API policies scope checks and new defaults. +# By default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) + +# Flag to enable or disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} -GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf +GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf -GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf -GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json +GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf +GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf -# Support entry points installation of console scripts -if [[ -d $GLANCE_DIR/bin ]]; then - GLANCE_BIN_DIR=$GLANCE_DIR/bin -else - GLANCE_BIN_DIR=$(get_python_exec_prefix) +if is_service_enabled tls-proxy; then + GLANCE_SERVICE_PROTOCOL="https" fi # Glance connection info. Note the port must be specified.
-GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} +GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST} +GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} +GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} +GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +GLANCE_UWSGI=glance.wsgi.api:application +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini -# Tell Tempest this project is present -TEMPEST_SERVICES+=,glance +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -62,6 +140,7 @@ TEMPEST_SERVICES+=,glance # Test if any Glance services are enabled # is_glance_enabled function is_glance_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"glance" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 return 1 } @@ -69,148 +148,374 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # kill instances (nova) - # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" + + # Cleanup multiple stores directories + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store file_dir + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + sudo rm -rf $file_dir + done + + # Cleanup reserved stores directories + sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR + fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" +} + +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi +} + +# Set multiple cinder store related config options for each of the cinder store +# +function configure_multiple_cinder_stores { + + local be be_name be_type enabled_backends + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + 
enabled_backends+="${be_name}:cinder," + + set_common_cinder_store_params $be_name + iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name} + if [[ "$be_type" == "nfs" ]]; then + mkdir -p "$GLANCE_NFS_MOUNTPOINT" + iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT" + fi + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND +} + +# Set common cinder store options to given config section +# +# Arguments: +# config_section +# +function set_common_cinder_store_params { + local config_section="$1" + iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3 + iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance + iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME +} + +# Configure multiple file store options for each file store +# +# Arguments: +# +function configure_multiple_file_stores { + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + + # Glance multiple store Store specific configs + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND + local store + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + done +} + +# Set reserved stores for glance +function configure_reserved_stores { + iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" + iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" +} + +# Copy rootwrap file from glance_store/etc/glance to /etc/glance +# +# Arguments: +# source_path Source path to copy rootwrap files from +# +function copy_rootwrap { + local source_path="$1" + # Make the glance configuration directory if it does not exist + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR + cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/ +} + +# Set glance_store related config options +# +# Arguments: +# USE_CINDER_FOR_GLANCE +# GLANCE_ENABLE_MULTIPLE_STORES +# +function configure_glance_store { + local use_cinder_for_glance="$1" + local glance_enable_multiple_stores="$2" + local be + + if [[ "$glance_enable_multiple_stores" == "False" ]]; then + if [[ "$use_cinder_for_glance" == "True" ]]; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" + iniset $GLANCE_API_CONF glance_store default_store cinder + + # set cinder related store parameters + set_common_cinder_store_params glance_store + # set nfs mount_point dir + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + local be_name=${be##*:} + if [[ "$be_name" == "nfs" ]]; then + mkdir -p $GLANCE_NFS_MOUNTPOINT + iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT + fi + done + fi + # Store specific configs + iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + else + if [[ "$use_cinder_for_glance" == "True" ]]; then + # Configure multiple cinder stores for glance + configure_multiple_cinder_stores + elif !
is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then + # Configure multiple file stores for glance + configure_multiple_file_stores + fi + # Configure reserved stores + configure_reserved_stores + fi +} + +function configure_glance_quotas { + + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope all + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } # configure_glance() - Set config files, create data dirs, etc function configure_glance { - if [[ ! 
-d $GLANCE_CONF_DIR ]]; then - sudo mkdir -p $GLANCE_CONF_DIR - fi - sudo chown $STACK_USER $GLANCE_CONF_DIR - - # Copy over our glance configurations and update them - cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file - local dburl=`database_connection_url glance` - iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl - iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry - - cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR + + # Set non-default configuration options for the API server + local dburl + dburl=`database_connection_url glance` + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_API_CONF DEFAULT log_file - iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER + iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION - iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_API_CONF keystone_authtoken admin_user glance - iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_API_CONF DEFAULT notification_driver messaging + configure_keystone_authtoken_middleware $GLANCE_API_CONF glance + iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 + iniset_rpc_backend glance $GLANCE_API_CONF + if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then + iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi - iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT - iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" - iniset 
$GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" + # Only use these if you know what you are doing! See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT + + # Configure glance_store + configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES + + # CORS feature support - to allow calls from Horizon by default + if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then + iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" + else + iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" fi - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then - iniset $GLANCE_API_CONF DEFAULT default_store swift - iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance-swift - iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD - iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + # No multiple stores for swift yet + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + # Configure the s3 store if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. + iniset $GLANCE_API_CONF glance_store default_store swift + iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + + iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF + iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 + iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + fi + iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift + + iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 + fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi + fi + + # We need to tell glance what its public endpoint is so that the version + # discovery document will be correct iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL - iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT + iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI fi - cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + # Format logging + setup_logging $GLANCE_API_CONF cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI - cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + # Set non-default configuration options for the glance-cache iniset
$GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $GLANCE_CACHE_CONF DEFAULT log_file iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD - - cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER + + # Store specific confs + iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + + # Set default configuration options for the glance-image-import + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject + cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON + cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR + + if is_service_enabled tls-proxy; then + CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} + CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} + + iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + fi + + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. 
+ iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false + fi } # create_glance_accounts() - Set up common required glance accounts -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME glance service -# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) +# Project User Roles +# --------------------------------------------------------------------- +# SERVICE_PROJECT_NAME glance service +# SERVICE_PROJECT_NAME glance-swift ResellerAdmin (if Swift is enabled) +# SERVICE_PROJECT_NAME glance-search search (if Search is enabled) function create_glance_accounts { if is_service_enabled g-api; then - openstack user create \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT_NAME \ - glance - openstack role add \ - --project $SERVICE_TENANT_NAME \ - --user glance \ - service + + # When cinder talks to the glance service APIs, the user needs the service + # role for RBAC checks and the admin role for cinder to access images. + create_service_user "glance" "admin" + # required for swift access if is_service_enabled s-proxy; then - openstack user create \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT_NAME \ - glance-swift - openstack role add \ - --project $SERVICE_TENANT_NAME \ - --user glance-swift \ - ResellerAdmin + create_service_user "glance-swift" "ResellerAdmin" fi - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - openstack service create \ - --type image \ - --description "Glance Image Service" \ - glance - openstack endpoint create \ - --region RegionOne \ - --publicurl "http://$GLANCE_HOSTPORT" \ - --adminurl "http://$GLANCE_HOSTPORT" \ - --internalurl "http://$GLANCE_HOSTPORT" \ - glance + + get_or_create_service "glance" "image" "Glance Image Service" + get_or_create_endpoint \ + "image" \ + "$REGION_NAME" \ + "$GLANCE_URL" + + # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 + service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) + iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id + iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id + + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas fi - fi -} -# create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir { - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api - rm -f $GLANCE_AUTH_CACHE_DIR/api/* - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry - rm -f $GLANCE_AUTH_CACHE_DIR/registry/* + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi + fi } # init_glance() - Initialize databases, etc.
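To sanity-check the registered limits that configure_glance_quotas creates above, the same system-scoped credentials can be used to list them. A usage sketch, assuming the devstack-system-admin cloud entry exists in clouds.yaml:

    openstack --os-cloud devstack-system-admin registered limit list --service glance
    # Expect image_size_total and image_stage_total at $GLANCE_LIMIT_IMAGE_SIZE_TOTAL
    # (2000 by default), and image_count_total/image_count_uploading at 100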
@@ -219,51 +524,151 @@ function init_glance { rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - # Delete existing cache - rm -rf $GLANCE_CACHE_DIR - mkdir -p $GLANCE_CACHE_DIR - # (Re)create glance database - recreate_database glance utf8 + recreate_database glance + time_start "dbsync" # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db_sync + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync - create_glance_cache_dir + # Load metadata definitions + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs + time_stop "dbsync" } # install_glanceclient() - Collect source and prepare function install_glanceclient { - git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH - setup_develop $GLANCECLIENT_DIR + if use_library_from_git "python-glanceclient"; then + git_clone_by_name "python-glanceclient" + setup_dev_lib "python-glanceclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-glanceclient"]}/tools/,/etc/bash_completion.d/}glance.bash_completion + fi } # install_glance() - Collect source and prepare function install_glance { + local glance_store_extras=() + + if is_service_enabled cinder; then + glance_store_extras=("cinder" "${glance_store_extras[@]}") + fi + + if is_service_enabled swift; then + glance_store_extras=("swift" "${glance_store_extras[@]}") + fi + + # Install glance_store from git so we make sure we're testing + # the latest code. + if use_library_from_git "glance_store"; then + git_clone_by_name "glance_store" + setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}") + copy_rootwrap ${DEST}/glance_store/etc/glance + else + # we still need to pass extras + pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}") + copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR + fi + git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH + setup_develop $GLANCE_DIR } -# start_glance() - Start running processes, including screen +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi venv + + glance_remote_conf_dir="$(glance_remote_conf "")" + glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + + # Point this worker to use different cache dir + mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + "$glance_remote_uwsgi" + + 
# Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + "$glance_remote_uwsgi") + + # We need to create the systemd service for the clone, but then + # change it to include an Environment line to point the WSGI app + # at the alternate config directory. + if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $glance_remote_uwsgi \ + $venv" \ + "" "$STACK_USER" + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $glance_remote_uwsgi) +} + +# start_glance() - Start running processes function start_glance { - screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" - screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + local service_protocol=$GLANCE_SERVICE_PROTOCOL + + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" + + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone + fi + + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then die $LINENO "g-api did not start" fi } # stop_glance() - Stop running processes function stop_glance { - # Kill the Glance screen windows - screen_stop g-api - screen_stop g-reg + stop_process g-api + stop_process g-api-r } - # Restore xtrace -$XTRACE +$_XTRACE_GLANCE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/heat b/lib/heat deleted file mode 100644 index fe75ec9dc4..0000000000 --- a/lib/heat +++ /dev/null @@ -1,297 +0,0 @@ -# lib/heat -# Install and start **Heat** service - -# To enable, add the following to localrc -# -# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng - -# Dependencies: -# -# - functions - -# stack.sh -# --------- -# - install_heatclient -# - install_heat -# - configure_heatclient -# - configure_heat -# - init_heat -# - start_heat -# - stop_heat -# - cleanup_heat - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# set up default directories -HEAT_DIR=$DEST/heat -HEATCLIENT_DIR=$DEST/python-heatclient -HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} -HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` -HEAT_CONF_DIR=/etc/heat -HEAT_CONF=$HEAT_CONF_DIR/heat.conf -HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d -HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates -HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN` - -# other default options -HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,heat - - -# Functions -# --------- - -# Test if any Heat services are enabled -# is_heat_enabled -function is_heat_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 - return 1 -} - -# cleanup_heat() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_heat { - sudo rm -rf $HEAT_AUTH_CACHE_DIR - sudo rm -rf $HEAT_ENV_DIR - sudo rm -rf $HEAT_TEMPLATES_DIR -} - -# configure_heat() - Set config files, create data dirs, etc -function configure_heat { - setup_develop $HEAT_DIR - - if [[ ! 
-d $HEAT_CONF_DIR ]]; then - sudo mkdir -p $HEAT_CONF_DIR - fi - sudo chown $STACK_USER $HEAT_CONF_DIR - # remove old config files - rm -f $HEAT_CONF_DIR/heat-*.conf - - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} - HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} - HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} - HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} - HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} - HEAT_API_PORT=${HEAT_API_PORT:-8004} - HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini - HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json - - cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE - cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE - cp $HEAT_DIR/etc/heat/heat.conf.sample $HEAT_CONF - - # common options - iniset_rpc_backend heat $HEAT_CONF DEFAULT - iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition - iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_CONF database connection `database_connection_url heat` - iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/urandom` - - # logging - iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - setup_colorized_logging $HEAT_CONF DEFAULT tenant user - fi - - # keystone authtoken - iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - configure_API_version $HEAT_CONF $IDENTITY_API_VERSION - iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_CONF keystone_authtoken admin_user heat - iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR - - # ec2authtoken - iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - - # paste_deploy - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone - - # OpenStack API - iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT - - # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT - - # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT - - # heat environment - sudo mkdir -p $HEAT_ENV_DIR - sudo chown $STACK_USER $HEAT_ENV_DIR - # copy the default environment - cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ - - # heat template resources. 
- sudo mkdir -p $HEAT_TEMPLATES_DIR - sudo chown $STACK_USER $HEAT_TEMPLATES_DIR - # copy the default templates - cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ - -} - -# init_heat() - Initialize database -function init_heat { - - # (re)create heat database - recreate_database heat utf8 - - $HEAT_DIR/bin/heat-manage db_sync - create_heat_cache_dir -} - -# create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir { - # Create cache dirs - sudo mkdir -p $HEAT_AUTH_CACHE_DIR - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR -} - -# install_heatclient() - Collect source and prepare -function install_heatclient { - git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH - setup_develop $HEATCLIENT_DIR - sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion -} - -# install_heat() - Collect source and prepare -function install_heat { - git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH -} - -# start_heat() - Start running processes, including screen -function start_heat { - screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" - screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" - screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" - screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF" -} - -# stop_heat() - Stop running processes -function stop_heat { - # Kill the screen windows - for serv in h-eng h-api h-api-cfn h-api-cw; do - screen_stop $serv - done -} - -function disk_image_create { - local elements_path=$1 - local elements=$2 - local arch=$3 - local output=$TOP_DIR/files/$4 - if [[ -f "$output.qcow2" ]]; then - echo "Image file already exists: $output_file" - else - ELEMENTS_PATH=$elements_path disk-image-create \ - $elements -a $arch -o $output - fi - # upload with fake URL so that image in $TOP_DIR/files is used - upload_image "http://localhost/$output.qcow2" $TOKEN -} - -# create_heat_accounts() - Set up common required heat accounts -function create_heat_accounts { - # migrated from files/keystone_data.sh - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - HEAT_USER=$(openstack user create \ - heat \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email heat@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $HEAT_USER - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - HEAT_SERVICE=$(openstack service create \ - heat \ - --type=orchestration \ - --description="Heat Orchestration Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $HEAT_SERVICE \ - --region RegionOne \ - --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - HEAT_CFN_SERVICE=$(openstack service create \ - heat \ - --type=cloudformation \ - --description="Heat CloudFormation Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $HEAT_CFN_SERVICE \ - --region RegionOne \ - --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - --internalurl 
"$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" - fi - - # heat_stack_user role is for users created by Heat - openstack role create heat_stack_user - - if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then - # heat_stack_owner role is given to users who create Heat stacks, - # it's the default role used by heat to delegate to the heat service - # user (for performing deferred operations via trusts), see heat.conf - HEAT_OWNER_ROLE=$(openstack role create \ - heat_stack_owner \ - | grep " id " | get_field 2) - - # Give the role to the demo and admin users so they can create stacks - # in either of the projects created by devstack - openstack role add $HEAT_OWNER_ROLE --project demo --user demo - openstack role add $HEAT_OWNER_ROLE --project demo --user admin - openstack role add $HEAT_OWNER_ROLE --project admin --user admin - iniset $HEAT_CONF DEFAULT deferred_auth_method trusts - fi - - if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then - # Note we have to pass token/endpoint here because the current endpoint and - # version negotiation in OSC means just --os-identity-api-version=3 won't work - KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 domain create heat \ - --description "Owns users and projects created by heat" \ - | grep ' id ' | get_field 2) - iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} - - openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \ - --domain $D_ID heat_domain_admin \ - --description "Manages users and projects created by heat" - openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 role add \ - --user heat_domain_admin --domain ${D_ID} admin - iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin - iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD - fi -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/horizon b/lib/horizon index 9ce485317c..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/horizon # Functions to control the configuration and operation of the horizon service @@ -17,24 +19,19 @@ # - cleanup_horizon # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_HORIZON=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- -# Set up default directories HORIZON_DIR=$DEST/horizon # local_settings.py is used to customize Dashboard settings. # The example file in Horizon repo is used by default. 
 HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example}
 
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,horizon
-
-
 # Functions
 # ---------
 
@@ -46,9 +43,10 @@ function _horizon_config_set {
     local value=$4
 
     if [ -z "$section" ]; then
-        sed -e "/^$option/d" -i $local_settings
-        echo -e "\n$option=$value" >> $file
+        sed -e "/^$option/d" -i $file
+        echo "$option = $value" >> $file
     elif grep -q "^$section" $file; then
+        local line
         line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
@@ -68,86 +66,106 @@ function _horizon_config_set {
 
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon {
-    if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-        # If ``/usr/bin/node`` points into ``$DEST``
-        # we installed it via ``install_nodejs``
-        if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then
-            sudo rm /usr/bin/node
-        fi
-    fi
+    disable_apache_site horizon
+    sudo rm -f $(apache_site_config_for horizon)
 }
 
 # configure_horizon() - Set config files, create data dirs, etc
 function configure_horizon {
     setup_develop $HORIZON_DIR
-}
 
-# init_horizon() - Initialize databases, etc.
-function init_horizon {
+    # Compile message catalogs.
+    # Horizon is installed as develop mode, so we can compile here.
+    # Message catalog compilation is handled by Django admin script,
+    # so compiling them after the installation avoids Django installation twice.
+    (cd $HORIZON_DIR; $PYTHON manage.py compilemessages)
 
     # ``local_settings.py`` is used to override horizon default settings.
-    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
    cp $HORIZON_SETTINGS $local_settings
 
-    if is_service_enabled neutron; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP
-    fi
-    # enable loadbalancer dashboard in case service is enabled
-    if is_service_enabled q-lbaas; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
-    fi
+    # Ensure local_settings.py file ends with EOL (newline)
+    echo >> $local_settings
 
-    # enable firewall dashboard in case service is enabled
-    if is_service_enabled q-fwaas; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True
-    fi
+    _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\"
 
-    # enable VPN dashboard in case service is enabled
-    if is_service_enabled q-vpn; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True
-    fi
+    _horizon_config_set $local_settings "" COMPRESS_OFFLINE True
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"member\"
 
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
-    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://%s:${KEYSTONE_SERVICE_PORT}/v2.0\" % OPENSTACK_HOST"
+
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
+
+    # note(trebskit): if HOST_IP points at a non-localhost IP address, horizon
+    # cannot be accessed from outside the virtual machine. This fix is meant
+    # primarily for local development purposes.
+    _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"]
 
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
     fi
 
+    if is_service_enabled ldap; then
+        _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True"
+    fi
+
+    if is_service_enabled c-bak; then
+        _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True"
+    fi
+
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
-    # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow")
-    HORIZON_REQUIRE=''
-    if check_apache_version "2.4" ; then
-        HORIZON_REQUIRE='Require all granted'
+    local horizon_conf
+    horizon_conf=$(apache_site_config_for horizon)
+
+    local wsgi_venv_config=""
+    if [[ "$GLOBAL_VENV" == "True" ]] ; then
+        wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV"
     fi
-    local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf
 
+    # Configure apache to run horizon
+    # Set up the django horizon application to serve via apache/wsgi
+    sudo sh -c "sed -e \"
+        s,%USER%,$APACHE_USER,g;
+        s,%GROUP%,$APACHE_GROUP,g;
+        s,%HORIZON_DIR%,$HORIZON_DIR,g;
+        s,%APACHE_NAME%,$APACHE_NAME,g;
+        s,%DEST%,$DEST,g;
+        s,%WEBROOT%,$HORIZON_APACHE_ROOT,g;
+        s,%WSGIPYTHONHOME%,$wsgi_venv_config,g;
+    \" $FILES/apache-horizon.template >$horizon_conf"
+
     if is_ubuntu; then
         disable_apache_site 000-default
         sudo touch $horizon_conf
-        enable_apache_site horizon.conf
     elif is_fedora; then
-        sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
-    elif is_suse; then
         : # nothing to do
     else
        exit_distro_not_supported "horizon apache configuration"
     fi
+    enable_apache_site horizon
+}
 
-    # Remove old log files that could mess with how devstack detects whether Horizon
+# init_horizon() - Initialize databases, etc.
+function init_horizon {
+    # Remove old log files that could mess with how DevStack detects whether Horizon
     # has been successfully started (see start_horizon() and functions::screen_it())
+    # and run_process
     sudo rm -f /var/log/$APACHE_NAME/horizon_*
 
-    # Configure apache to run horizon
-    sudo sh -c "sed -e \"
-        s,%USER%,$APACHE_USER,g;
-        s,%GROUP%,$APACHE_GROUP,g;
-        s,%HORIZON_DIR%,$HORIZON_DIR,g;
-        s,%APACHE_NAME%,$APACHE_NAME,g;
-        s,%DEST%,$DEST,g;
-        s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
-        \" $FILES/apache-horizon.template >$horizon_conf"
+    # Setup alias for django-admin which could be different depending on distro
+    local django_admin
+    if type -p django-admin > /dev/null; then
+        django_admin=django-admin
+    else
+        django_admin=django-admin.py
+    fi
+
+    # These need to be run after horizon plugins are configured.
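+    # (Editorial aside: collectstatic gathers every app's static assets into
+    # Django's STATIC_ROOT and compress pre-builds the minified css/js bundles
+    # so apache can serve them without a runtime compressor, matching the
+    # COMPRESS_OFFLINE=True set in configure_horizon above. A hedged sketch of
+    # checking a single asset by hand after stacking; findstatic is a stock
+    # Django admin command, but the asset path is an illustrative guess:
+    #
+    #   cd /opt/stack/horizon
+    #   DJANGO_SETTINGS_MODULE=openstack_dashboard.settings \
+    #       django-admin findstatic --first dashboard/img/logo.svg
+    #)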
+    DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin collectstatic --noinput
+    DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin compress --force
+
 }
 
 # install_horizon() - Collect source and prepare
@@ -155,23 +173,25 @@ function install_horizon {
     # Apache installation, because we mark it NOPRIME
     install_apache_wsgi
 
-    git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
+    # Install the memcache library so that horizon can use memcached as its
+    # cache backend
+    pip_install_gr pymemcache
+
+    git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
 }
 
-# start_horizon() - Start running processes, including screen
+# start_horizon() - Start running processes
 function start_horizon {
     restart_apache_server
-    screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
 }
 
-# stop_horizon() - Stop running processes (non-screen)
+# stop_horizon() - Stop running processes
 function stop_horizon {
     stop_apache_server
 }
 
-
 # Restore xtrace
-$XTRACE
+$_XTRACE_HORIZON
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/host b/lib/host
new file mode 100644
index 0000000000..58062eff6b
--- /dev/null
+++ b/lib/host
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Kernel Samepage Merging (KSM)
+# -----------------------------
+
+# Processes that mark their memory as mergeable can share identical memory
+# pages if KSM is enabled. This is particularly useful for nova + libvirt
+# backends but any other setup that marks its memory as mergeable can take
+# advantage. The drawback is there is higher cpu load; however, we tend to
+# be memory bound not cpu bound so enable KSM by default but allow people
+# to opt out if the CPU time is more important to them.
+ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
+ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED)
+function configure_ksm {
+    if [[ $ENABLE_KSMTUNED == "True" ]] ; then
+        install_package "ksmtuned"
+    fi
+    if [[ -f /sys/kernel/mm/ksm/run ]] ; then
+        echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run
+    fi
+}
+
+# Compressed swap (ZSWAP)
+#------------------------
+
+# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html
+# Zswap is a lightweight compressed cache for swap pages.
+# It takes pages that are in the process of being swapped out and attempts
+# to compress them into a dynamically allocated RAM-based memory pool.
+# zswap basically trades CPU cycles for potentially reduced swap I/O.
+# This trade-off can also result in a significant performance improvement
+# if reads from the compressed cache are faster than reads from a swap device.
+
+ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP)
+# lz4 is very fast although it does not have the best compression
+# zstd has much better compression but more latency
+ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"}
+ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"}
+function configure_zswap {
+    if [[ $ENABLE_ZSWAP == "True" ]] ; then
+        # CentOS 9 Stream seems to only support enabling but not run time
+        # tuning, so don't try to choose a better default on CentOS
+        if is_ubuntu; then
+            echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor
+            echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool
+        fi
+        echo 1 | sudo tee /sys/module/zswap/parameters/enabled
+        # print current zswap kernel config
+        sudo grep -R . /sys/module/zswap/parameters || /bin/true
+    fi
+}
+
+ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING)
+function configure_sysctl_mem_parmaters {
+    if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then
+        # defer write when memory is available
+        sudo sysctl -w vm.dirty_ratio=60
+        sudo sysctl -w vm.dirty_background_ratio=10
+        sudo sysctl -w vm.vfs_cache_pressure=50
+        # assume swap is compressed so on new kernels
+        # give it equal priority with the page cache, which is
+        # uncompressed. on kernels < 5.8 the max is 100
+        # not 200 so it will strongly prefer swapping.
+        sudo sysctl -w vm.swappiness=100
+        sudo grep -R . /proc/sys/vm/ || /bin/true
+    fi
+}
+
+function configure_host_mem {
+    configure_zswap
+    configure_ksm
+    configure_sysctl_mem_parmaters
+}
+
+ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING)
+function configure_sysctl_net_parmaters {
+    if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then
+        # detect dead TCP connections after 120 seconds (60s idle + 6 probes * 10s)
+        sudo sysctl -w net.ipv4.tcp_keepalive_time=60
+        sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10
+        sudo sysctl -w net.ipv4.tcp_keepalive_probes=6
+        # reduce network latency for new connections
+        sudo sysctl -w net.ipv4.tcp_fastopen=3
+        # print tcp options
+        sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true
+        # disable qos by default
+        sudo sysctl -w net.core.default_qdisc=pfifo_fast
+    fi
+}
+
+function configure_host_net {
+    configure_sysctl_net_parmaters
+}
+
+function tune_host {
+    configure_host_mem
+    configure_host_net
+}
diff --git a/lib/infra b/lib/infra
index e2f7dadf3d..f4760c352c 100644
--- a/lib/infra
+++ b/lib/infra
@@ -1,3 +1,5 @@
+#!/bin/bash
+#
 # lib/infra
 #
 # Functions to install infrastructure projects needed by other projects
@@ -10,47 +12,45 @@
 # ``stack.sh`` calls the entry points in this order:
 #
-# - unfubar_setuptools
 # - install_infra
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_INFRA=$(set +o | grep xtrace)
 set +o xtrace
 
 # Defaults
 # --------
-PBR_DIR=$DEST/pbr
-REQUIREMENTS_DIR=$DEST/requirements
+GITDIR["pbr"]=$DEST/pbr
 
 # Entry Points
 # ------------
 
-# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
-function unfubar_setuptools {
-    # this is a giant game of who's on first, but it does consistently work
-    # there is hope that upstream python packaging fixes this in the future
-    echo_summary "Unbreaking setuptools"
-    pip_install -U setuptools
-    pip_install -U pip
-    uninstall_package python-setuptools
-    pip_install -U setuptools
-    pip_install -U pip
-}
-
-
 # install_infra() - Collect source and prepare
 function install_infra {
-    # bring down global requirements
-    git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+    local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
+    [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
+    # We don't care about testing git pbr in the requirements venv.
+    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core]
+    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
+
+    # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped
+    # down the VENV well
+    unset PIP_VIRTUAL_ENV
 
     # Install pbr
-    git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
-    setup_install $PBR_DIR
+    if use_library_from_git "pbr"; then
+        git_clone_by_name "pbr"
+        setup_dev_lib "pbr"
+    else
+        # Always upgrade pbr to latest version as we may have pulled it
+        # in via system packages.
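+        # (Editorial aside: use_library_from_git consults LIBS_FROM_GIT, so
+        # the git branch above is taken only when pbr is opted in. A minimal
+        # sketch of opting in from local.conf, assuming the usual
+        # comma-separated form:
+        #
+        #   [[local|localrc]]
+        #   LIBS_FROM_GIT=pbr,python-keystoneclient
+        #
+        # Anything not listed keeps coming from released packages, as the
+        # pip_install below does for pbr.)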
+        pip_install "-U" "pbr"
+    fi
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_INFRA
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/ironic b/lib/ironic
deleted file mode 100644
index 389040cd39..0000000000
--- a/lib/ironic
+++ /dev/null
@@ -1,563 +0,0 @@
-# lib/ironic
-# Functions to control the configuration and operation of the **Ironic** service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# - ``SERVICE_HOST``
-# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_ironic
-# - install_ironicclient
-# - init_ironic
-# - start_ironic
-# - stop_ironic
-# - cleanup_ironic
-
-# Save trace and pipefail settings
-XTRACE=$(set +o | grep xtrace)
-PIPEFAIL=$(set +o | grep pipefail)
-set +o xtrace
-set +o pipefail
-
-# Defaults
-# --------
-
-# Set up default directories
-IRONIC_DIR=$DEST/ironic
-IRONIC_DATA_DIR=$DATA_DIR/ironic
-IRONIC_STATE_PATH=/var/lib/ironic
-IRONICCLIENT_DIR=$DEST/python-ironicclient
-IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic}
-IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
-IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf
-IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
-IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
-
-# Set up defaults for functional / integration testing
-IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts}
-IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates}
-IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS)
-IRONIC_ENABLED_DRIVERS=${IRONIC_ENABLED_DRIVERS:-fake,pxe_ssh,pxe_ipmitool}
-IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`}
-IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys}
-IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key}
-IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME
-IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh}
-IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot}
-IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-22}
-IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
-IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
-IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
-# NOTE(adam_g): Kernels 3.12 and newer user tmpfs by default for initramfs.
-# DIB produced ramdisks tend to be ~250MB but tmpfs will only allow
-# use of 50% of available memory before ENOSPC. Set minimum 1GB
-# for nodes to avoid (LP: #1311987) and ensure consistency across
-# older and newer kernels.
-IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-1024}
-IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
-IRONIC_VM_EPHEMERAL_DISK=${IRONIC_VM_EPHEMERAL_DISK:-0}
-IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
-IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
-IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24}
-IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv}
-IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys}
-
-# By default, baremetal VMs will console output to file.
-IRONIC_VM_LOG_CONSOLE=${IRONIC_VM_LOG_CONSOLE:-True}
-IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/}
-
-DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
-
-# Use DIB to create deploy ramdisk and kernel.
-IRONIC_BUILD_DEPLOY_RAMDISK=`trueorfalse True $IRONIC_BUILD_DEPLOY_RAMDISK`
-# If not use DIB, these files are used as deploy ramdisk/kernel.
-# (The value must be a absolute path)
-IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-}
-IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
-IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
-
-#TODO(agordeev): replace 'ubuntu' with host distro name getting
-IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
-
-# Support entry points installation of console scripts
-IRONIC_BIN_DIR=$(get_python_exec_prefix)
-
-# Ironic connection info.  Note the port must be specified.
-IRONIC_SERVICE_PROTOCOL=http
-IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,ironic
-
-
-# Functions
-# ---------
-
-# Test if any Ironic services are enabled
-# is_ironic_enabled
-function is_ironic_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0
-    return 1
-}
-
-# install_ironic() - Collect source and prepare
-function install_ironic {
-    git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
-    setup_develop $IRONIC_DIR
-}
-
-# install_ironicclient() - Collect sources and prepare
-function install_ironicclient {
-    git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH
-    setup_develop $IRONICCLIENT_DIR
-}
-
-# cleanup_ironic() - Remove residual data files, anything left over from previous
-# runs that would need to clean up.
-function cleanup_ironic {
-    sudo rm -rf $IRONIC_AUTH_CACHE_DIR
-}
-
-# configure_ironic() - Set config files, create data dirs, etc
-function configure_ironic {
-    if [[ ! -d $IRONIC_CONF_DIR ]]; then
-        sudo mkdir -p $IRONIC_CONF_DIR
-    fi
-    sudo chown $STACK_USER $IRONIC_CONF_DIR
-
-    # Copy over ironic configuration file and configure common parameters.
-    cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE
-    iniset $IRONIC_CONF_FILE DEFAULT debug True
-    inicomment $IRONIC_CONF_FILE DEFAULT log_file
-    iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic`
-    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
-    iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
-    # Configure Ironic conductor, if it was enabled.
-    if is_service_enabled ir-cond; then
-        configure_ironic_conductor
-    fi
-
-    # Configure Ironic API, if it was enabled.
-    if is_service_enabled ir-api; then
-        configure_ironic_api
-    fi
-
-    if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then
-        configure_ironic_auxiliary
-    fi
-}
-
-# configure_ironic_api() - Is used by configure_ironic(). Performs
-# API specific configuration.
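# (Editorial aside, not part of the removed file: the function below wrote the
# legacy keystone_authtoken options (auth_host, auth_port, admin_user, ...).
# The lib/keystone changes later in this diff add a helper that emits the
# modern auth_type=password layout instead; a hedged sketch of the equivalent
# modern call, had this file survived:
#
#   configure_keystone_authtoken_middleware $IRONIC_CONF_FILE ironic
#)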
-function configure_ironic_api {
-    iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
-    iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT
-    iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
-
-    cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON
-}
-
-# configure_ironic_conductor() - Is used by configure_ironic().
-# Sets conductor specific settings.
-function configure_ironic_conductor {
-    cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
-    cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
-    IRONIC_ROOTWRAP=$(get_rootwrap_location ironic)
-    ROOTWRAP_ISUDOER_CMD="$IRONIC_ROOTWRAP $IRONIC_CONF_DIR/rootwrap.conf *"
-
-    # Set up the rootwrap sudoers for ironic
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_ISUDOER_CMD" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/ironic-rootwrap
-
-    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
-    iniset $IRONIC_CONF_FILE DEFAULT enabled_drivers $IRONIC_ENABLED_DRIVERS
-    iniset $IRONIC_CONF_FILE conductor api_url http://$HOST_IP:6385
-    iniset $IRONIC_CONF_FILE pxe tftp_server $HOST_IP
-    iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
-    iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
-    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
-    fi
-}
-
-# create_ironic_cache_dir() - Part of the init_ironic() process
-function create_ironic_cache_dir {
-    # Create cache dir
-    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api
-    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api
-    rm -f $IRONIC_AUTH_CACHE_DIR/api/*
-    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry
-    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry
-    rm -f $IRONIC_AUTH_CACHE_DIR/registry/*
-}
-
-# create_ironic_accounts() - Set up common required ironic accounts
-
-# Tenant               User       Roles
-# ------------------------------------------------------------------
-# service              ironic     admin        # if enabled
-function create_ironic_accounts {
-
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
-    # Ironic
-    if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
-        IRONIC_USER=$(openstack user create \
-            ironic \
-            --password "$SERVICE_PASSWORD" \
-            --project $SERVICE_TENANT \
-            --email ironic@example.com \
-            | grep " id " | get_field 2)
-        openstack role add \
-            $ADMIN_ROLE \
-            --project $SERVICE_TENANT \
-            --user $IRONIC_USER
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            IRONIC_SERVICE=$(openstack service create \
-                ironic \
-                --type=baremetal \
-                --description="Ironic baremetal provisioning service" \
-                | grep " id " | get_field 2)
-            openstack endpoint create \
-                $IRONIC_SERVICE \
-                --region RegionOne \
-                --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
-                --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
-                --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT"
-        fi
-    fi
-}
-
-
-# init_ironic() - Initialize databases, etc.
-function init_ironic {
-    # (Re)create ironic database
-    recreate_database ironic utf8
-
-    # Migrate ironic database
-    $IRONIC_BIN_DIR/ironic-dbsync
-
-    create_ironic_cache_dir
-}
-
-# start_ironic() - Start running processes, including screen
-function start_ironic {
-    # Start Ironic API server, if enabled.
-    if is_service_enabled ir-api; then
-        start_ironic_api
-    fi
-
-    # Start Ironic conductor, if enabled.
-    if is_service_enabled ir-cond; then
-        start_ironic_conductor
-    fi
-}
-
-# start_ironic_api() - Used by start_ironic().
-# Starts Ironic API server.
-function start_ironic_api {
-    screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
-    echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
-        die $LINENO "ir-api did not start"
-    fi
-}
-
-# start_ironic_conductor() - Used by start_ironic().
-# Starts Ironic conductor.
-function start_ironic_conductor {
-    screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
-    # TODO(romcheg): Find a way to check whether the conductor has started.
-}
-
-# stop_ironic() - Stop running processes
-function stop_ironic {
-    # Kill the Ironic screen windows
-    screen -S $SCREEN_NAME -p ir-api -X kill
-    screen -S $SCREEN_NAME -p ir-cond -X kill
-}
-
-function is_ironic {
-    if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then
-        return 0
-    fi
-    return 1
-}
-
-function configure_ironic_dirs {
-    sudo mkdir -p $IRONIC_DATA_DIR
-    sudo mkdir -p $IRONIC_STATE_PATH
-    sudo mkdir -p $IRONIC_TFTPBOOT_DIR
-    sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
-    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
-    if is_ubuntu; then
-        PXEBIN=/usr/lib/syslinux/pxelinux.0
-    elif is_fedora; then
-        PXEBIN=/usr/share/syslinux/pxelinux.0
-    fi
-    if [ ! -f $PXEBIN ]; then
-        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
-    fi
-
-    cp $PXEBIN $IRONIC_TFTPBOOT_DIR
-    mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
-}
-
-function create_bridge_and_vms {
-    # Call libvirt setup scripts in a new shell to ensure any new group membership
-    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
-    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        LOG_ARG="$IRONIC_VM_LOG_DIR"
-    else
-        LOG_ARG=""
-    fi
-    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
-        $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
-        amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \
-        $LOG_ARG" >> $IRONIC_VM_MACS_CSV_FILE
-}
-
-function enroll_vms {
-
-    CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
-    IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1)
-    local idx=0
-
-    # work around; need to know what netns neutron uses for private network.
-    # Without knowing how to interconnect the networks, PXE won't work properly
-    # for fake baremetal instances. The network should be configured prior all
-    # the instances operation. If we don't do this, the first port creation
-    # only happens in the middle of fake baremetal instance's spawning by nova,
-    # so we'll end up with unbootable fake baremetal VM due to broken PXE.
-    PORT_ID=$(neutron port-create private | grep " id " | get_field 2)
-
-    while read MAC; do
-
-        NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \
-            -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
-            -i ssh_address=$IRONIC_VM_SSH_ADDRESS \
-            -i ssh_port=$IRONIC_VM_SSH_PORT \
-            -i ssh_username=$IRONIC_SSH_USERNAME \
-            -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \
-            -p cpus=$IRONIC_VM_SPECS_CPU \
-            -p memory_mb=$IRONIC_VM_SPECS_RAM \
-            -p local_gb=$IRONIC_VM_SPECS_DISK \
-            -p cpu_arch=x86_64 \
-            | grep " uuid " | get_field 2)
-
-        ironic port-create --address $MAC --node_uuid $NODE_ID
-
-        idx=$((idx+1))
-
-    done < $IRONIC_VM_MACS_CSV_FILE
-
-    # create the nova flavor
-    adjusted_disk=$(($IRONIC_VM_SPECS_DISK - $IRONIC_VM_EPHEMERAL_DISK))
-    nova flavor-create --ephemeral $IRONIC_VM_EPHEMERAL_DISK baremetal auto $IRONIC_VM_SPECS_RAM $adjusted_disk $IRONIC_VM_SPECS_CPU
-    nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$IRONIC_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$IRONIC_DEPLOY_RAMDISK_ID"
-
-    # intentional sleep to make sure the tag has been set to port
-    sleep 10
-    TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
-    TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
-
-    # make sure veth pair is not existing, otherwise delete its links
-    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
-    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
-    # create veth pair for future interconnection between br-int and brbm
-    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
-    sudo ip link set dev brbm-tap1 up
-    sudo ip link set dev ovs-tap1 up
-
-    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID
-    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
-
-    # Remove the port needed only for workaround. For additional info read the
-    # comment at the beginning of this function
-    neutron port-delete $PORT_ID
-}
-
-function configure_iptables {
-    # enable tftp natting for allowing connections to HOST_IP's tftp server
-    sudo modprobe nf_conntrack_tftp
-    sudo modprobe nf_nat_tftp
-    # nodes boot from TFTP and callback to the API server listening on $HOST_IP
-    sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true
-    sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 6385 -j ACCEPT || true
-}
-
-function configure_tftpd {
-    if is_ubuntu; then
-        PXEBIN=/usr/lib/syslinux/pxelinux.0
-    elif is_fedora; then
-        PXEBIN=/usr/share/syslinux/pxelinux.0
-    fi
-    if [ ! -f $PXEBIN ]; then
-        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
-    fi
-
-    # stop tftpd and setup serving via xinetd
-    stop_service tftpd-hpa || true
-    [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override
-    sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp
-    sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp
-
-    # setup tftp file mapping to satisfy requests at the root (booting) and
-    # /tftpboot/ sub-dir (as per deploy-ironic elements)
-    echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file
-    echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file
-
-    chmod -R 0755 $IRONIC_TFTPBOOT_DIR
-    restart_service xinetd
-}
-
-function configure_ironic_ssh_keypair {
-    # Generating ssh key pair for stack user
-    if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then
-        mkdir -p $IRONIC_SSH_KEY_DIR
-    fi
-    if [[ ! -d $HOME/.ssh ]]; then
-        mkdir -p $HOME/.ssh
-        chmod 700 $HOME/.ssh
-    fi
-    echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE
-    cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE
-}
-
-function ironic_ssh_check {
-    local KEY_FILE=$1
-    local FLOATING_IP=$2
-    local PORT=$3
-    local DEFAULT_INSTANCE_USER=$4
-    local ACTIVE_TIMEOUT=$5
-    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
-        die $LINENO "server didn't become ssh-able!"
-    fi
-}
-
-function configure_ironic_auxiliary {
-    configure_ironic_dirs
-    configure_ironic_ssh_keypair
-    ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
-}
-
-# build deploy kernel+ramdisk, then upload them to glance
-# this function sets IRONIC_DEPLOY_KERNEL_ID and IRONIC_DEPLOY_RAMDISK_ID
-function upload_baremetal_ironic_deploy {
-    token=$1
-
-    if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
-        IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
-        IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
-    else
-        IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
-        IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
-    fi
-
-    if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
-        # files don't exist, need to build them
-        if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
-            # we can build them only if we're not offline
-            if [ "$OFFLINE" != "True" ]; then
-                $DIB_DIR/bin/ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
-                    -o $TOP_DIR/files/ir-deploy
-            else
-                die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
-            fi
-        else
-            die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
-        fi
-    fi
-
-    # load them into glance
-    IRONIC_DEPLOY_KERNEL_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
-        --is-public True --disk-format=aki \
-        < $IRONIC_DEPLOY_KERNEL_PATH | grep ' id ' | get_field 2)
-    IRONIC_DEPLOY_RAMDISK_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
-        --is-public True --disk-format=ari \
-        < $IRONIC_DEPLOY_RAMDISK_PATH | grep ' id ' | get_field 2)
-}
-
-function prepare_baremetal_basic_ops {
-
-    # install diskimage-builder
-    git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
-
-    # make sure all needed service were enabled
-    for srv in nova glance key neutron; do
-        if ! is_service_enabled "$srv"; then
-            die $LINENO "$srv should be enabled for ironic tests"
-        fi
-    done
-
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
-    echo_summary "Creating and uploading baremetal images for ironic"
-
-    # build and upload separate deploy kernel & ramdisk
-    upload_baremetal_ironic_deploy $TOKEN
-
-    create_bridge_and_vms
-    enroll_vms
-    configure_tftpd
-    configure_iptables
-
-    # restart nova-compute to ensure its resource tracking is up to
-    # date with newly enrolled nodes
-    stop_nova_compute || true
-    start_nova_compute
-}
-
-function cleanup_baremetal_basic_ops {
-    rm -f $IRONIC_VM_MACS_CSV_FILE
-    if [ -f $IRONIC_KEY_FILE ]; then
-        KEY=`cat $IRONIC_KEY_FILE.pub`
-        # remove public key from authorized_keys
-        grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
-        chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
-    fi
-    sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
-    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE"
-    sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override
-    restart_service xinetd
-    sudo iptables -D INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true
-    sudo iptables -D INPUT -d $HOST_IP -p tcp --dport 6385 -j ACCEPT || true
-    sudo rmmod nf_conntrack_tftp || true
-    sudo rmmod nf_nat_tftp || true
-}
-
-# Restore xtrace + pipefail
-$XTRACE
-$PIPEFAIL
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/keystone b/lib/keystone
index f8e92f4449..840103b9f4 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -1,3 +1,5 @@
+#!/bin/bash
+#
 # lib/keystone
 
 # Functions to control the configuration and operation of **Keystone**
@@ -6,10 +8,9 @@
 # - ``functions`` file
 # - ``tls`` file
 # - ``DEST``, ``STACK_USER``
-# - ``IDENTITY_API_VERSION``
+# - ``FILES``
 # - ``BASE_SQL_CONN``
 # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
-# - ``SERVICE_TOKEN``
 # - ``S3_SERVICE_PORT`` (template backend only)
 
 # ``stack.sh`` calls the entry points in this order:
@@ -19,50 +20,54 @@
 # - _config_keystone_apache_wsgi
 # - init_keystone
 # - start_keystone
+# - bootstrap_keystone
 # - create_keystone_accounts
 # - stop_keystone
 # - cleanup_keystone
-# - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_KEYSTONE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Defaults
 # --------
 
 # Set up default directories
+GITDIR["keystoneauth"]=$DEST/keystoneauth
+GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient
+GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware
 KEYSTONE_DIR=$DEST/keystone
-KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
-KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
-KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone}
-KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone}
-KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
-
-# Select the backend for Keystone's service catalog
-KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
-KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
 
+# Keystone virtual environment
+if [[ ${USE_VENV} = True ]]; then
+    PROJECT_VENV["keystone"]=${KEYSTONE_DIR}.venv
+    KEYSTONE_BIN_DIR=${PROJECT_VENV["keystone"]}/bin
+else
+    KEYSTONE_BIN_DIR=$(get_python_exec_prefix)
+fi
 
-# Select the backend for Tokens
-KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql}
+KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
+KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
+KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application
 
-# Select the backend for Identity
+# Select the Identity backend driver
 KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql}
 
-# Select the backend for Assignment
+# Select the Assignment backend driver
 KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql}
 
-# Select Keystone's token format
-# Choose from 'UUID' and 'PKI'
-KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI}
+# Select the Role backend driver
+KEYSTONE_ROLE_BACKEND=${KEYSTONE_ROLE_BACKEND:-sql}
+
+# Select the Resource backend driver
+KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql}
 
-# Set Keystone interface configuration
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
+# Select Keystone's token provider (and format)
+# Refer to the keystone docs for supported token providers:
+# https://docs.openstack.org/keystone/latest/admin/token-provider.html
+KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
+KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
 # Public facing bits
 KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
@@ -70,182 +75,150 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
 KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
 KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
-# Bind hosts
-KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
-# Set the tenant for service accounts in Keystone
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-# valid identity backends as per dir keystone/identity/backends
-KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql
+# Set the project for service accounts in Keystone
+SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default}
+SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service}
 
-# valid assignment backends as per dir keystone/identity/backends
-KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
+# Note 2016-03 : SERVICE_TENANT_NAME is kept for backwards
+# compatibility; we should be using SERVICE_PROJECT_NAME now
+SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service}
 
 # if we are running with SSL use https protocols
-if is_ssl_enabled_service "key"; then
-    KEYSTONE_AUTH_PROTOCOL="https"
+if is_service_enabled tls-proxy; then
     KEYSTONE_SERVICE_PROTOCOL="https"
 fi
 
+KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity
+# for compat
+KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI
+
+# V3 URIs
+KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3
+KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3
+
+# Security compliance
+KEYSTONE_SECURITY_COMPLIANCE_ENABLED=${KEYSTONE_SECURITY_COMPLIANCE_ENABLED:-True}
+KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2}
+KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-10}
+KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2}
+
+# Number of bcrypt hashing rounds; increasing the number exponentially increases
+# the resources required to generate a password hash. This is a very effective
+# way to protect from brute-force attacks. 4 is the minimal value that can be
+# specified for bcrypt and it works much faster than the default 12. The minimal
+# value is great for CI and development, however it may not be suitable for real
+# production.
+KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4}
+
+# Cache settings
+KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+
+# Whether to create a keystone admin endpoint for legacy applications
+KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT)
+
+# Flag to set the oslo_policy.enforce_scope. This is used to switch
+# the Identity API policies to start checking the scope of the token. By default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)
 
 # Functions
 # ---------
 
+# Test if Keystone is enabled
+# is_keystone_enabled
+function is_keystone_enabled {
+    [[ ,${DISABLED_SERVICES} =~ ,"keystone" ]] && return 1
+    [[ ,${ENABLED_SERVICES}, =~ ,"key", ]] && return 0
+    return 1
+}
+
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    # kill instances (nova)
-    # delete image files (glance)
-    # This function intentionally left blank
-    :
-}
-
-# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_keystone_apache_wsgi {
-    sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi
-    disable_apache_site keystone
-    sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone
+    stop_process "keystone"
+    remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public"
+    sudo rm -f $(apache_site_config_for keystone-wsgi-public)
 }
 
 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
 function _config_keystone_apache_wsgi {
-    sudo mkdir -p $KEYSTONE_WSGI_DIR
+    local keystone_apache_conf
+    keystone_apache_conf=$(apache_site_config_for keystone)
+    keystone_ssl_listen="#"
+    local keystone_ssl=""
+    local keystone_certfile=""
+    local keystone_keyfile=""
+    local keystone_service_port=$KEYSTONE_SERVICE_PORT
+    local venv_path=""
 
-    # copy proxy vhost and wsgi file
-    sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main
-    sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/admin
+    if is_service_enabled tls-proxy; then
+        keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
+    fi
+    if [[ ${USE_VENV} = True ]]; then
+        venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
+    fi
 
-    sudo cp $FILES/apache-keystone.template /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone
+    sudo cp $FILES/apache-keystone.template $keystone_apache_conf
     sudo sed -e "
-        s|%PUBLICPORT%|$KEYSTONE_SERVICE_PORT|g;
-        s|%ADMINPORT%|$KEYSTONE_AUTH_PORT|g;
+        s|%PUBLICPORT%|$keystone_service_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g;
-        s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g;
-        s|%USER%|$STACK_USER|g
-    " -i /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone
-    enable_apache_site keystone
+        s|%SSLLISTEN%|$keystone_ssl_listen|g;
+        s|%SSLENGINE%|$keystone_ssl|g;
+        s|%SSLCERTFILE%|$keystone_certfile|g;
+        s|%SSLKEYFILE%|$keystone_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+        s|%KEYSTONE_BIN%|$KEYSTONE_BIN_DIR|g
    " -i $keystone_apache_conf
 }
 
 # configure_keystone() - Set config files, create data dirs, etc
 function configure_keystone {
-    if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
-        sudo mkdir -p $KEYSTONE_CONF_DIR
-    fi
-    sudo chown $STACK_USER $KEYSTONE_CONF_DIR
+    sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR
 
     if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
-        cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
-        chmod 600 $KEYSTONE_CONF
-        cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
-        if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then
-            cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI"
-        fi
-    fi
-    if [[ -f "$KEYSTONE_PASTE_INI" ]]; then
-        iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI"
-    else
-        # compatibility with mixed cfg and paste.deploy configuration
-        KEYSTONE_PASTE_INI="$KEYSTONE_CONF"
+        install -m 600 /dev/null $KEYSTONE_CONF
     fi
-
-    # Rewrite stock ``keystone.conf``
-
+    # Populate ``keystone.conf``
     if is_service_enabled ldap; then
-        #Set all needed ldap values
-        iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
-        iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
-        iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
-        iniset $KEYSTONE_CONF ldap use_dumb_member "True"
-        iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
-        iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
-        iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
-        iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
-        iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
-        iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
-        iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
-        iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
-        iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
+        iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains"
+        iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True"
     fi
+    iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND"
+    iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS
+    iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND"
+    iniset $KEYSTONE_CONF role driver "$KEYSTONE_ROLE_BACKEND"
+    iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND"
 
-    # check if identity backend is valid
-    if [[ "$KEYSTONE_VALID_IDENTITY_BACKENDS" =~ "$KEYSTONE_IDENTITY_BACKEND" ]]; then
-        iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.$KEYSTONE_IDENTITY_BACKEND.Identity"
-    fi
-
-    # check if assignment backend is valid
-    if [[ "$KEYSTONE_VALID_ASSIGNMENT_BACKENDS" =~ "$KEYSTONE_ASSIGNMENT_BACKEND" ]]; then
-        iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment"
-    fi
+    # Enable caching
+    iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE
+    iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND
+    iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS
 
-    # Set the URL advertised in the ``versions`` structure returned by the '/' route
-    iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
-    iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
-    iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
+    # Enable errors if response validation fails. We want this enabled in CI
+    # and development contexts to highlight bugs in our response schemas.
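+    # (Editorial aside: iniset's arguments are <file> <section> <option>
+    # <value>, so the call below lands in keystone.conf as:
+    #
+    #   [api]
+    #   response_validation = error
+    #
+    # a quick thing to grep for when verifying the generated config.)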
+    iniset $KEYSTONE_CONF api response_validation error
 
-    # Register SSL certificates if provided
-    if is_ssl_enabled_service key; then
-        ensure_certificates KEYSTONE
+    iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
 
-        iniset $KEYSTONE_CONF ssl enable True
-        iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT
-        iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY
-    fi
+    local service_port=$KEYSTONE_SERVICE_PORT
 
     if is_service_enabled tls-proxy; then
         # Set the service ports for a proxy to take the originals
-        iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT
-        iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT
+        service_port=$KEYSTONE_SERVICE_PORT_INT
     fi
 
-    iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
+    # Override the endpoints advertised by keystone so that clients use the correct
+    # endpoint. By default, the keystone server uses the public_port which isn't
+    # going to work when you want to use a different port (in the case of proxy),
+    # or you don't want the port (in the case of putting keystone on a path in apache).
+    iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
 
-    if [[ "$KEYSTONE_TOKEN_FORMAT" = "UUID" ]]; then
-        iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider
+    if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
+        iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
     fi
 
     iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
-    iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
-
-    if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token
-    elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token
-    else
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token
-    fi
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-        # Configure ``keystone.conf`` to use sql
-        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
-        inicomment $KEYSTONE_CONF catalog template_file
-    else
-        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
-
-        # Add swift endpoints to service catalog if swift is enabled
-        if is_service_enabled s-proxy; then
-            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
-        fi
-
-        # Add neutron endpoints to service catalog if neutron is enabled
-        if is_service_enabled neutron; then
-            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.name = Neutron Service" >> $KEYSTONE_CATALOG
-        fi
-
-        sed -e "
-            s,%SERVICE_HOST%,$SERVICE_HOST,g;
-            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
-        " -i $KEYSTONE_CATALOG
-
-        # Configure ``keystone.conf`` to use templates
-        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
-        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
-    fi
 
     # Set up logging
     if [ "$SYSLOG" != "False" ]; then
@@ -253,120 +226,239 @@ function configure_keystone {
     fi
 
     # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && ! is_apache_enabled_service key ; then
-        setup_colorized_logging $KEYSTONE_CONF DEFAULT
+    setup_logging $KEYSTONE_CONF
+
+    iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+
+    write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api"
+
+    iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
+
+    iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
+
+    iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/"
+
+    # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
+    # The users from this project are globally admin as before, but it also
+    # allows policy changes in order to clarify the adminess scope.
+    #iniset $KEYSTONE_CONF resource admin_project_domain_name Default
+    #iniset $KEYSTONE_CONF resource admin_project_name admin
+
+    if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then
+        iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
+        iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
+        iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
     fi
 
-    if is_apache_enabled_service key; then
-        iniset $KEYSTONE_CONF DEFAULT debug "True"
-        # Eliminate the %(asctime)s.%(msecs)03d from the log format strings
-        iniset $KEYSTONE_CONF DEFAULT logging_context_format_string "%(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s"
-        iniset $KEYSTONE_CONF DEFAULT logging_default_format_string "%(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s"
-        iniset $KEYSTONE_CONF DEFAULT logging_debug_format_suffix "%(funcName)s %(pathname)s:%(lineno)d"
-        iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(process)d TRACE %(name)s %(instance)s"
-        _config_keystone_apache_wsgi
+    iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+
+    if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $KEYSTONE_CONF oslo_policy enforce_scope true
+        iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
+    else
+        iniset $KEYSTONE_CONF oslo_policy enforce_scope false
+        iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false
     fi
 }
 
 # create_keystone_accounts() - Sets up common required keystone accounts
 
-# Tenant               User       Roles
+# Project              User       Roles
 # ------------------------------------------------------------------
 # admin                admin      admin
 # service              --         --
 # --                   --         service
 # --                   --         ResellerAdmin
-# --                   --         Member
+# --                   --         member
 # demo                 admin      admin
-# demo                 demo       Member, anotherrole
-# invisible_to_admin   demo       Member
+# demo                 demo       member, anotherrole
+# alt_demo             admin      admin
+# alt_demo             alt_demo   member, anotherrole
+# invisible_to_admin   demo       member
+
+# Group                Users             Roles                 Project
+# ------------------------------------------------------------------
+# admins               admin             admin                 admin
+# nonadmins            demo, alt_demo    member, anotherrole   demo, alt_demo
+
+# System               User              Roles
+# ------------------------------------------------------------------
+# all                  admin             admin
+# all                  system_reader     reader
+# all                  system_member     member
+
 # Migrated from keystone_data.sh
 function create_keystone_accounts {
-    # admin
-    ADMIN_TENANT=$(openstack project create \
-        admin \
-        | grep " id " | get_field 2)
-    ADMIN_USER=$(openstack user create \
-        admin \
-        --project "$ADMIN_TENANT" \
-        --email admin@example.com \
-        --password "$ADMIN_PASSWORD" \
-        | grep " id " | get_field 2)
-    ADMIN_ROLE=$(openstack role create \
-        admin \
-        | grep " id " | get_field 2)
-    openstack role add \
-        $ADMIN_ROLE \
-        --project $ADMIN_TENANT \
-        --user $ADMIN_USER
+    # The keystone bootstrapping process (performed via keystone-manage
+    # bootstrap) creates an admin user and an admin
+    # project. As a sanity check we exercise the CLI to retrieve the IDs for
+    # these values.
+    local admin_project
+    admin_project=$(openstack project show "admin" -f value -c id)
+    local admin_user
+    admin_user=$(openstack user show "admin" -f value -c id)
+    # These roles are also created during bootstrap but we don't need their IDs
+    local admin_role="admin"
+    local member_role="member"
+    local reader_role="reader"
+
+    async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
 
     # Create service project/role
-    openstack project create $SERVICE_TENANT_NAME
+    get_or_create_domain "$SERVICE_DOMAIN_NAME"
+    async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
 
     # Service role, so service users do not have to be admins
-    openstack role create service
+    async_run ks-service get_or_create_role service
 
     # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
-    # The admin role in swift allows a user to act as an admin for their tenant,
-    # but ResellerAdmin is needed for a user to act as any tenant. The name of this
+    # The admin role in swift allows a user to act as an admin for their project,
+    # but ResellerAdmin is needed for a user to act as any project. The name of this
    # role is also configurable in swift-proxy.conf
-    openstack role create ResellerAdmin
+    async_run ks-reseller get_or_create_role ResellerAdmin
 
-    # The Member role is used by Horizon and Swift so we need to keep it:
-    MEMBER_ROLE=$(openstack role create \
-        Member \
-        | grep " id " | get_field 2)
-    # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
+    # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-    ANOTHER_ROLE=$(openstack role create \
-        anotherrole \
-        | grep " id " | get_field 2)
+    local another_role="anotherrole"
+    async_run ks-anotherrole get_or_create_role $another_role
 
-    # invisible tenant - admin can't see this one
-    INVIS_TENANT=$(openstack project create \
-        invisible_to_admin \
-        | grep " id " | get_field 2)
+    # invisible project - admin can't see this one
+    local invis_project
+    invis_project=$(get_or_create_project "invisible_to_admin" default)
 
     # demo
-    DEMO_TENANT=$(openstack project create \
-        demo \
-        | grep " id " | get_field 2)
-    DEMO_USER=$(openstack user create \
-        demo \
-        --project $DEMO_TENANT \
-        --email demo@example.com \
-        --password "$ADMIN_PASSWORD" \
-        | grep " id " | get_field 2)
-
-    openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE
-    openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE
-    openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE
-    openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE
-
-    # Keystone
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        KEYSTONE_SERVICE=$(openstack service create \
-            keystone \
-            --type identity \
-            --description "Keystone Identity Service" \
-            | grep " id " | get_field 2)
-        openstack endpoint create \
-            $KEYSTONE_SERVICE \
-            --region RegionOne \
-            --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
-            --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \
-            --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
+    local demo_project
+    demo_project=$(get_or_create_project "demo" default)
+    local demo_user
+    demo_user=$(get_or_create_user "demo" \
+        "$ADMIN_PASSWORD" "default" "demo@example.com")
+
+    async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
+
+    async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+
+    async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
+    async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
+    async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
+
+    # Create a user to act as a reader on project demo
+    local demo_reader
+    demo_reader=$(get_or_create_user "demo_reader" \
+        "$ADMIN_PASSWORD" "default" "demo_reader@example.com")
+
+    async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project
+
+    # Create a different project called alt_demo
+    local alt_demo_project
+    alt_demo_project=$(get_or_create_project "alt_demo" default)
+    # Create a user to act as member, admin and anotherrole on project alt_demo
+    local alt_demo_user
+    alt_demo_user=$(get_or_create_user "alt_demo" \
+        "$ADMIN_PASSWORD" "default" "alt_demo@example.com")
+
+    async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project
+    async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+
+    # Create another user to act as a member on project alt_demo
+    local alt_demo_member
+    alt_demo_member=$(get_or_create_user "alt_demo_member" \
+        "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com")
+    async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project
+
+    # Create another user to act as a reader on project alt_demo
+    local alt_demo_reader
alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project + + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. + system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" + + # groups + local admin_group + admin_group=$(get_or_create_group "admins" \ + "default" "openstack admin group") + local non_admin_group + non_admin_group=$(get_or_create_group "nonadmins" \ + "default" "non-admin group") + + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{admin,another,member-user,reader-user} + async_wait ks-system-{member,reader} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} + + if is_service_enabled ldap; then + create_ldap_domain + fi +} + +# Create a user that is capable of verifying keystone tokens for use with auth_token middleware. +# +# create_service_user [role] +# +# We always add the service role; other roles may be added as well, since historically +# many projects have granted this user the admin or another role when using it for +# purposes beyond simple auth_token middleware. +function create_service_user { + get_or_create_user "$1" "$SERVICE_PASSWORD" "$SERVICE_DOMAIN_NAME" + get_or_add_user_project_role service "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + + if [[ -n "$2" ]]; then + get_or_add_user_project_role "$2" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" fi } -# Configure the API version for the OpenStack projects. -# configure_API_version conf_file version -function configure_API_version { +# Configure a service to use the auth token middleware. +# +# configure_keystone_authtoken_middleware conf_file admin_user [section] [service_type] +# +# section defaults to keystone_authtoken, which is where auth_token looks in +# the .conf file. If the paste config file is used (api-paste.ini) then +# provide the section name for the auth_token filter.
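+#
+# As an illustrative sketch, a call such as
+#
+#   configure_keystone_authtoken_middleware /etc/nova/nova.conf nova
+#
+# is expected to leave behind an INI section roughly like the following,
+# where the auth_url and domain values assume a typical single-node
+# setup with the default "Default" domain and "service" project:
+#
+#   [keystone_authtoken]
+#   auth_type = password
+#   interface = public
+#   auth_url = http://10.0.0.10/identity
+#   username = nova
+#   user_domain_name = Default
+#   project_name = service
+#   project_domain_name = Default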
+function configure_keystone_authtoken_middleware { local conf_file=$1 - local api_version=$2 - iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version + local admin_user=$2 + local section=${3:-keystone_authtoken} + local service_type=$4 + + iniset $conf_file $section auth_type password + iniset $conf_file $section interface public + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI + iniset $conf_file $section username $admin_user + iniset $conf_file $section password $SERVICE_PASSWORD + iniset $conf_file $section user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf_file $section project_name $SERVICE_PROJECT_NAME + iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" + + iniset $conf_file $section cafile $SSL_BUNDLE_FILE + iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi +} + +# configure_auth_token_middleware conf_file admin_user IGNORED [section] +# TODO(frickler): old function for backwards compatibility, remove in U cycle +function configure_auth_token_middleware { + echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead" + configure_keystone_authtoken_middleware $1 $2 $4 } # init_keystone() - Initialize databases, etc. @@ -375,29 +467,55 @@ function init_keystone { init_ldap fi - # (Re)create keystone database - recreate_database keystone utf8 + if [[ "$RECREATE_KEYSTONE_DB" == True ]]; then + # (Re)create keystone database + recreate_database keystone + fi + time_start "dbsync" # Initialize keystone database - $KEYSTONE_DIR/bin/keystone-manage db_sync + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync + time_stop "dbsync" + + if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then + rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup + fi + rm -rf "$KEYSTONE_CONF_DIR/credential-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Set up certificates - rm -rf $KEYSTONE_CONF_DIR/ssl - $KEYSTONE_DIR/bin/keystone-manage pki_setup +} - # Create cache dir - sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR - sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR - rm -f $KEYSTONE_AUTH_CACHE_DIR/* +# install_keystoneauth() - Collect source and prepare +function install_keystoneauth { + if use_library_from_git "keystoneauth"; then + git_clone_by_name "keystoneauth" + setup_dev_lib "keystoneauth" fi } # install_keystoneclient() - Collect source and prepare function install_keystoneclient { - git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH - setup_develop $KEYSTONECLIENT_DIR - sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion + if use_library_from_git "python-keystoneclient"; then + git_clone_by_name "python-keystoneclient" + setup_dev_lib "python-keystoneclient" + fi +} + +# install_keystonemiddleware() - Collect source and prepare +function install_keystonemiddleware { + # install_keystonemiddleware() is called when keystonemiddleware is needed + # to provide an opportunity to install it from the source repo + if use_library_from_git "keystonemiddleware"; then + git_clone_by_name "keystonemiddleware" + setup_dev_lib "keystonemiddleware" + else + # When not installing from 
repo, keystonemiddleware is still needed... + pip_install_gr keystonemiddleware + fi + # Install the memcache library so keystonemiddleware can cache tokens in a + # shared location. + pip_install_gr python-memcached } # install_keystone() - Collect source and prepare @@ -406,63 +524,133 @@ function install_keystone { if is_service_enabled ldap; then install_ldap fi - if [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then - # Install memcached and the memcache Python library that keystone uses. - # Unfortunately the Python library goes by different names in the .deb - # and .rpm circles. - install_package memcached - if is_ubuntu; then - install_package python-memcache - else - install_package python-memcached - fi - fi + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH setup_develop $KEYSTONE_DIR - if is_apache_enabled_service key; then - install_apache_wsgi + + if is_service_enabled ldap; then + setup_develop $KEYSTONE_DIR ldap fi } -# start_keystone() - Start running processes, including screen +# start_keystone() - Start running processes function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$KEYSTONE_SERVICE_PORT_INT + auth_protocol="http" fi - if is_apache_enabled_service key; then - restart_apache_server - screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" - else - # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + # Check that the keystone service is running. Even if the tls tunnel + # is enabled, make sure the internal port is checked using + # unencrypted traffic at this point. + # If running in Apache, use the path rather than port. + + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ + + if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" fi # Start proxies if enabled if is_service_enabled tls-proxy; then - start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & - start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT fi + + # (re)start memcached to make sure we have a clean memcache.
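+    # A by-hand equivalent of the wait_for_service check above, useful when
+    # debugging a failed start (illustrative; assumes the default /identity
+    # uwsgi path):
+    #
+    #   curl -sf "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST/identity/v3/" \
+    #       && echo "keystone is answering"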
+ restart_service memcached } # stop_keystone() - Stop running processes function stop_keystone { - # Kill the Keystone screen window - screen_stop key - # Cleanup the WSGI files and VHOST - _cleanup_keystone_apache_wsgi + stop_process keystone } +# bootstrap_keystone() - Initialize user, role and project +# This function uses the following GLOBAL variables: +# - ``KEYSTONE_BIN_DIR`` +# - ``ADMIN_PASSWORD`` +# - ``REGION_NAME`` +# - ``KEYSTONE_SERVICE_URI`` +function bootstrap_keystone { + $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ + --bootstrap-username admin \ + --bootstrap-password "$ADMIN_PASSWORD" \ + --bootstrap-project-name admin \ + --bootstrap-role-name admin \ + --bootstrap-service-name keystone \ + --bootstrap-region-id "$REGION_NAME" \ + --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi +} + +# create_ldap_domain() - Create domain file and initialize domain with a user +function create_ldap_domain { + # Creates domain Users + openstack domain create --description "LDAP domain" Users + + # Create domain file inside etc/keystone/domains + KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf + mkdir -p "$KEYSTONE_CONF_DIR/domains" + touch "$KEYSTONE_LDAP_DOMAIN_FILE" + + # Set identity driver 'ldap' + iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap" + + # LDAP settings for Users domain + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap password $LDAP_PASSWORD + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_tree_dn "ou=Groups,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_objectclass "groupOfNames" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_id_attribute "cn" + + # Restart apache and identity services to associate domain and conf file + sudo service apache2 reload + sudo systemctl restart devstack@keystone + + # Create LDAP user.ldif and add user to LDAP backend + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + _ldap_varsubst $FILES/ldap/user.ldif.in $slappass >$tmp_ldap_dir/user.ldif + sudo ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/user.ldif + rm -rf $tmp_ldap_dir + + local admin_project + admin_project=$(get_or_create_project "admin" default) + local ldap_user + ldap_user=$(openstack user show --domain=Users demo -f value -c id) + local admin_role="admin" + get_or_create_role $admin_role + + # Grant demo LDAP user access to project and role + get_or_add_user_project_role $admin_role $ldap_user $admin_project +} # Restore xtrace -$XTRACE +$_XTRACE_KEYSTONE # Tell emacs to use shell-script-mode ## 
Local variables: diff --git a/lib/ldap b/lib/ldap index efe2f096d7..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/ldap # Functions to control the installation and configuration of **ldap** @@ -6,7 +8,7 @@ # - install_ldap() # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LDAP=$(set +o | grep xtrace) set +o xtrace @@ -31,16 +33,12 @@ LDAP_SERVICE_NAME=slapd if is_ubuntu; then LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb LDAP_ROOTPW_COMMAND=replace elif is_fedora; then LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -51,9 +49,11 @@ fi # _ldap_varsubst file function _ldap_varsubst { local infile=$1 + local slappass=$2 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| - s|\${SLAPPASS}|$SLAPPASS| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| + s|\${SLAPPASS}|$slappass| s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| s|\${BASE_DC}|$LDAP_BASE_DC| s|\${BASE_DN}|$LDAP_BASE_DN| @@ -69,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -79,10 +77,19 @@ function cleanup_ldap { function init_ldap { local keystone_ldif - TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # A bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd to crash + # after deleting a nonexistent tree. It is fixed upstream, but Ubuntu + # still does not have the fix in Noble. As a temporary workaround, simply + # restart the process.
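+        # After the restart below, a base-scope search is a quick way to
+        # confirm slapd is answering again (an illustrative check that
+        # reuses the credentials defined earlier in this file):
+        #
+        #   ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" \
+        #       -H $LDAP_URL -b "$LDAP_BASE_DN" -s base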
+ sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then @@ -91,17 +98,17 @@ function init_ldap { printf "Configuring LDAP for $LDAP_BASE_DC\n" # If BASE_DN is changed, the user may override the default file if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then - keystone_ldif=${LDAP_BASE_DC}.ldif + local keystone_ldif=${LDAP_BASE_DC}.ldif else - keystone_ldif=keystone.ldif + local keystone_ldif=keystone.ldif fi - _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif} - if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then - ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif} + _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$tmp_ldap_dir/${keystone_ldif} + if [[ -r $tmp_ldap_dir/${keystone_ldif} ]]; then + ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/${keystone_ldif} fi fi - rm -rf TMP_LDAP_DIR + rm -rf $tmp_ldap_dir } # install_ldap @@ -110,28 +117,24 @@ function install_ldap { echo "Installing LDAP inside function" echo "os_VENDOR is $os_VENDOR" - TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) printf "installing OpenLDAP" if is_ubuntu; then - # Ubuntu automatically starts LDAP so no need to call start_ldap() - : + configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" - SLAPPASS=$(slappasswd -s $LDAP_PASSWORD) - printf "LDAP secret is $SLAPPASS\n" + local slappass + slappass=$(slappasswd -s $LDAP_PASSWORD) + printf "LDAP secret is $slappass\n" # Create manager.ldif and add to olcdb - _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif - sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif + _ldap_varsubst $FILES/ldap/manager.ldif.in $slappass >$tmp_ldap_dir/manager.ldif + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $tmp_ldap_dir/manager.ldif # On fedora we need to manually add cosine and inetorgperson schemas if is_fedora; then @@ -139,7 +142,28 @@ function install_ldap { sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi - rm -rf TMP_LDAP_DIR + rm -rf $tmp_ldap_dir +} + +# configure_ldap() - Configure LDAP - reconfigure slapd +function configure_ldap { + sudo debconf-set-selections </dev/null)" ]]; then + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably setup by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi + fi +} + +# _create_lvm_volume_group creates default volume group +# +# Usage: _create_lvm_volume_group() $vg $size +function _create_lvm_volume_group { + local vg=$1 + local size=$2 + + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + if ! 
sudo vgs $vg; then + # Only create if the file doesn't already exist + [[ -f $backing_file ]] || truncate -s $size $backing_file + + local directio="" + # Check to see if we can do direct-io + if losetup -h | grep -q direct-io; then + directio="--direct-io=on" + fi + + # Only create systemd service if it doesn't already exist + if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + + local vg_dev + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) + + # Only create volume group if it doesn't already exist + if ! sudo vgs $vg; then + sudo vgcreate $vg $vg_dev + fi + fi +} + +# init_lvm_volume_group() initializes the volume group creating the backing +# file if necessary +# +# Usage: init_lvm_volume_group() $vg $size +function init_lvm_volume_group { + local vg=$1 + local size=$2 + + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then + start_service tgtd + fi + + # Start with a clean volume group + _create_lvm_volume_group $vg $size + + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi + fi + _clean_lvm_volume_group $vg +} + +# Sentinel value to ensure that init of default lvm volume group is +# only performed once across calls of init_default_lvm_volume_group. +_DEFAULT_LVM_INIT=${_DEFAULT_LVM_INIT:-0} + +# init_default_lvm_volume_group() initializes a default volume group +# intended to be shared between cinder and nova. It is idempotent; +# the init of the default volume group is guaranteed to be performed +# only once so that either or both of the dependent services can +# safely call this function. +# +# Usage: init_default_lvm_volume_group() +function init_default_lvm_volume_group { + if [[ "$_DEFAULT_LVM_INIT" = "0" ]]; then + init_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME $VOLUME_BACKING_FILE_SIZE + _DEFAULT_LVM_INIT=1 + fi +} + +# clean_lvm_filter() Remove the filter rule set in set_lvm_filter() +# +# Usage: clean_lvm_filter() +function clean_lvm_filter { + sudo sed -i "s/^.*# from devstack$//" /etc/lvm/lvm.conf +} + +# set_lvm_filter() Gather all devices configured for LVM and +# use them to build a global device filter that is added to +# /etc/lvm.conf. Note this uses all current PVs in use by LVM on the +# system to build its filter.
+# +# Usage: set_lvm_filter() +function set_lvm_filter { + local filter_suffix='"r|.*|" ] # from devstack' + local filter_string="global_filter = [ " + local pv + local vg + local line + + for pv_info in $(sudo pvs --noheadings -o name); do + pv=$(echo -e "${pv_info}" | sed 's/ //g' | sed 's/\/dev\///g') + new="\"a|$pv|\", " + filter_string=$filter_string$new + done + filter_string=$filter_string$filter_suffix + + clean_lvm_filter + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf + echo_summary "set lvm.conf device global_filter to: $filter_string" +} + +# Restore xtrace +$_XTRACE_LVM + +# mode: shell-script +# End: diff --git a/lib/marconi b/lib/marconi deleted file mode 100644 index 473c8cd5ef..0000000000 --- a/lib/marconi +++ /dev/null @@ -1,209 +0,0 @@ -# lib/marconi -# Install and start **Marconi** service - -# To enable a minimal set of Marconi services, add the following to localrc: -# -# enable_service marconi-server -# -# Dependencies: -# - functions -# - OS_AUTH_URL for auth in api -# - DEST set to the destination directory -# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api -# - STACK_USER service user - -# stack.sh -# --------- -# install_marconi -# configure_marconi -# init_marconi -# start_marconi -# stop_marconi -# cleanup_marconi - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -MARCONI_DIR=$DEST/marconi -MARCONICLIENT_DIR=$DEST/python-marconiclient -MARCONI_CONF_DIR=/etc/marconi -MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf -MARCONI_API_LOG_DIR=/var/log/marconi -MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log -MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} - -# Support potential entry-points console scripts -MARCONI_BIN_DIR=$(get_python_exec_prefix) - -# Set up database backend -MARCONI_BACKEND=${MARCONI_BACKEND:-sqlite} - - -# Set Marconi repository -MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git} -MARCONI_BRANCH=${MARCONI_BRANCH:-master} - -# Set client library repository -MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} -MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} - -# Set Marconi Connection Info -MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST} -MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888} -MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi - - -# Functions -# --------- - -# Test if any Marconi services are enabled -# is_marconi_enabled -function is_marconi_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0 - return 1 -} - -# cleanup_marconi() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_marconi { - if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then - die $LINENO "Mongo DB did not start" - fi -} - -# configure_marconiclient() - Set config files, create data dirs, etc -function configure_marconiclient { - setup_develop $MARCONICLIENT_DIR -} - -# configure_marconi() - Set config files, create data dirs, etc -function configure_marconi { - setup_develop $MARCONI_DIR - - [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR - sudo chown $USER $MARCONI_CONF_DIR - - [ ! 
-d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR - sudo chown $USER $MARCONI_API_LOG_DIR - - iniset $MARCONI_CONF DEFAULT verbose True - iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG - iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE - iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST - - iniset $MARCONI_CONF keystone_authtoken auth_protocol http - iniset $MARCONI_CONF keystone_authtoken admin_user marconi - iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR - - if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then - iniset $MARCONI_CONF drivers storage sqlalchemy - iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi` - elif [ "$MARCONI_BACKEND" = 'mongodb' ] ; then - iniset $MARCONI_CONF drivers storage mongodb - iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi - configure_mongodb - cleanup_marconi - fi -} - -function configure_mongodb { - # Set nssize to 2GB. This increases the number of namespaces supported - # # per database. - if is_ubuntu; then - sudo sed -i -e " - s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1| - s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047| - " /etc/mongodb.conf - restart_service mongodb - elif is_fedora; then - sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod - restart_service mongod - fi -} - -# init_marconi() - Initialize etc. -function init_marconi { - # Create cache dir - sudo mkdir -p $MARCONI_AUTH_CACHE_DIR - sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR - rm -f $MARCONI_AUTH_CACHE_DIR/* -} - -# install_marconi() - Collect source and prepare -function install_marconi { - git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH - setup_develop $MARCONI_DIR -} - -# install_marconiclient() - Collect source and prepare -function install_marconiclient { - git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH - setup_develop $MARCONICLIENT_DIR -} - -# start_marconi() - Start running processes, including screen -function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF --daemon" - echo "Waiting for Marconi to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then - die $LINENO "Marconi did not start" - fi -} - -# stop_marconi() - Stop running processes -function stop_marconi { - # Kill the marconi screen windows - for serv in marconi-server; do - screen -S $SCREEN_NAME -p $serv -X kill - done -} - -function create_marconi_accounts { - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - MARCONI_USER=$(openstack user create \ - marconi \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email marconi@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $MARCONI_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(openstack service create \ - marconi \ - --type=queuing \ - --description="Marconi Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $MARCONI_SERVICE \ - --region RegionOne \ - --publicurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ - --adminurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ - --internalurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" - fi - -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/neutron b/lib/neutron index 15cfa8eee9..dec15fb782 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/neutron # functions - functions specific to neutron @@ -8,25 +10,27 @@ # ``stack.sh`` calls the entry points in this order: # -# - install_neutron -# - install_neutronclient # - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron # - install_neutron_third_party # - configure_neutron # - init_neutron # - configure_neutron_third_party # - init_neutron_third_party # - start_neutron_third_party -# - create_neutron_cache_dir # - create_nova_conf_neutron +# - configure_neutron_after_post_config # - start_neutron_service_and_check -# - create_neutron_initial_network -# - setup_neutron_debug +# - check_neutron_third_party_integration # - start_neutron_agents +# - create_neutron_initial_network # # ``unstack.sh`` calls the entry points in this order: # # - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron # Functions in lib/neutron are classified into the following categories: # @@ -43,36 +47,26 @@ # to run Neutron on this host, make sure that q-svc is also in # ``ENABLED_SERVICES``. # -# If you're planning to use the Neutron openvswitch plugin, set -# ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled -# in ``ENABLED_SERVICES``. If you're planning to use the Neutron -# linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the -# q-agt service is enabled in ``ENABLED_SERVICES``. -# # See "Neutron Network Configuration" below for additional variables # that must be set in localrc for connectivity across hosts with # Neutron. -# -# With Neutron networking the NETWORK_MANAGER variable is ignored. -# -# To enable specific configuration options for either the Open vSwitch or -# LinuxBridge plugin, please see the top level README file under the -# Neutron section. 
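+# For example, a minimal ``local.conf`` fragment that enables the Neutron
+# server plus the common agents described in this file might look like the
+# following (a sketch; trim the agent list to match your setup):
+#
+#   [[local|localrc]]
+#   enable_service q-svc q-agt q-dhcp q-l3 q-meta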
+ +# Settings +# -------- # Neutron Network Configuration # ----------------------------- -# Gateway and subnet defaults, in case they are not customized in localrc -NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} -PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} -PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi # Set up default directories +GITDIR["python-neutronclient"]=$DEST/python-neutronclient + NEUTRON_DIR=$DEST/neutron -NEUTRONCLIENT_DIR=$DEST/python-neutronclient -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then @@ -85,51 +79,98 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} +NEUTRON_UWSGI=neutron.wsgi.api:application +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini + +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in Neutron's config to enforce usage +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) + +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in the neutron repository; +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in the neutron repository; +# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database Q_DB_NAME=${Q_DB_NAME:-neutron} # Default Neutron Plugin Q_PLUGIN=${Q_PLUGIN:-ml2} # Default Neutron Port Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} # Default Neutron Host Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} # Default auth strategy Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} # RHEL's support for namespaces requires using veths with ovs Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) # Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} # Allow Overlapping IP among subnets Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -# Use neutron-debug command -Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} -# The name of the default q-l3 router -Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} -# nova vif driver that all plugins should use -NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True} -Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_CHANGE:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} -# The next two variables are configured by plugin -# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* -# -# The plugin supports L3. -Q_L3_ENABLED=${Q_L3_ENABLED:-False} -# L3 routers exist per tenant -Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} +# Allow skipping the stop of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} # List of config file names in addition to the main plugin config file -# See _configure_neutron_common() for details about setting it up +# To add additional plugin config files, use the ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for the +# neutron server.
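+#
+# As a sketch, after calling
+#
+#   neutron_server_config_add my_plugin.conf
+#
+# the neutron server command line is expected to gain
+#
+#   --config-file /etc/neutron/my_plugin.conf
+#
+# (``my_plugin.conf`` is a hypothetical file name used for illustration).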
+declare -a -g Q_PLUGIN_EXTRA_CONF_FILES -# List of (optional) config files for VPN device drivers to use with -# the neutron-q-vpn agent -declare -a Q_VPN_EXTRA_CONF_FILES +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. +declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf @@ -138,31 +179,46 @@ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then else NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi + + +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population fi # Provider Network Configurations # -------------------------------- -# The following variables control the Neutron openvswitch and -# linuxbridge plugins' allocation of tenant networks and -# availability of provider networks. If these are not configured -# in ``localrc``, tenant networks will be local to the host (with no -# remote connectivity), and no physical resources will be -# available for the allocation of provider networks. +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. -# To use GRE tunnels for tenant networks, set to True in -# ``localrc``. GRE tunnels are only supported by the openvswitch -# plugin, and currently only on Ubuntu. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch agent. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} -# If using GRE tunnels for tenant networks, specify the range of -# tunnel IDs from which tenant networks are allocated. Can be -# overriden in ``localrc`` in necesssary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} # To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the openvswitch and linuxbridge plugins, each -# requiring additional configuration described below. +# are supported by the ML2 plugins, requiring additional configuration +# described below. ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} # If using VLANs for tenant networks, set in ``localrc`` to specify @@ -176,13 +232,12 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # If using VLANs for tenant networks, or if using flat or VLAN # provider networks, set in ``localrc`` to the name of the physical # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below.
# # Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} -# With the openvswitch plugin, if using VLANs for tenant networks, +# With the openvswitch agent, if using VLANs for tenant networks, # or if using flat or VLAN provider networks, set in ``localrc`` to # the name of the OVS bridge to use for the physical network. The # bridge will be created if it does not already exist, but a @@ -190,15 +245,7 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # port for external connectivity. # # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - -# With the linuxbridge plugin, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. @@ -206,17 +253,21 @@ LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} # Example: ``OVS_ENABLE_TUNNELING=True`` OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +# Set to True in localrc to use the DHCP agent to provide the metadata +# service when running without an L3 agent (no route agent). +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as a DHCP option so that requests to 169.254.169.254 +# can be routed through the DHCP agent. +# This option requires ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} # Neutron plugin specific functions # --------------------------------- # Please refer to ``lib/neutron_plugins/README.md`` for details. -source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN - -# Agent loadbalancer service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/loadbalancer +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi # Agent metering service plugin functions # ------------------------------------------- @@ -224,14 +275,14 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/metering -# VPN service plugin functions -# ------------------------------------------- -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/vpn +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 -# Firewall Service Plugin functions -# --------------------------------- -source $TOP_DIR/lib/neutron_plugins/services/firewall +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments # Use security group or not if has_neutron_plugin_security_group; then @@ -240,12 +291,13 @@ else Q_USE_SECGROUP=False fi -# Tell Tempest this project is present -TEMPEST_SERVICES+=,neutron - +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initializes PUBLIC_BRIDGE.
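+# With the defaults assumed in this file (PHYSICAL_NETWORK=public and,
+# typically, PUBLIC_BRIDGE=br-ex) the resulting mapping would look like:
+#
+#   OVN_BRIDGE_MAPPINGS=public:br-ex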
+OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON=$(set +o | grep xtrace) set +o xtrace @@ -255,83 +307,178 @@ set +o xtrace # Test if any Neutron services are enabled # is_neutron_enabled function is_neutron_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"neutron-" || ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 return 1 } +# Test if any Neutron services are enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it is no longer called from any other Devstack plugins, e.g. the Neutron +# plugin +function is_neutron_legacy_enabled { + return 0 +} + +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi + fi + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." + fi + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) + done + + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" +} + +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} + +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} + +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" + fi +} + +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} + +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +} + # configure_neutron() # Set common config for all neutron server and agents.
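+#
+# For reference, the config-file resolution performed by
+# determine_config_files above expands to something like (a sketch; the
+# ml2 path is an assumption, since Q_PLUGIN_CONF_FILE is set by the
+# per-plugin scripts in lib/neutron_plugins/):
+#
+#   $ determine_config_files neutron-server
+#   --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini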
function configure_neutron { _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT + iniset_rpc_backend neutron $NEUTRON_CONF - # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES - if is_service_enabled q-lbaas; then - _configure_neutron_lbaas - fi - if is_service_enabled q-metering; then + if is_service_enabled q-metering neutron-metering; then _configure_neutron_metering fi - if is_service_enabled q-vpn; then - _configure_neutron_vpn - fi - if is_service_enabled q-fwaas; then - _configure_neutron_fwaas - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then + if is_service_enabled q-agt neutron-agent; then _configure_neutron_plugin_agent fi - if is_service_enabled q-dhcp; then + if is_service_enabled q-dhcp neutron-dhcp; then _configure_neutron_dhcp_agent fi - if is_service_enabled q-l3; then + if is_service_enabled q-l3 neutron-l3; then _configure_neutron_l3_agent fi - if is_service_enabled q-meta; then + if is_service_enabled q-meta neutron-metadata-agent; then _configure_neutron_metadata_agent fi - _configure_neutron_debug_command -} - -function create_nova_conf_neutron { - iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" - iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF DEFAULT neutron_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - iniset $NOVA_CONF DEFAULT neutron_auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF DEFAULT neutron_admin_tenant_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF DEFAULT neutron_region_name "RegionOne" - iniset $NOVA_CONF DEFAULT neutron_url "http://$Q_HOST:$Q_PORT" + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT security_group_api neutron + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin fi - # set NOVA_VIF_DRIVER and optionally set options in nova_conf - neutron_plugin_create_nova_conf + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos + fi + fi + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension + fi - iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER" - iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" - if is_service_enabled q-meta; then - iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service fi - iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber 
scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } -# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process -function create_neutron_cache_dir { - # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done + fi +} + +function create_nova_conf_neutron { + local conf=${1:-$NOVA_CONF} + iniset $conf neutron auth_type "password" + iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf neutron username nova + iniset $conf neutron password "$SERVICE_PASSWORD" + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" + iniset $conf neutron region_name "$REGION_NAME" + + # optionally set options in nova_conf + neutron_plugin_create_nova_conf $conf + + if is_service_enabled q-meta neutron-metadata-agent; then + iniset $conf neutron service_metadata_proxy "True" + fi + + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } # create_neutron_accounts() - Set up common required neutron accounts @@ -342,240 +489,403 @@ function create_neutron_cache_dir { # Migrated from keystone_data.sh function create_neutron_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - NEUTRON_USER=$(openstack user create \ - neutron \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email neutron@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $NEUTRON_USER - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NEUTRON_SERVICE=$(openstack service create \ - neutron \ - --type=network \ - --description="Neutron Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $NEUTRON_SERVICE \ - --region RegionOne \ - --publicurl "http://$SERVICE_HOST:9696/" \ - --adminurl "http://$SERVICE_HOST:9696/" \ - --internalurl "http://$SERVICE_HOST:9696/" - fi + local neutron_url + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi -} -function create_neutron_initial_network { - TENANT_ID=$(openstack project list | grep " demo " | get_field 1) - die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" + if is_service_enabled q-svc neutron-api; then - # Create a small network - # Since neutron command is executed in admin context at this point, - # ``--tenant-id`` needs to be specified. - if is_baremetal; then - if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then - die $LINENO "Neutron settings for baremetal not set.. 
exiting" - fi - sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant-id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" - sudo ifconfig $OVS_PHYSICAL_BRIDGE up - sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - else - NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" - fi - - if [[ "$Q_L3_ENABLED" == "True" ]]; then - # Create a router, and add the private subnet as one of its interfaces - if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then - # create a tenant-owned router. - ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" - else - # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" - fi - neutron router-interface-add $ROUTER_ID $SUBNET_ID - # Create an external network, and a subnet. 
Configure the external network as router gw - EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" - EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) - die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP" - neutron router-gateway-set $ROUTER_ID $EXT_NET_ID - - if is_service_enabled q-l3; then - # logic is specific to using the l3-agent for l3 - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - CIDR_LEN=${FLOATING_RANGE#*/} - sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE - sudo ip link set $PUBLIC_BRIDGE up - ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` - die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP - fi - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then - # Explicitly set router id in l3 agent configuration - iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID - fi - fi + create_service_user "neutron" + + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ + "$REGION_NAME" "$neutron_url" fi } # init_neutron() - Initialize databases, etc. function init_neutron { - recreate_database $Q_DB_NAME utf8 + recreate_database $Q_DB_NAME + time_start "dbsync" # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + time_stop "dbsync" } # install_neutron() - Collect source and prepare function install_neutron { + # Install neutron-lib from git so we make sure we're testing + # the latest code. + if use_library_from_git "neutron-lib"; then + git_clone_by_name "neutron-lib" + setup_dev_lib "neutron-lib" + fi + + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR + fi + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR + fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi } # install_neutronclient() - Collect source and prepare function install_neutronclient { - git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH - setup_develop $NEUTRONCLIENT_DIR - sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion + if use_library_from_git "python-neutronclient"; then + git_clone_by_name "python-neutronclient" + setup_dev_lib "python-neutronclient" + fi } # install_neutron_agent_packages() - Collect source and prepare function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. 
+ if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then neutron_plugin_install_agent_packages fi +} - if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES fi + configure_rbac_policies } -# Start running processes, including screen +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to set up a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. + init_ovn + start_ovn + fi + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi + fi +} + +# Start running processes function start_neutron_service_and_check { - # build config-file options - local cfg_file - local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - CFG_FILE_OPTIONS+=" --config-file /$cfg_file" - done + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options + local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + + if is_service_enabled tls-proxy; then + service_port=$Q_PORT_INT + service_protocol="http" + fi + # Start the Neutron service - screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server + fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance + if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi echo "Waiting for Neutron to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - die $LINENO "Neutron did not start" + + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT +} + +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} + +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi } +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" +} + # Start running processes, including screen function start_neutron_agents { - # Start up the neutron agents if enabled - screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} - L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" +function stop_l2_agent { + stop_process q-agt +} - if is_service_enabled q-fwaas; then - L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE" - VPN_CONF_FILES="$VPN_CONF_FILES --config-file $Q_FWAAS_CONF_FILE" - fi - if is_service_enabled q-vpn; then - screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $VPN_CONF_FILES" - else - screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES" +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! 
-z "$pid" ] && sudo kill -9 $pid fi - screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance + + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 + fi - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # For XenServer, start an agent for the domU openvswitch - screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta fi - if is_service_enabled q-lbaas; then - screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop fi - if is_service_enabled q-metering; then - screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : fi } # stop_neutron() - Stop running processes (non-screen) function stop_neutron { - if is_service_enabled q-dhcp; then - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - fi + stop_other + stop_l2_agent - if is_service_enabled q-lbaas; then - neutron_lbaas_stop + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi - if is_service_enabled q-fwaas; then - neutron_fwaas_stop - fi - if is_service_enabled q-vpn; then - neutron_vpn_stop +} + +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. 
+ + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD fi - if is_service_enabled q-metering; then - neutron_metering_stop +} + +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and pivot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just set up NAT so that fixed guests can get out. 
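The NAT fallback in the else branch below amounts to one masquerade rule per default-route device; as a sketch, with a hypothetical single NIC eth0 and the usual FLOATING_RANGE default of 172.24.4.0/24, the loop reduces to:

```bash
# Equivalent effect of the fallback loop for default_v4_route_devs=(eth0):
sudo iptables -t nat -A POSTROUTING -o eth0 -s 172.24.4.0/24 -j MASQUERADE
```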
+ if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done fi } # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + fi + if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup fi # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn + fi +} + + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR } # _configure_neutron_common() # Set common config for all neutron server and agents. # This MUST be called before other ``_configure_neutron_*`` functions. function _configure_neutron_common { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - if [[ ! -d $NEUTRON_CONF_DIR ]]; then - sudo mkdir -p $NEUTRON_CONF_DIR - fi - sudo chown $STACK_USER $NEUTRON_CONF_DIR + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + else + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. 
# For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, - # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: - # - # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` neutron_plugin_configure_common if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then @@ -585,24 +895,25 @@ function _configure_neutron_common { # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi - iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - # If addition config files are set, make sure their path name is set as well - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then - die $LINENO "Neutron additional plugin config not set.. 
exiting" - fi - - # If additional config files exist, copy them over to neutron configuration - # directory - if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - local f - for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do - Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - done - fi + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME if [ "$VIRT_DRIVER" = 'fake' ]; then # Disable arbitrary limits @@ -614,106 +925,48 @@ function _configure_neutron_common { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id - fi + setup_logging $NEUTRON_CONF _neutron_setup_rootwrap } -function _configure_neutron_debug_command { - if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then - return - fi - - cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE - - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" - - _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE - - neutron_plugin_configure_debug_command -} - function _configure_neutron_dhcp_agent { - AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" - Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini - cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - # Define extra "DEFAULT" configuration options when q-dhcp is configured by - # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``. - # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)`` - for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ } - done + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! 
is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi + fi + fi _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - neutron_plugin_configure_dhcp_agent + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function _configure_neutron_l3_agent { - local cfg_file - Q_L3_ENABLED=True - # for l3-agent, only use per tenant router if we have namespaces - Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE - - AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} - Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini - - if is_service_enabled q-fwaas; then - Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini - fi - - if is_service_enabled q-vpn; then - Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini - cp $NEUTRON_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE - VPN_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE" - for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do - VPN_CONF_FILES+=" --config-file $cfg_file" - done - fi - - cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE - - iniset $Q_L3_CONF_FILE DEFAULT verbose True - iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - _neutron_setup_interface_driver $Q_L3_CONF_FILE - - neutron_plugin_configure_l3_agent -} function _configure_neutron_metadata_agent { - AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - - cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - iniset $Q_META_CONF_FILE DEFAULT verbose True iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True True - + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE } -function _configure_neutron_lbaas { - neutron_agent_lbaas_configure_common - neutron_agent_lbaas_configure_agent +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } function _configure_neutron_metering { @@ -721,23 +974,18 @@ function _configure_neutron_metering { neutron_agent_metering_configure_agent } -function _configure_neutron_fwaas { - neutron_fwaas_configure_common - neutron_fwaas_configure_driver +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE } -function _configure_neutron_vpn { - neutron_vpn_install_agent_packages - neutron_vpn_configure_common -} # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. 
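The function body first writes the default root helper via configure_root_helper_options so that a plugin's own agent configuration can still override it. With rootwrap enabled, the effect is roughly the following (the ML2 config path and rootwrap locations are typical DevStack defaults, assumed here rather than taken from this change):

```bash
# Illustrative effect of configure_root_helper_options on a plugin config file:
iniset /etc/neutron/plugins/ml2/ml2_conf.ini agent root_helper \
    "sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# Written only when Q_USE_ROOTWRAP_DAEMON is True:
iniset /etc/neutron/plugins/ml2/ml2_conf.ini agent root_helper_daemon \
    "sudo /usr/local/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf"
```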
function _configure_neutron_plugin_agent { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - iniset $NEUTRON_CONF DEFAULT verbose True + configure_root_helper_options /$Q_PLUGIN_CONF_FILE iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Configure agent for plugin @@ -748,43 +996,31 @@ function _configure_neutron_plugin_agent { # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE + else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + fi # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - - iniset $NEUTRON_CONF DEFAULT verbose True iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken - - # Define extra "DEFAULT" configuration options when q-svc is configured by - # defining the array ``Q_SRV_EXTRA_DEFAULT_OPTS``. - # For Example: ``Q_SRV_EXTRA_DEFAULT_OPTS=(foo=true bar=2)`` - for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset $NEUTRON_CONF DEFAULT ${I/=/ } - done + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - # Configuration for neutron notifations to nova. 
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES - iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2" - iniset $NEUTRON_CONF DEFAULT nova_admin_username nova - iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD - ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }") - iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID - iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement # Configure plugin neutron_plugin_configure_service @@ -793,8 +1029,8 @@ function _configure_neutron_service { # Utility Functions #------------------ -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { +# neutron_service_plugin_class_add() - add service plugin class +function neutron_service_plugin_class_add { local service_plugin_class=$1 if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class @@ -803,71 +1039,80 @@ function _neutron_service_plugin_class_add { fi } +# neutron_ml2_extension_driver_add() - add ML2 extension driver +function neutron_ml2_extension_driver_add { + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" + fi +} + +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) +} + +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). +function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ +} + # _neutron_setup_rootwrap() - configure Neutron's rootwrap function _neutron_setup_rootwrap { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi - # Deploy new rootwrap filters files (owned by root). 
# Wipe any existing ``rootwrap.d`` files first Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d if [[ -d $Q_CONF_ROOTWRAP_D ]]; then sudo rm -rf $Q_CONF_ROOTWRAP_D fi - # Deploy filters to ``$NEUTRON_CONF_DIR/rootwrap.d`` - mkdir -p -m 755 $Q_CONF_ROOTWRAP_D - cp -pr $NEUTRON_DIR/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ - sudo chown -R root:root $Q_CONF_ROOTWRAP_D - sudo chmod 644 $Q_CONF_ROOTWRAP_D/* + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo chown root:root $Q_RR_CONF_FILE - sudo chmod 0644 $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" # Set up the rootwrap sudoers for neutron TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" -} - -# Configures keystone integration for neutron service and agents -function _neutron_setup_keystone { - local conf_file=$1 - local section=$2 - local use_auth_url=$3 - local skip_auth_cache=$4 - local use_service_port=$5 - local keystone_port=$KEYSTONE_AUTH_PORT - if [[ -n $use_service_port ]]; then - keystone_port=$KEYSTONE_SERVICE_PORT - fi - if [[ -n $use_auth_url ]]; then - iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$keystone_port/v2.0" - else - iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST - iniset $conf_file $section auth_port $keystone_port - iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL - fi - iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME - iniset $conf_file $section admin_user $Q_ADMIN_USERNAME - iniset $conf_file $section admin_password $SERVICE_PASSWORD - if [[ -z $skip_auth_cache ]]; then - iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR - # Create cache dir - create_neutron_cache_dir + configure_root_helper_options $NEUTRON_CONF +} + +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi } @@ -879,63 +1124,9 @@ function _neutron_setup_interface_driver { neutron_plugin_setup_interface_driver $1 } - # Functions for Neutron Exercises #-------------------------------- -function 
delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function setup_neutron_debug { - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id - private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id - fi -} - -function teardown_neutron_debug { - delete_probe $PUBLIC_NETWORK_NAME - delete_probe $PRIVATE_NETWORK_NAME -} - -function _get_net_id { - neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -function _ping_check_neutron { - local from_net=$1 - local ip=$2 - local timeout_sec=$3 - local expected=${4:-"True"} - local check_command="" - probe_cmd=`_get_probe_cmd_prefix $from_net` - if [[ "$expected" = "True" ]]; then - check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - else - check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - fi - if ! timeout $timeout_sec sh -c "$check_command"; then - if [[ "$expected" = "True" ]]; then - die $LINENO "[Fail] Couldn't ping server" - else - die $LINENO "[Fail] Could ping server" - fi - fi -} - # ssh check function _ssh_check_neutron { local from_net=$1 @@ -945,63 +1136,21 @@ function _ssh_check_neutron { local timeout_sec=$5 local probe_cmd = "" probe_cmd=`_get_probe_cmd_prefix $from_net` - if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then - die $LINENO "server didn't become ssh-able!" - fi + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec } -# Neutron 3rd party programs -#--------------------------- - -# please refer to ``lib/neutron_thirdparty/README.md`` for details -NEUTRON_THIRD_PARTIES="" -for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi -done - -function _neutron_third_party_do { - for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do - ${1}_${third_party} - done -} - -# configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party { - _neutron_third_party_do configure -} - -# init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party { - _neutron_third_party_do init } -# install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party { - _neutron_third_party_do install -} - -# start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party { - _neutron_third_party_do start -} - -# stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party { - _neutron_third_party_do stop -} - -# check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration { - _neutron_third_party_do check -} - - # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/neutron-legacy b/lib/neutron-legacy new file mode 100644 index 0000000000..e90400fec1 --- /dev/null +++ b/lib/neutron-legacy @@ -0,0 +1,6 @@ +#!/bin/bash + +# TODO(slaweq): remove that file when other projects, like e.g. Grenade will +# be using lib/neutron + +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index be8fd96677..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -16,17 +16,14 @@ functions ``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : - set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf - e.g. - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + optionally set options in nova_conf * ``neutron_plugin_install_agent_packages`` : install packages that is specific to plugin agent e.g. 
install_package bridge-utils * ``neutron_plugin_configure_common`` : set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, - ``Q_DB_NAME``, ``Q_PLUGIN_CLASS`` -* ``neutron_plugin_configure_debug_command`` + ``Q_PLUGIN_CLASS`` * ``neutron_plugin_configure_dhcp_agent`` * ``neutron_plugin_configure_l3_agent`` * ``neutron_plugin_configure_plugin_agent`` diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index efdd9ef794..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -1,8 +1,10 @@ -# Neuton Big Switch/FloodLight plugin +#!/bin/bash +# +# Neutron Big Switch/FloodLight plugin # ------------------------------------ # Save trace setting -BS_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base @@ -19,16 +21,11 @@ function neutron_plugin_install_agent_packages { function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch Q_PLUGIN_CONF_FILENAME=restproxy.ini - Q_DB_NAME="restproxy_neutron" Q_PLUGIN_CLASS="neutron.plugins.bigswitch.plugin.NeutronRestProxyV2" BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - function neutron_plugin_configure_dhcp_agent { : } @@ -57,9 +54,9 @@ function neutron_plugin_configure_service { function neutron_plugin_setup_interface_driver { local conf_file=$1 if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver ivs else - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver openvswitch fi } @@ -70,8 +67,8 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace -$BS_XTRACE +$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index e4cc754039..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -1,8 +1,10 @@ +#!/bin/bash +# # Brocade Neutron Plugin # ---------------------- # Save trace setting -BRCD_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_BROCADE=$(set +o | grep xtrace) set +o xtrace function is_neutron_ovs_base_plugin { @@ -10,7 +12,7 @@ function is_neutron_ovs_base_plugin { } function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages { @@ -20,7 +22,6 @@ function neutron_plugin_install_agent_packages { function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade Q_PLUGIN_CONF_FILENAME=brocade.ini - Q_DB_NAME="brcd_neutron" Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" } @@ -48,16 +49,11 @@ function neutron_plugin_configure_service { } -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager 
neutron.agent.dhcp_agent.DhcpAgentWithStateReport } function neutron_plugin_configure_l3_agent { - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } @@ -67,7 +63,7 @@ function neutron_plugin_configure_plugin_agent { function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver + iniset $conf_file DEFAULT interface_driver linuxbridge } function has_neutron_plugin_security_group { @@ -76,8 +72,8 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace -$BRCD_XTRACE +$_XTRACE_NEUTRON_BROCADE diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index dccf4003c3..b397169b59 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -1,8 +1,10 @@ +#!/bin/bash +# # Neutron Cisco plugin # --------------------------- # Save trace setting -CISCO_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_CISCO=$(set +o | grep xtrace) set +o xtrace # Scecify the VSM parameters @@ -20,38 +22,12 @@ Q_CISCO_PLUGIN_VXLAN_ID_RANGES=${Q_CISCO_PLUGIN_VXLAN_ID_RANGES:-5000:10000} # Specify the VLAN range Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} -# Specify ncclient package information -NCCLIENT_DIR=$DEST/ncclient -NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1} -NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git} -NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} - # This routine put a prefix on an existing function name function _prefix_function { declare -F $1 > /dev/null || die "$1 doesn't exist" eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" } -function _has_ovs_subplugin { - local subplugin - for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do - if [[ "$subplugin" == "openvswitch" ]]; then - return 0 - fi - done - return 1 -} - -function _has_nexus_subplugin { - local subplugin - for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do - if [[ "$subplugin" == "nexus" ]]; then - return 0 - fi - done - return 1 -} - function _has_n1kv_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do @@ -62,27 +38,6 @@ function _has_n1kv_subplugin { return 1 } -# This routine populates the cisco config file with the information for -# a particular nexus switch -function _config_switch { - local cisco_cfg_file=$1 - local switch_ip=$2 - local username=$3 - local password=$4 - local ssh_port=$5 - shift 5 - - local section="NEXUS_SWITCH:$switch_ip" - iniset $cisco_cfg_file $section username $username - iniset $cisco_cfg_file $section password $password - iniset $cisco_cfg_file $section ssh_port $ssh_port - - while [[ ${#@} != 0 ]]; do - iniset $cisco_cfg_file $section $1 $2 - shift 2 - done -} - # Prefix openvswitch plugin routines with "ovs" in order to differentiate from # cisco plugin routines. This means, ovs plugin routines will coexist with cisco # plugin routines in this script. 
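The `_prefix_function` helper above re-declares an existing shell function under a prefixed name, which is what lets the ovs-prefixed copies coexist with the cisco-specific definitions that follow. A minimal, self-contained sketch of the idiom (the function names are made up for the demo):

```bash
#!/bin/bash
# Rename-by-eval idiom used by _prefix_function above.
function die { echo "$@" >&2; exit 1; }   # stand-in for devstack's die

function greet { echo "hello"; }

function _prefix_function {
    declare -F $1 > /dev/null || die "$1 doesn't exist"
    # Emit a new header "<prefix>_<name>()", then append the original body
    # by dropping the first line of the `declare -f <name>` output.
    eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
}

_prefix_function greet ovs
ovs_greet   # prints "hello"; greet can now be redefined without losing it
```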
@@ -90,7 +45,6 @@ source $TOP_DIR/lib/neutron_plugins/openvswitch _prefix_function neutron_plugin_create_nova_conf ovs _prefix_function neutron_plugin_install_agent_packages ovs _prefix_function neutron_plugin_configure_common ovs -_prefix_function neutron_plugin_configure_debug_command ovs _prefix_function neutron_plugin_configure_dhcp_agent ovs _prefix_function neutron_plugin_configure_l3_agent ovs _prefix_function neutron_plugin_configure_plugin_agent ovs @@ -98,73 +52,17 @@ _prefix_function neutron_plugin_configure_service ovs _prefix_function neutron_plugin_setup_interface_driver ovs _prefix_function has_neutron_plugin_security_group ovs -# Check the version of the installed ncclient package -function check_ncclient_version { -python << EOF -version = '$NCCLIENT_VERSION' -import sys -try: - import pkg_resources - import ncclient - module_version = pkg_resources.get_distribution('ncclient').version - if version != module_version: - sys.exit(1) -except: - sys.exit(1) -EOF -} - -# Install the ncclient package -function install_ncclient { - git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH - (cd $NCCLIENT_DIR; sudo python setup.py install) -} - -# Check if the required version of ncclient has been installed -function is_ncclient_installed { - # Check if the Cisco ncclient repository exists - if [[ -d $NCCLIENT_DIR ]]; then - remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}') - for remote in $remotes; do - if [[ $remote == $NCCLIENT_REPO ]]; then - break; - fi - done - if [[ $remote != $NCCLIENT_REPO ]]; then - return 1 - fi - else - return 1 - fi - - # Check if the ncclient is installed with the right version - if ! check_ncclient_version; then - return 1 - fi - return 0 -} - function has_neutron_plugin_security_group { - if _has_ovs_subplugin; then - ovs_has_neutron_plugin_security_group - else - return 1 - fi + return 1 } function is_neutron_ovs_base_plugin { - # Cisco uses OVS if openvswitch subplugin is deployed - _has_ovs_subplugin return } # populate required nova configuration parameters function neutron_plugin_create_nova_conf { - if _has_ovs_subplugin; then - ovs_neutron_plugin_create_nova_conf - else - _neutron_ovs_base_configure_nova_vif_driver - fi + _neutron_ovs_base_configure_nova_vif_driver } function neutron_plugin_install_agent_packages { @@ -177,33 +75,11 @@ function neutron_plugin_configure_common { # setup default subplugins if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then declare -ga Q_CISCO_PLUGIN_SUBPLUGINS - Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus) - fi - if _has_ovs_subplugin; then - ovs_neutron_plugin_configure_common - Q_PLUGIN_EXTRA_CONF_PATH=etc/neutron/plugins/cisco - Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini) - # Copy extra config files to /etc so that they can be modified - # later according to Cisco-specific localrc settings. 
- mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH - local f - local extra_conf_file - for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do - extra_conf_file=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - cp $NEUTRON_DIR/$extra_conf_file /$extra_conf_file - done - else - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco - Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini + Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv) fi + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco + Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2" - Q_DB_NAME=cisco_neutron -} - -function neutron_plugin_configure_debug_command { - if _has_ovs_subplugin; then - ovs_neutron_plugin_configure_debug_command - fi } function neutron_plugin_configure_dhcp_agent { @@ -211,53 +87,7 @@ function neutron_plugin_configure_dhcp_agent { } function neutron_plugin_configure_l3_agent { - if _has_ovs_subplugin; then - ovs_neutron_plugin_configure_l3_agent - fi -} - -function _configure_nexus_subplugin { - local cisco_cfg_file=$1 - - # Install a known compatible ncclient from the Cisco repository if necessary - if ! is_ncclient_installed; then - # Preserve the two global variables - local offline=$OFFLINE - local reclone=$RECLONE - # Change their values to allow installation - OFFLINE=False - RECLONE=yes - install_ncclient - # Restore their values - OFFLINE=$offline - RECLONE=$reclone - fi - - # Setup default nexus switch information - if [ ! -v Q_CISCO_PLUGIN_SWITCH_INFO ]; then - declare -A Q_CISCO_PLUGIN_SWITCH_INFO - HOST_NAME=$(hostname) - Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10) - else - iniset $cisco_cfg_file CISCO nexus_driver neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver - fi - - # Setup the switch configurations - local nswitch - local sw_info - local segment - local sw_info_array - declare -i count=0 - for nswitch in ${!Q_CISCO_PLUGIN_SWITCH_INFO[@]}; do - sw_info=${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]} - sw_info_array=${sw_info//:/ } - sw_info_array=( $sw_info_array ) - count=${#sw_info_array[@]} - if [[ $count < 5 || $(( ($count-3) % 2 )) != 0 ]]; then - die $LINENO "Incorrect switch configuration: ${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}" - fi - _config_switch $cisco_cfg_file $nswitch ${sw_info_array[@]} - done + : } # Configure n1kv plugin @@ -280,57 +110,43 @@ function _configure_n1kv_subplugin { } function neutron_plugin_configure_plugin_agent { - if _has_ovs_subplugin; then - ovs_neutron_plugin_configure_plugin_agent - fi + : } function neutron_plugin_configure_service { local subplugin local cisco_cfg_file - if _has_ovs_subplugin; then - ovs_neutron_plugin_configure_service - cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]} - else - cisco_cfg_file=/$Q_PLUGIN_CONF_FILE - fi + cisco_cfg_file=/$Q_PLUGIN_CONF_FILE # Setup the [CISCO_PLUGINS] section if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then die $LINENO "At most two subplugins are supported." 
fi - if _has_ovs_subplugin && _has_n1kv_subplugin; then - die $LINENO "OVS subplugin and n1kv subplugin cannot coexist" - fi - # Setup the subplugins - inicomment $cisco_cfg_file CISCO_PLUGINS nexus_plugin inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin inicomment $cisco_cfg_file CISCO_TEST host for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do case $subplugin in - nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;; - openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2;; n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2;; *) die $LINENO "Unsupported cisco subplugin: $subplugin";; esac done - if _has_nexus_subplugin; then - _configure_nexus_subplugin $cisco_cfg_file - fi - if _has_n1kv_subplugin; then _configure_n1kv_subplugin $cisco_cfg_file fi } +function neutron_plugin_create_initial_network_profile { + neutron cisco-network-profile-create default_network_profile vlan --segment_range 1-3000 --physical_network "$1" +} + function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver openvswitch } # Restore xtrace -$CISCO_XTRACE +$_XTRACE_NEUTRON_CISCO diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index cce108a4a1..385dab8354 100644 --- a/lib/neutron_plugins/embrane +++ b/lib/neutron_plugins/embrane @@ -1,14 +1,17 @@ +#!/bin/bash +# # Neutron Embrane plugin # --------------------------- # Save trace setting -EMBR_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_EMBR=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch function save_function { - local ORIG_FUNC=$(declare -f $1) + local ORIG_FUNC + ORIG_FUNC=$(declare -f $1) local NEW_FUNC="$2${ORIG_FUNC#$1}" eval "$NEW_FUNC" } @@ -18,7 +21,6 @@ save_function neutron_plugin_configure_service _neutron_plugin_configure_service function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane Q_PLUGIN_CONF_FILENAME=heleos_conf.ini - Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" } @@ -37,4 +39,5 @@ function neutron_plugin_configure_service { } # Restore xtrace -$EMBR_XTRACE +$_XTRACE_NEUTRON_EMBR + diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm deleted file mode 100644 index 3aef9d0359..0000000000 --- a/lib/neutron_plugins/ibm +++ /dev/null @@ -1,133 +0,0 @@ -# Neutron IBM SDN-VE plugin -# --------------------------- - -# Save trace setting -IBM_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages -} - -function _neutron_interface_setup { - # Setup one interface on the integration bridge if needed - # The plugin agent to be used if more than one interface is used - local bridge=$1 - local interface=$2 - sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface -} - -function neutron_setup_integration_bridge { - # Setup integration bridge if needed - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - neutron_ovs_base_cleanup - _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE - if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then - 
interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ }) - _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]} - fi - fi - - # Set controller to SDNVE controller (1st of list) if exists - if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then - # Get the first controller - controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ }) - SDNVE_IP=${controllers[0]} - sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP - fi -} - -function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} - # if n-cpu is enabled, then setup integration bridge - if is_service_enabled n-cpu; then - neutron_setup_integration_bridge - fi -} - -function is_neutron_ovs_base_plugin { - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - # Yes, we use OVS. - return 0 - else - # No, we do not use OVS. - return 1 - fi -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm - Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini - Q_DB_NAME="sdnve_neutron" - Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2" -} - -function neutron_plugin_configure_service { - # Define extra "SDNVE" configuration options when q-svc is configured - - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - - if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS - fi - - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE - fi - - if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE - fi - - if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND - fi - - if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS - fi - - if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER - fi - - - iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier - -} - -function neutron_plugin_configure_plugin_agent { - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent" -} - -function neutron_plugin_configure_debug_command { - : -} - -function neutron_plugin_setup_interface_driver { - return 0 -} - -function has_neutron_plugin_security_group { - # Does not support Security Groups - return 1 -} - -function neutron_ovs_base_cleanup { - if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then - # remove all OVS ports that look like Neutron created ports - for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do - sudo ovs-vsctl del-port ${port} - done - - # remove integration bridge created by Neutron - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do - sudo ovs-vsctl del-br ${bridge} - done - fi -} - -# Restore xtrace -$IBM_XTRACE diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge deleted file mode 100644 index 113a7dfda6..0000000000 --- a/lib/neutron_plugins/linuxbridge +++ /dev/null @@ -1,56 +0,0 @@ -# Neutron Linux Bridge plugin -# --------------------------- - -# Save trace setting -LBRIDGE_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent - -function neutron_plugin_configure_common { - 
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge - Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini - Q_DB_NAME="neutron_linux_bridge" - Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2" -} - -function neutron_plugin_configure_service { - if [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan - else - echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$LB_VLAN_RANGES" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - LB_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$LB_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vlans network_vlan_ranges $LB_VLAN_RANGES - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - - # Define extra "LINUX_BRIDGE" configuration options when q-svc is configured by defining - # the array ``Q_SRV_EXTRA_OPTS``. - # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` - for I in "${Q_SRV_EXTRA_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ } - done -} - -function has_neutron_plugin_security_group { - # 0 means True here - return 0 -} - -# Restore xtrace -$LBRIDGE_XTRACE diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index 82b5fc903d..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,76 +0,0 @@ -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -PLUGIN_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - install_package bridge-utils -} - -function neutron_plugin_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - -function neutron_plugin_configure_dhcp_agent { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function neutron_plugin_configure_l3_agent { - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - # Define extra "AGENT" configuration options when q-agt is configured by defining - # the array ``Q_AGENT_EXTRA_AGENT_OPTS``. - # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` - for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } - done - # Define extra "LINUX_BRIDGE" configuration options when q-agt is configured by defining - # the array ``Q_AGENT_EXTRA_SRV_OPTS``. - # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)`` - for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ } - done -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$PLUGIN_XTRACE diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet deleted file mode 100644 index c5373d656f..0000000000 --- a/lib/neutron_plugins/midonet +++ /dev/null @@ -1,87 +0,0 @@ -# Neutron MidoNet plugin -# ---------------------- - -MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} -MIDONET_API_PORT=${MIDONET_API_PORT:-8080} -MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} - -# Save trace setting -MN_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -function is_neutron_ovs_base_plugin { - # MidoNet does not use l3-agent - # 0 means True here - return 1 -} - -function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} -} - -function neutron_plugin_install_agent_packages { - : -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet - Q_PLUGIN_CONF_FILENAME=midonet.ini - Q_DB_NAME="neutron_midonet" - Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2" -} - -function neutron_plugin_configure_debug_command { - : -} - -function neutron_plugin_configure_dhcp_agent { - DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} - neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True -} - -function neutron_plugin_configure_l3_agent { - die $LINENO "q-l3 must not be executed with MidoNet plugin!" -} - -function neutron_plugin_configure_plugin_agent { - die $LINENO "q-agt must not be executed with MidoNet plugin!" 
-} - -function neutron_plugin_configure_service { - if [[ "$MIDONET_API_URL" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL - fi - if [[ "$MIDONET_USERNAME" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME - fi - if [[ "$MIDONET_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD - fi - if [[ "$MIDONET_PROJECT_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID - fi - - Q_L3_ENABLED=True - Q_L3_ROUTER_PER_TENANT=True -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver -} - -function has_neutron_plugin_security_group { - # 0 means True here - return 0 -} - -function neutron_plugin_check_adv_test_requirements { - # 0 means True here - return 1 -} - -# Restore xtrace -$MN_XTRACE diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 99663733e1..687167bf79 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -1,37 +1,51 @@ +#!/bin/bash +# # Neutron Modular Layer 2 plugin # ------------------------------ # Save trace setting -ML2_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} +if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then + source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +fi + # Enable this to simply and quickly enable tunneling with ML2. -# Select either 'gre', 'vxlan', or '(gre vxlan)' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-} +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file -if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) +if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then + Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre) + Q_TUNNEL_TYPES=gre fi -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} -source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent - # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} -# List of Type Drivers to load -Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options -Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000} +Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} # Default VLAN TypeDriver options Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# Default GENEVE TypeDriver options +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} +# List of extension drivers to load, use '-' instead of ':-' to allow people to +# explicitly override this to blank +if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then + 
Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+else
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-}
+fi

 # L3 Plugin to load for ML2
-ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
+# In some flat network environments an L3 plugin is not wanted, so make sure
+# ML2_L3_PLUGIN can be set to an empty value.
+ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router}

 function populate_ml2_config {
     CONF=$1
@@ -50,15 +64,14 @@ function populate_ml2_config {
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
     Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
-    Q_DB_NAME="neutron_ml2"
-    Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin"
+    Q_PLUGIN_CLASS="ml2"
     # The ML2 plugin delegates L3 routing/NAT functionality to
     # the L3 service plugin which must therefore be specified.
-    _neutron_service_plugin_class_add $ML2_L3_PLUGIN
+    neutron_service_plugin_class_add $ML2_L3_PLUGIN
 }

 function neutron_plugin_configure_service {
-    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "local" ]]; then
         Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
     elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
         # This assumes you want a simple configuration, and will overwrite
@@ -85,25 +98,29 @@ function neutron_plugin_configure_service {
         fi
     fi

-    # REVISIT(rkukura): Setting firewall_driver here for
-    # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
-    # used in the server, in case no L2 agent is configured on the
-    # server's node. If an L2 agent is configured, this will get
-    # overridden with the correct driver. The ml2 plugin should
-    # instead use its own config variable to indicate whether security
-    # groups is enabled, and that will need to be set here instead.
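# (Two illustrative asides on the constructs used above, with hypothetical
# values. First, the bash expansion forms differ: ${VAR-default} substitutes
# the default only when VAR is unset, while ${VAR:-default} also substitutes
# when VAR is set but empty, so only the '-' form lets an operator force a
# blank value:
#   unset EXT;  echo "${EXT-port_security}"   # -> port_security
#   EXT="";     echo "${EXT-port_security}"   # -> empty, override honored
#   EXT="";     echo "${EXT:-port_security}"  # -> port_security
# Second, a sketch of what a populate_ml2_config call is expected to leave in
# the plugin config, assuming iniset-style behaviour:
#   populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=ovn
#   # ml2_conf.ini then contains:
#   #   [ml2]
#   #   mechanism_drivers = ovn
# )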
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - # Since we enable the tunnel TypeDrivers, also enable a local_ip - iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + # Allow for setup the flat type network + if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" ]]; then + if [[ -n "$PHYSICAL_NETWORK" || -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=" + if [[ -n "$PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK}," + fi + if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]] && [[ "${PHYSICAL_NETWORK}" != "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK}," + fi + fi + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION + + if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + fi - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 extension_drivers=$Q_ML2_PLUGIN_EXT_DRIVERS populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS @@ -111,12 +128,27 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_flat $Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve $Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS + + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True + fi } function has_neutron_plugin_security_group { return 0 } +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + # Restore xtrace -$ML2_XTRACE +$_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec deleted file mode 100644 index d76f7d4aaf..0000000000 --- a/lib/neutron_plugins/nec +++ /dev/null @@ -1,130 +0,0 @@ -# Neutron NEC OpenFlow plugin -# --------------------------- - -# Save trace setting -NEC_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Configuration parameters -OFC_HOST=${OFC_HOST:-127.0.0.1} -OFC_PORT=${OFC_PORT:-8888} - -OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST} -OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT} -OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST} -OFC_OFP_PORT=${OFC_OFP_PORT:-6633} -OFC_DRIVER=${OFC_DRIVER:-trema} -OFC_RETRY_MAX=${OFC_RETRY_MAX:-0} -OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} - -# Main logic -# --------------------------- - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -function neutron_plugin_create_nova_conf { - _neutron_ovs_base_configure_nova_vif_driver -} - -function neutron_plugin_install_agent_packages { - # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose - # version is different from the version provided by the 
distribution. - if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then - echo "You need to install Open vSwitch manually." - return - fi - _neutron_ovs_base_install_agent_packages -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec - Q_PLUGIN_CONF_FILENAME=nec.ini - Q_DB_NAME="neutron_nec" - Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - -function neutron_plugin_configure_dhcp_agent { - : -} - -function neutron_plugin_configure_l3_agent { - _neutron_ovs_base_configure_l3_agent -} - -function _quantum_plugin_setup_bridge { - if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then - return - fi - # Set up integration bridge - _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Generate datapath ID from HOST_IP - local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ }) - sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid - sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure - sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT - if [ -n "$OVS_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE - fi - _neutron_setup_ovs_tunnels $OVS_BRIDGE -} - -function neutron_plugin_configure_plugin_agent { - _quantum_plugin_setup_bridge - - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_configure_service { - iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ - iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST - iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT - iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER - iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX - iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver - iniset $conf_file DEFAULT ovs_use_veth True -} - -# Utility functions -# --------------------------- - -# Setup OVS tunnel manually -function _neutron_setup_ovs_tunnels { - local bridge=$1 - local id=0 - GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} - if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ }; do - if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then - continue - fi - sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ - set Interface gre$id type=gre options:remote_ip=$ip - id=`expr $id + 1` - done - fi -} - -function has_neutron_plugin_security_group { - # 0 means True here - return 0 -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$NEC_XTRACE diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 86f09d2b54..8c75e15048 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -1,16 +1,16 @@ +#!/bin/bash +# # Nuage Neutron Plugin # ---------------------- # Save trace setting -NU_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_NU=$(set +o | grep xtrace) set +o xtrace function neutron_plugin_create_nova_conf { + local conf="$1" NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} - iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} - 
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE } function neutron_plugin_install_agent_packages { @@ -20,7 +20,6 @@ function neutron_plugin_install_agent_packages { function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini - Q_DB_NAME="nuage_neutron" Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin" Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions #Nuage specific Neutron defaults. Actual value must be set and sourced @@ -33,10 +32,6 @@ function neutron_plugin_configure_common { NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} } -function neutron_plugin_configure_debug_command { - : -} - function neutron_plugin_configure_dhcp_agent { : } @@ -66,4 +61,4 @@ function has_neutron_plugin_security_group { } # Restore xtrace -$NU_XTRACE +$_XTRACE_NEUTRON_NU diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent deleted file mode 100644 index b8321f3375..0000000000 --- a/lib/neutron_plugins/ofagent_agent +++ /dev/null @@ -1,94 +0,0 @@ -# OpenFlow Agent plugin -# ---------------------- - -# Save trace setting -OFA_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base -source $TOP_DIR/lib/neutron_thirdparty/ryu # for RYU_DIR, install_ryu, etc - -function neutron_plugin_create_nova_conf { - _neutron_ovs_base_configure_nova_vif_driver -} - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages - - # This agent uses ryu to talk with switches - install_package $(get_packages "ryu") - install_ryu - configure_ryu -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - -function neutron_plugin_configure_dhcp_agent { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function neutron_plugin_configure_l3_agent { - _neutron_ovs_base_configure_l3_agent - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport -} - -function neutron_plugin_configure_plugin_agent { - # Set up integration bridge - _neutron_ovs_base_setup_bridge $OVS_BRIDGE - _neutron_ovs_base_configure_firewall_driver - - # Check a supported openflow version - OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2` - if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then - die $LINENO "This agent requires OpenFlow 1.3+ capable switch." - fi - - # Enable tunnel networks if selected - if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` - if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then - die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." - fi - iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP - fi - - # Setup physical network bridge mappings. Override - # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
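# (Illustration, hypothetical values: with PHYSICAL_NETWORK=physnet1 and
# OVS_PHYSICAL_BRIDGE=br-eth1 in localrc, the fallback below yields
#   OVS_BRIDGE_MAPPINGS=physnet1:br-eth1
# so traffic for physnet1 is patched through bridge br-eth1.)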
- if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE - - # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE - fi - if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent" - - # Define extra "AGENT" configuration options when q-agt is configured by defining - # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. - # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` - for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do - # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } - done -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver - iniset $conf_file DEFAULT ovs_use_veth True -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$OFA_XTRACE diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence deleted file mode 100644 index 06f1eee8c7..0000000000 --- a/lib/neutron_plugins/oneconvergence +++ /dev/null @@ -1,76 +0,0 @@ -# Neutron One Convergence plugin -# --------------------------- -# Save trace setting -OC_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -Q_L3_ENABLED=true -Q_L3_ROUTER_PER_TENANT=true -Q_USE_NAMESPACE=true - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages -} -# Configure common parameters -function neutron_plugin_configure_common { - - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence - Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini - Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2" - Q_DB_NAME='oc_nvsd_neutron' -} - -# Configure plugin specific information -function neutron_plugin_configure_service { - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver -} - -function has_neutron_plugin_security_group { - # 1 means False here - return 0 -} - -function setup_integration_bridge { - _neutron_ovs_base_setup_bridge $OVS_BRIDGE -} - -function neutron_plugin_configure_dhcp_agent { - setup_integration_bridge - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function neutron_plugin_configure_l3_agent { - _neutron_ovs_base_configure_l3_agent - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport -} - -function neutron_plugin_configure_plugin_agent { - - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent" - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} - if ( is_service_enabled n-cpu && 
! ( is_service_enabled q-dhcp )) ; then - setup_integration_bridge - fi -} - -# Restore xtrace -$OC_XTRACE diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index fc81092682..130eaacab3 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -1,8 +1,13 @@ -# Neutron Open vSwitch plugin -# --------------------------- +#!/bin/bash +# +# Common code used by cisco and embrane plugins +# --------------------------------------------- + +# This module used to be for Open vSwitch monolithic plugin, +# which has been removed in Juno. # Save trace setting -OVS_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_OVS=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch_agent @@ -10,7 +15,6 @@ source $TOP_DIR/lib/neutron_plugins/openvswitch_agent function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini - Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" } @@ -36,11 +40,6 @@ function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE ovs network_vlan_ranges $OVS_VLAN_RANGES fi - # Enable tunnel networks if selected - if [[ $OVS_ENABLE_TUNNELING == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True - fi - _neutron_ovs_base_configure_firewall_driver # Define extra "OVS" configuration options when q-svc is configured by defining @@ -57,4 +56,5 @@ function has_neutron_plugin_security_group { } # Restore xtrace -$OVS_XTRACE +$_XTRACE_NEUTRON_OVS + diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index fbc013f565..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -1,37 +1,34 @@ +#!/bin/bash +# # Neutron Open vSwitch L2 agent # ----------------------------- # Save trace setting -OVSA_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_OVSL2=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - iniset $NOVA_CONF xenserver vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver - iniset $NOVA_CONF xenserver ovs_integration_bridge $XEN_INTEGRATION_BRIDGE - # Disable nova's firewall so that it does not conflict with neutron - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver - fi } function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib "os-ken" + fi } function neutron_plugin_configure_dhcp_agent { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport + local conf_file=$1 + : } function neutron_plugin_configure_l3_agent { + local conf_file=$1 _neutron_ovs_base_configure_l3_agent - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } function neutron_plugin_configure_plugin_agent { @@ -41,91 +38,38 @@ function neutron_plugin_configure_plugin_agent { # Setup agent for tunneling if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | 
head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` - if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ] && ! is_service_enabled q-svc ; then - die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." - fi - iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP + iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_bridge $OVS_TUNNEL_BRIDGE fi # Setup physical network bridge mappings. Override # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. - if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + fi # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE fi if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS fi AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-openvswitch-agent" - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - # Make a copy of our config for domU - sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu" - - # Deal with Dom0's L2 Agent: - Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE" - - # For now, duplicate the xen configuration already found in nova.conf - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_username "$XENAPI_USER" - iniset $Q_RR_CONF_FILE xenapi xenapi_connection_password "$XENAPI_PASSWORD" - - # Under XS/XCP, the ovs agent needs to target the dom0 - # integration bridge. This is enabled by using a root wrapper - # that executes commands on dom0 via a XenAPI plugin. - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND" - - # Set "physical" mapping - iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" - - # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 - iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE - - # Set up domU's L2 agent: - - # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" - sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT" - # Add $GUEST_INTERFACE_DEFAULT to that bridge - sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT - - # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" - # Set integration bridge to domU's - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE - # Set root wrap - iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND" - fi - # Define extra "AGENT" configuration options when q-agt is configured by defining - # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. 
-    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
-    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
-    done
-    # Define extra "OVS" configuration options when q-agt is configured by defining
-    # defining the array ``Q_AGENT_EXTRA_SRV_OPTS``.
-    # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)``
-    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ }
-    done
+    iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
+    iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE
 }

 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver openvswitch
 }

 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }

 # Restore xtrace
-$OVSA_XTRACE
+$_XTRACE_NEUTRON_OVSL2
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
new file mode 100644
index 0000000000..48e92a1782
--- /dev/null
+++ b/lib/neutron_plugins/ovn_agent
@@ -0,0 +1,867 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+# Global Sources
+# --------------
+
+# There are some ovs functions OVN depends on that must be sourced from
+# the ovs neutron plugins.
+source ${TOP_DIR}/lib/neutron_plugins/ovs_base
+source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
+
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Set variables for building OVN from source
+OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
+OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
+OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
+OVN_BRANCH=${OVN_BRANCH:-branch-24.03}
+# The commit removing OVN bits from the OVS tree; it is the commit that is not
+# present in the OVN tree and is used to distinguish if OVN is part of OVS or not.
+# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d
+OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d
+
+if is_service_enabled tls-proxy; then
+    OVN_PROTO=ssl
+else
+    OVN_PROTO=tcp
+fi
+
+# How to connect to ovsdb-server hosting the OVN SB database.
+OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}
+
+# How to connect to ovsdb-server hosting the OVN NB database
+OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}
+
+# ml2/config for neutron_sync_mode
+OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
+
+# Configured DNS servers to be used with internal_dns extension, only
+# if the subnet DNS is not configured.
+OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}
+
+# The type of OVN L3 Scheduler to use.
+# The OVN L3 Scheduler determines the hypervisor/chassis where a router's
+# gateway should be hosted in OVN. The default OVN L3 scheduler is
+# leastloaded.
+OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
+
+# A UUID to uniquely identify this system. If one is not specified, a random
+# one will be generated. A randomly generated UUID will be saved in a file
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart the
+# Open vSwitch service.
+OVN_UUID=${OVN_UUID:-}
+
+# Whether or not to build the openvswitch kernel module from ovs. This is required
+# unless the distro kernel includes ovs+conntrack support.
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    Q_BUILD_OVS_FROM_GIT=True
+fi
+
+# Whether or not to install the ovs python module from ovs source. This can be
+# used to test and validate new ovs python features. This should only be used
+# for development purposes since the ovs python version is controlled by OpenStack
+# requirements.
+OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
+
+# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
+# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
+# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
+# configure the MTU DHCP option.
+OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
+
+# The log level of the OVN databases (north and south).
+# Supported log levels are: off, emer, err, warn, info or dbg.
+# More information about log levels can be found at
+# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
+OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
+
+# OVN metadata agent configuration
+OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
+OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+
+# OVN agent configuration
+# The OVN agent is configured, by default, with the "metadata" extension.
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini
+OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata}
+# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata
+# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN
+# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling
+# the first one (OVN Metadata agent) and enabling the second (OVN agent).
+# This variable will be removed in 2026.2, along with the OVN Metadata agent
+# removal.
+TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT)
+
+# If True (default) the node will be considered a gateway node.
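# (Worked example of OVN_GENEVE_OVERHEAD above, assuming a 1500-byte physical
# MTU: with the default IPv4 overlay, ML2 advertises 1500 - (38 + 20) = 1442
# bytes to instances via DHCP; with overlay_ip_version=6 it would advertise
# 1500 - (38 + 40) = 1422 bytes.)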
+ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) + +export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] +fi + +OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) + +OVS_PREFIX= +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVS_PREFIX=/usr/local +fi +OVS_SBINDIR=$OVS_PREFIX/sbin +OVS_BINDIR=$OVS_PREFIX/bin +OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch +OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch +OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts +OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} + +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi +OVN_SHAREDIR=$OVS_PREFIX/share/ovn +OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts +OVN_RUNDIR=$OVS_PREFIX/var/run/ovn + +NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) +NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" + +STACK_GROUP="$( id --group --name "$STACK_USER" )" + +OVN_NORTHD_SERVICE=ovn-northd.service +if is_ubuntu; then + # The ovn-central.service file on Ubuntu is responsible for starting + # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service) + OVN_NORTHD_SERVICE=ovn-central.service +fi +OVSDB_SERVER_SERVICE=ovsdb-server.service +OVS_VSWITCHD_SERVICE=ovs-vswitchd.service +OVN_CONTROLLER_SERVICE=ovn-controller.service +OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service + OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service + OVN_NORTHD_SERVICE=devstack@ovn-northd.service + OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service + OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service +fi + +# Defaults Overwrite +# ------------------ +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} +# this one allows empty: +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} + +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} + +# Utility Functions +# ----------------- + +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + +function wait_for_sock_file { + local count=0 + while [ ! 
-S $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "Socket $1 not found" + fi + done +} + +function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi + if [ -z "$is_new_ovn" ]; then + local ovs_repo_dir=$DEST/$OVS_REPO_NAME + if [ ! -d $ovs_repo_dir ]; then + git_timed clone $OVS_REPO $ovs_repo_dir + pushd $ovs_repo_dir + git checkout $OVS_BRANCH + popd + else + clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH + fi + # Check the split commit exists in the current branch + pushd $ovs_repo_dir + git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH + is_new_ovn=$? + popd + fi + return $is_new_ovn +} + +# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge +# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup +# removed. The call is not relevant for OVN, as it is specific to the use +# of Neutron's OVS agent and hangs when running stack.sh because +# neutron-ovs-cleanup uses the OVSDB native interface. +function ovn_base_setup_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _start_process { + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $1 + $SYSTEMCTL restart $1 +} + +function _run_process { + local service=$1 + local cmd="$2" + local stop_cmd="$3" + local group=$4 + local user=$5 + local rundir=${6:-$OVS_RUNDIR} + + local systemd_service="devstack@$service.service" + local unit_file="$SYSTEMD_DIR/$systemd_service" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + + echo "Starting $service executed command": $cmd + + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + iniset -sudo $unit_file "Service" "Type" "forking" + iniset -sudo $unit_file "Service" "RemainAfterExit" "yes" + iniset -sudo $unit_file "Service" "KillMode" "mixed" + iniset -sudo $unit_file "Service" "LimitNOFILE" "65536" + iniset -sudo $unit_file "Service" "Environment" "$environment" + if [ -n "$stop_cmd" ]; then + iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd" + fi + + _start_process $systemd_service + + local testcmd="test -e $rundir/$service.pid" + test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 + local service_ctl_file + service_ctl_file=$(ls $rundir | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info +} + +function clone_repository { + local repo=$1 + local dir=$2 + local branch=$3 + # Set ERROR_ON_CLONE to false to avoid the need of having the + # repositories like OVN and OVS in the required_projects of the job + # definition. + ERROR_ON_CLONE=false git_clone $repo $dir $branch +} + +function create_public_bridge { + # Create the public bridge that OVN will use + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS}
+    _configure_public_network_connectivity
+}
+
+function is_ovn_metadata_agent_enabled {
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then
+        return 0
+    fi
+    return 1
+}
+
+function is_ovn_agent_enabled {
+    if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then
+        enable_service q-ovn-agent
+        return 0
+    fi
+    return 1
+}
+
+# OVN compilation functions
+# -------------------------
+
+
+# compile_ovn() - Compile OVN from source and load needed modules
+# Accepts two parameters:
+#   - first optional parameter defines prefix for
+#     ovn compilation
+#   - second optional parameter defines localstatedir for
+#     ovn single machine runtime
+function compile_ovn {
+    local prefix=$1
+    local localstatedir=$2
+
+    if [ -n "$prefix" ]; then
+        prefix="--prefix=$prefix"
+    fi
+
+    if [ -n "$localstatedir" ]; then
+        localstatedir="--localstatedir=$localstatedir"
+    fi
+
+    clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH
+    pushd $DEST/$OVN_REPO_NAME
+
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+
+    # NOTE(mnaser): OVN requires that you build using the OVS from the
+    # submodule.
+    #
+    # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping
+    # https://github.com/ovn-org/ovn/issues/128
+    git submodule update --init
+    pushd ovs
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        ./configure
+    fi
+    make -j$(($(nproc) + 1))
+    popd
+
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        ./configure $prefix $localstatedir
+    fi
+    make -j$(($(nproc) + 1))
+    sudo make install
+    popd
+}
+
+
+# OVN Neutron driver functions
+# ----------------------------
+
+# OVN service sanity check
+function ovn_sanity_check {
+    if is_service_enabled q-agt neutron-agent; then
+        die $LINENO "The q-agt/neutron-agent service must be disabled with OVN."
+    elif is_service_enabled q-l3 neutron-l3; then
+        die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
+    elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then
+        die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS"
+    elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then
+        die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN"
+    fi
+}
+
+# install_ovn() - Collect source and prepare
+function install_ovn {
+    echo "Installing OVN and dependent packages"
+
+    # Check the OVN configuration
+    ovn_sanity_check
+
+    # Install tox, used to generate the config (see devstack/override-defaults)
+    pip_install tox
+
+    sudo mkdir -p $OVS_RUNDIR
+    sudo chown $(whoami) $OVS_RUNDIR
+
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+        # If OVS is already installed, remove it, because we're about to
+        # re-install it from source.
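# (Aside on the trueorfalse helper behind flags like the OVN_BUILD_FROM_SOURCE
# just tested above, assuming DevStack's documented semantics: it normalizes
# common truthy/falsy spellings and applies a default when the variable is
# unset, e.g.
#   OVN_BUILD_FROM_SOURCE=1   -> True
#   OVN_BUILD_FROM_SOURCE=no  -> False
#   unset                     -> False, the first argument )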
+ for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package ; then + uninstall_package $package + fi + done + + remove_ovs_packages + sudo rm -f $OVS_RUNDIR/* + + compile_ovs $OVN_BUILD_MODULES + if use_new_ovn_repository; then + compile_ovn + fi + + sudo mkdir -p $OVS_PREFIX/var/log/openvswitch + sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch + sudo mkdir -p $OVS_PREFIX/var/log/ovn + sudo chown $(whoami) $OVS_PREFIX/var/log/ovn + else + install_package $(get_packages openvswitch) + install_package $(get_packages ovn) + fi + + # Ensure that the OVS commands are accessible in the PATH + export PATH=$OVS_BINDIR:$PATH + + # Archive log files and create new + local log_archive_dir=$LOGDIR/archive + mkdir -p $log_archive_dir + for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do + if [ -f "$LOGDIR/$logfile" ] ; then + mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}" + fi + done + + # Install ovsdbapp from source if requested + if use_library_from_git "ovsdbapp"; then + git_clone_by_name "ovsdbapp" + setup_dev_lib "ovsdbapp" + fi + + # Install ovs python module from ovs source. + if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then + sudo pip uninstall -y ovs + # Clone the OVS repository if it's not yet present + clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH + sudo pip install -e $DEST/$OVS_REPO_NAME/python + fi +} + +# filter_network_api_extensions() - Remove non-supported API extensions by +# the OVN driver from the list of enabled API extensions +function filter_network_api_extensions { + SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))') + SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))') + if is_service_enabled q-qos neutron-qos ; then + SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos" + fi + NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS} + extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext")) + disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext")) + + # Log a message in case some extensions had to be disabled because + # they are not supported by the OVN driver + if [ ! 
-z "$disabled_ext" ]; then + _disabled=$(echo $disabled_ext | tr ' ' ',') + echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled" + fi + + # Export the final list of extensions that have been enabled and are + # supported by OVN + export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',') +} + +function configure_ovn_plugin { + echo "Configuring Neutron for OVN" + + if is_service_enabled q-svc neutron-api; then + filter_network_api_extensions + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE" + if is_service_enabled tls-proxy; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER" + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" + inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + + if is_ovn_metadata_agent_enabled; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + else + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False + fi + + if is_service_enabled q-dns neutron-dns ; then + iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS" + fi + + iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE + fi + + if is_service_enabled q-dhcp neutron-dhcp ; then + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True + else + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False + fi + + if is_service_enabled n-api-meta ; then + if is_ovn_metadata_agent_enabled; then + iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + iniset $NOVA_CONF neutron service_metadata_proxy True + fi + fi +} + +function configure_ovn { + echo "Configuring OVN" + + if [ -z "$OVN_UUID" ] ; then + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) + else + OVN_UUID=$(uuidgen) + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) 
+ if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + fi + + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + + # Metadata + local sample_file="" + local config_file="" + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n "$config_file" ]; then + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR + + mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $sample_file $config_file + configure_root_helper_options $config_file + + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE + if is_service_enabled tls-proxy; then + iniset $config_file ovn \ + ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem + iniset $config_file ovn \ + ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt + iniset $config_file ovn \ + ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key + fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi + fi +} + +function init_ovn { + # clean up from previous (possibly aborted) runs + # create required data files + + # Assumption: this is a dedicated test system and there is nothing important + # in the ovn, ovn-nb, or ovs databases. We're going to trash them and + # create new ones on each devstack run. + + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" + + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" + fi + + $mkdir_cmd + mkdir -p $OVS_DATADIR + + rm -f $OVS_DATADIR/*.db + rm -f $OVS_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock +} + +function _start_ovs { + echo "Starting OVS" + if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then + # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names. + enable_service ovsdb-server + enable_service ovs-vswitchd + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + if [ ! -f $OVS_DATADIR/conf.db ]; then + ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema + fi + + if is_service_enabled ovn-controller-vtep; then + if [ ! 
-f $OVS_DATADIR/vtep.db ]; then + ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + fi + fi + + local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" + dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" + if is_service_enabled ovn-controller-vtep; then + dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + fi + dbcmd+=" $OVS_DATADIR/conf.db" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + # Note: ovn-controller will create and configure br-int once it is started. + # So, no need to create it now because nothing depends on that bridge here. + local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + else + _start_process "$OVSDB_SERVER_SERVICE" + _start_process "$OVS_VSWITCHD_SERVICE" + fi + + echo "Configuring OVSDB" + if is_service_enabled tls-proxy; then + sudo ovs-vsctl --no-wait set-ssl \ + $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + $INT_CA_DIR/ca-chain.pem + fi + + sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST + sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) + # Select this chassis to host gateway routers + if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" + fi + + if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then + ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} + fi + + if is_service_enabled ovn-controller-vtep ; then + ovn_base_setup_bridge br-v + vtep-ctl add-ps br-v + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP + + enable_service ovs-vtep + local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + vtep-ctl set-manager tcp:$HOST_IP:6640 + fi + fi +} + +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000
+    sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+    sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+    sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+}
+
+# start_ovn() - Start running processes, including screen
+function start_ovn {
+    echo "Starting OVN"
+
+    _start_ovs
+
+    local SCRIPTDIR=$OVN_SCRIPTDIR
+    if ! use_new_ovn_repository; then
+        SCRIPTDIR=$OVS_SCRIPTDIR
+    fi
+
+    if is_service_enabled ovn-northd ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
+
+            _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_NORTHD_SERVICE"
+        fi
+
+        _wait_for_ovn_and_set_custom_config
+    fi
+
+    if is_service_enabled ovn-controller ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
+
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_CONTROLLER_SERVICE"
+        fi
+    fi
+
+    if is_service_enabled ovn-controller-vtep ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR"
+        else
+            _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+        fi
+    fi
+
+    if is_ovn_metadata_agent_enabled; then
+        run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
+        # Format logging
+        setup_logging $OVN_META_CONF
+    fi
+
+    if is_ovn_agent_enabled; then
+        run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF"
+        # Format logging
+        setup_logging $OVN_AGENT_CONF
+    fi
+}
+
+function _stop_ovs_dp {
+    sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp
+    sudo modprobe -q -r vport_geneve vport_vxlan openvswitch || true
+}
+
+function _stop_process {
+    local service=$1
+    echo "Stopping process $service"
+    if $SYSTEMCTL is-enabled $service; then
+        $SYSTEMCTL stop $service
+        $SYSTEMCTL disable $service
+    fi
+}
+
+function stop_ovn {
+    # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled";
+    # instead it relies only on the configured services, disregarding the
+    # flag "TARGET_ENABLE_OVN_AGENT". This is needed to force the OVN
+    # metadata agent to stop when the flag "TARGET_ENABLE_OVN_AGENT" is set.
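+    # For example (illustrative scenario, inferred from the note above): when
+    # a deployment switches from the OVN metadata agent to the OVN agent by
+    # setting "TARGET_ENABLE_OVN_AGENT=True", q-ovn-metadata-agent may still
+    # be among the configured services and must be stopped here, even though
+    # "is_ovn_metadata_agent_enabled" would no longer report it as enabled.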
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-metadata-agent.service"
+    fi
+    if is_ovn_agent_enabled; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-agent.service"
+    fi
+    if is_service_enabled ovn-controller-vtep ; then
+        _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
+    fi
+    if is_service_enabled ovn-controller ; then
+        _stop_process "$OVN_CONTROLLER_SERVICE"
+    fi
+    if is_service_enabled ovn-northd ; then
+        _stop_process "$OVN_NORTHD_SERVICE"
+    fi
+    if is_service_enabled ovs-vtep ; then
+        _stop_process "devstack@ovs-vtep.service"
+    fi
+
+    _stop_process "$OVS_VSWITCHD_SERVICE"
+    _stop_process "$OVSDB_SERVER_SERVICE"
+
+    _stop_ovs_dp
+}
+
+function _cleanup {
+    local path=${1:-$DEST/$OVN_REPO_NAME}
+    pushd $path
+    sudo make uninstall
+    sudo make distclean
+    popd
+}
+
+# cleanup_ovn() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ovn {
+    local ovn_path=$DEST/$OVN_REPO_NAME
+    local ovs_path=$DEST/$OVS_REPO_NAME
+
+    if [ -d $ovn_path ]; then
+        _cleanup $ovn_path
+    fi
+
+    if [ -d $ovs_path ]; then
+        _cleanup $ovs_path
+    fi
+
+    sudo rm -rf $OVN_RUNDIR
+}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 1e293a187e..adabc56412 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -1,74 +1,118 @@
+#!/bin/bash
+#
 # common functions for ovs based plugin
 # -------------------------------------
 
 # Save trace setting
-OVSB_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace)
 set +o xtrace
 
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Defaults
+# --------
+
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+# OVS recognizes the default 'system' datapath, or 'netdev' for a userspace datapath
+OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system}
+OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun}
 
 function is_neutron_ovs_base_plugin {
     # Yes, we use OVS.
return 0 } +function _neutron_ovs_base_add_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl -- --may-exist add-br $bridge" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd +} + function _neutron_ovs_base_setup_bridge { local bridge=$1 - neutron-ovs-cleanup - sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge + neutron-ovs-cleanup --config-file $NEUTRON_CONF + _neutron_ovs_base_add_bridge $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } function neutron_ovs_base_cleanup { # remove all OVS ports that look like Neutron created ports - for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + for port in $(sudo ovs-vsctl list port | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} done # remove all OVS bridges created by Neutron - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE} -e ${OVS_TUNNEL_BRIDGE}); do sudo ovs-vsctl del-br ${bridge} done } -function _neutron_ovs_base_install_agent_packages { +function _neutron_ovs_base_install_ubuntu_dkms { + # install Dynamic Kernel Module Support packages if needed local kernel_version - # Install deps - # FIXME add to ``files/apts/neutron``, but don't install if not needed! - if is_ubuntu; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - elif is_fedora; then - install_package openvswitch - # Ensure that the service is started - restart_service openvswitch - elif is_suse; then - install_package openvswitch-switch - restart_service openvswitch-switch + kernel_version=$(uname -r) + local kernel_major_minor + kernel_major_minor=`echo $kernel_version | cut -d. -f1-2` + # From kernel 3.13 on, openvswitch-datapath-dkms is not needed + if vercmp "$kernel_major_minor" "<" "3.13" ; then + install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version" fi } -function _neutron_ovs_base_configure_debug_command { - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE +function _neutron_ovs_base_install_agent_packages { + if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then + remove_ovs_packages + compile_ovs False /usr/local /var + load_conntrack_gre_module + start_new_ovs + else + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms + restart_service openvswitch-switch + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch + fi + fi } function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver openvswitch + if ! 
running_in_container; then
+            enable_kernel_bridge_firewall
+        fi
     else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop
     fi
 }
 
 function _neutron_ovs_base_configure_l3_agent {
-    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+    neutron-ovs-cleanup --config-file $NEUTRON_CONF
+    if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then
+        ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
+            sudo ip link add $Q_PUBLIC_VETH_INT type veth \
+                peer name $Q_PUBLIC_VETH_EX
+        sudo ip link set $Q_PUBLIC_VETH_INT up
+        sudo ip link set $Q_PUBLIC_VETH_EX up
+        sudo ip addr flush dev $Q_PUBLIC_VETH_EX
+    else
+        _neutron_ovs_base_add_public_bridge
+        sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
+    fi
+}
-
-    neutron-ovs-cleanup
-    sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
-    sudo ovs-vsctl --no-wait br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
-    # ensure no IP is configured on the public bridge
-    sudo ip addr flush dev $PUBLIC_BRIDGE
+function _neutron_ovs_base_add_public_bridge {
+    _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
+    set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
 }
 
 function _neutron_ovs_base_configure_nova_vif_driver {
@@ -76,4 +120,4 @@ function _neutron_ovs_base_configure_nova_vif_driver {
 }
 
 # Restore xtrace
-$OVSB_XTRACE
+$_XTRACE_NEUTRON_OVS_BASE
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
new file mode 100644
index 0000000000..6b6f531a01
--- /dev/null
+++ b/lib/neutron_plugins/ovs_source
@@ -0,0 +1,214 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Defaults
+# --------
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT)
+
+# Set variables for building OVS from source
+OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
+OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
+OVS_REPO_NAME=${OVS_REPO_NAME:-ovs}
+OVS_BRANCH=${OVS_BRANCH:-branch-3.3}
+
+# Functions
+
+# load_module() - Load the module given as argument using modprobe, and die
+#                 on failure.
+#                 - the optional fatal argument says whether the function
+#                   should exit if the module can't be loaded
+function load_module {
+    local module=$1
+    local fatal=$2
+
+    if [ "$(trueorfalse True fatal)" == "True" ]; then
+        sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module")
+    else
+        sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg)
+    fi
+}
+
+# prepare_for_ovs_compilation() - Fetch the ovs git repository and install
+# the packages needed for compilation.
+function prepare_for_ovs_compilation {
+    local build_modules=${1:-False}
+    OVS_DIR=$DEST/$OVS_REPO_NAME
+
+    if [ ! -d $OVS_DIR ] ; then
+        # We can't use git_clone here because we want to ignore ERROR_ON_CLONE
+        git_timed clone $OVS_REPO $OVS_DIR
+        cd $OVS_DIR
+        git checkout $OVS_BRANCH
+    else
+        # Even though the directory already exists, call git_clone to update it
+        # if needed based on the RECLONE option
+        git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH
+        cd $OVS_DIR
+    fi
+
+    # TODO: Can you create package list files like you can inside devstack?
+    install_package autoconf automake libtool gcc patch make
+
+    # If build_modules is False, we don't need to install the kernel-*
+    # packages. Just return.
+    if [[ "$build_modules" == "False" ]]; then
+        return
+    fi
+
+    KERNEL_VERSION=`uname -r`
+    if is_fedora ; then
+        # is_fedora covers Fedora, RHEL, CentOS, etc...
+        if [[ "$os_VENDOR" == "Fedora" ]]; then
+            install_package elfutils-libelf-devel
+            KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1`
+        elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then
+            # a dash is an illegal character in an rpm version, so replace
+            # dashes with underscores as is done in the kernel
+            # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25
+            # but only for the latest series of the kernel, not 3.x
+
+            KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _`
+        fi
+
+        echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation
+        echo failed, please provide a repository with the package, or yum update / reboot
+        echo your machine to get the latest kernel.
+
+        install_package kernel-devel-$KERNEL_VERSION
+        install_package kernel-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package openssl-devel
+        fi
+
+    elif is_ubuntu ; then
+        install_package linux-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package libssl-dev
+        fi
+    fi
+}
+
+# load_ovs_kernel_modules() - load the openvswitch kernel modules
+function load_ovs_kernel_modules {
+    load_module openvswitch
+    load_module vport-geneve False
+    sudo dmesg | tail
+}
+
+# reload_ovs_kernel_modules() - reload the openvswitch kernel modules
+function reload_ovs_kernel_modules {
+    set +e
+    ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system)
+    if [ -n "$ovs_system" ]; then
+        sudo ovs-dpctl del-dp ovs-system
+    fi
+    set -e
+    sudo modprobe -r vport_geneve
+    sudo modprobe -r openvswitch
+    load_ovs_kernel_modules
+}
+
+# compile_ovs() - Compile OVS from source and load the needed modules.
+# Accepts three parameters:
+#  - first one is False by default and means that modules are not built and installed.
+#  - second optional parameter defines prefix for ovs compilation
+#  - third optional parameter defines localstatedir for ovs single machine runtime
+# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set
+function compile_ovs {
+    local _pwd=$PWD
+    local build_modules=${1:-False}
+    local prefix=$2
+    local localstatedir=$3
+
+    if [ -n "$prefix" ]; then
+        prefix="--prefix=$prefix"
+    fi
+
+    if [ -n "$localstatedir" ]; then
+        localstatedir="--localstatedir=$localstatedir"
+    fi
+
+    prepare_for_ovs_compilation $build_modules
+
+    KERNEL_VERSION=$(uname -r)
+    major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1)
+    patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2)
+    if [ "${major_version}" -gt 5 ] || { [ "${major_version}" -eq 5 ] && [ "${patch_level}" -gt 5 ]; }; then
+        echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling "
+        echo "Kernel module for version higher than 5.5. Skipping module compilation..."
+        build_modules="False"
+    fi
+
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        if [[ "$build_modules" == "True" ]]; then
+            ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build
+        else
+            ./configure $prefix $localstatedir
+        fi
+    fi
+    make -j$(($(nproc) + 1))
+    sudo make install
+    if [[ "$build_modules" == "True" ]]; then
+        sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install
+    fi
+    reload_ovs_kernel_modules
+
+    cd $_pwd
+}
+
+# action_openvswitch() - call an action on the openvswitch service
+# Accepts one parameter that can be 'start', 'restart' or 'stop'.
+function action_openvswitch {
+    local action=$1
+
+    if is_ubuntu; then
+        ${action}_service openvswitch-switch
+    elif is_fedora; then
+        ${action}_service openvswitch
+    fi
+}
+
+# start_new_ovs() - removes the old ovs database, creates a new one and starts ovs
+function start_new_ovs {
+    sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
+    sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
+}
+
+# stop_new_ovs() - stops ovs
+function stop_new_ovs {
+    local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl'
+
+    if [ -x $ovs_ctl ] ; then
+        sudo $ovs_ctl stop
+    fi
+}
+
+# remove_ovs_packages() - removes old ovs packages from the system
+function remove_ovs_packages {
+    for package in openvswitch openvswitch-switch openvswitch-common; do
+        if is_package_installed $package; then
+            uninstall_package $package
+        fi
+    done
+}
+
+
+# load_conntrack_gre_module() - loads the nf_conntrack_proto_gre kernel module
+function load_conntrack_gre_module {
+    load_module nf_conntrack_proto_gre False
+}
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
deleted file mode 100644
index 178bca7dc2..0000000000
--- a/lib/neutron_plugins/plumgrid
+++ /dev/null
@@ -1,57 +0,0 @@
-# PLUMgrid Neutron Plugin
-# Edgar Magana emagana@plumgrid.com
-# ------------------------------------
-
-# Save trace settings
-PG_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function neutron_plugin_create_nova_conf {
-    :
-}
-
-function neutron_plugin_setup_interface_driver {
-    :
-}
-
-function neutron_plugin_configure_common {
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
-    Q_PLUGIN_CONF_FILENAME=plumgrid.ini
-    Q_DB_NAME="plumgrid_neutron"
-    Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
-    PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
-    PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
-    PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
-    PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
-    PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
-    PLUMGRID_DRIVER=${PLUMGRID_DRIVER:-neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib}
-}
-
-function neutron_plugin_configure_service {
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server $PLUMGRID_DIRECTOR_IP
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server_port $PLUMGRID_DIRECTOR_PORT
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector username $PLUMGRID_ADMIN
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector password $PLUMGRID_PASSWORD
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector servertimeout $PLUMGRID_TIMEOUT
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector driver $PLUMGRID_DRIVER
-}
-
-function neutron_plugin_configure_debug_command {
-    :
-}
-
-function is_neutron_ovs_base_plugin {
-    # False
-    return 1
-}
-
-function has_neutron_plugin_security_group {
-    # False
-    return 1
-}
-
-function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt &&
is_service_enabled q-dhcp && return 0 -} -# Restore xtrace -$PG_XTRACE diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu deleted file mode 100644 index ceb89faf6e..0000000000 --- a/lib/neutron_plugins/ryu +++ /dev/null @@ -1,80 +0,0 @@ -# Neutron Ryu plugin -# ------------------ - -# Save trace setting -RYU_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base -source $TOP_DIR/lib/neutron_thirdparty/ryu # for configuration value - -function neutron_plugin_create_nova_conf { - _neutron_ovs_base_configure_nova_vif_driver - iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" -} - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages - - # neutron_ryu_agent requires ryu module - install_package $(get_packages "ryu") - install_ryu - configure_ryu -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu - Q_PLUGIN_CONF_FILENAME=ryu.ini - Q_DB_NAME="ovs_neutron" - Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2" -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT -} - -function neutron_plugin_configure_dhcp_agent { - iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT -} - -function neutron_plugin_configure_l3_agent { - iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - _neutron_ovs_base_configure_l3_agent -} - -function neutron_plugin_configure_plugin_agent { - # Set up integration bridge - _neutron_ovs_base_setup_bridge $OVS_BRIDGE - if [ -n "$RYU_INTERNAL_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE - fi - iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE - AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/ryu/agent/ryu_neutron_agent.py" - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_configure_service { - iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver - iniset $conf_file DEFAULT ovs_use_veth True -} - -function has_neutron_plugin_security_group { - # 0 means True here - return 0 -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$RYU_XTRACE diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall deleted file mode 100644 index b5253dbeef..0000000000 --- a/lib/neutron_plugins/services/firewall +++ /dev/null @@ -1,27 +0,0 @@ -# Neutron firewall plugin -# --------------------------- - -# Save trace setting -FW_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin - -function neutron_fwaas_configure_common { - _neutron_service_plugin_class_add $FWAAS_PLUGIN -} - -function neutron_fwaas_configure_driver { - FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini - cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME - - iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True - iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver 
"neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" -} - -function neutron_fwaas_stop { - : -} - -# Restore xtrace -$FW_XTRACE diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 new file mode 100644 index 0000000000..bbedc57a44 --- /dev/null +++ b/lib/neutron_plugins/services/l3 @@ -0,0 +1,436 @@ +#!/bin/bash +# Subnet IP version +IP_VERSION=${IP_VERSION:-"4+6"} +# Validate IP_VERSION +if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then + die $LINENO "IP_VERSION must be either 4, 6, or 4+6" +fi +# Specify if the initial private and external networks should be created +NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} + +## Provider Network Information +PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"} +IPV6_PROVIDER_SUBNET_NAME=${IPV6_PROVIDER_SUBNET_NAME:-"provider_net_v6"} +IPV6_PROVIDER_FIXED_RANGE=${IPV6_PROVIDER_FIXED_RANGE:-} +IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} + +PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} +PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} + +# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public +# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is +# used. +Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} + +# The name of the default router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} + +# If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of +# PUBLIC_BRIDGE. This is intended to be used with +# Q_USE_PROVIDERNET_FOR_PUBLIC=True. +Q_USE_PUBLIC_VETH=${Q_USE_PUBLIC_VETH:-False} +Q_PUBLIC_VETH_EX=${Q_PUBLIC_VETH_EX:-veth-pub-ex} +Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} + +# The next variable is configured by plugin +# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* +# +# L3 routers exist per tenant +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} + + +# Use providernet for public network +# +# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network +# for external interface of neutron l3-agent. In that case, +# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value +# used for the network. In case of ofagent, you should add the +# corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. +# For openvswitch agent, you should add the corresponding entry to +# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry +# to your OVN_BRIDGE_MAPPINGS. +# +# eg. (ofagent) +# Q_USE_PROVIDERNET_FOR_PUBLIC=True +# Q_USE_PUBLIC_VETH=True +# PUBLIC_PHYSICAL_NETWORK=public +# OFAGENT_PHYSICAL_INTERFACE_MAPPINGS=public:veth-pub-int +# +# eg. (openvswitch agent) +# Q_USE_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVS_BRIDGE_MAPPINGS=public:br-ex +# +# eg. (ovn agent) +# Q_USER_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVN_BRIDGE_MAPPINGS=public:br-ex +# +# The provider-network-type defaults to flat, however, the values +# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could +# be set to specify the parameters for an alternate network type. 
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True} +PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} + +# Generate 40-bit IPv6 Global ID to comply with RFC 4193 +IPV6_GLOBAL_ID=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` + +# IPv6 gateway and subnet defaults, in case they are not customized in localrc +IPV6_RA_MODE=${IPV6_RA_MODE:-slaac} +IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac} +IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} +IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} +IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56} +# if we got larger than a /64 safe to use, we only use the first /64 to +# avoid side effects outlined in rfc7421 +FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }')} +IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-} +IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} +IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} +IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1} + +# Gateway and subnet defaults, in case they are not customized in localrc +NETWORK_GATEWAY=${NETWORK_GATEWAY:-} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-} +PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} +PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} + +# Subnetpool defaults +USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} +SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"} +SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"} + +SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE} +SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} + +SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} +SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} + +default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') + +default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') + +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} + +function _configure_neutron_l3_agent { + + cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + _neutron_setup_interface_driver $Q_L3_CONF_FILE + + neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE + + _configure_public_network_connectivity +} + +# Explicitly set router id in l3 agent configuration +function _neutron_set_router_id { + if [[ "$Q_L3_ROUTER_PER_TENANT" == "False" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi +} + +# Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH +function _neutron_get_ext_gw_interface { + if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then + echo $Q_PUBLIC_VETH_EX + else + # Disable in-band as we are going to use local port + # to communicate with VMs + sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ + other_config:disable-in-band=true + echo $PUBLIC_BRIDGE + fi +} + +function create_neutron_initial_network { + # Allow drivers that need to create an initial network to do so here + if type -p neutron_plugin_create_initial_network_profile > /dev/null; then + neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK + fi + + if 
is_networking_extension_supported "auto-allocated-topology"; then + if [[ "$USE_SUBNETPOOL" == "True" ]]; then + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + fi + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + fi + fi + fi + + if is_provider_network; then + die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" + die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE + fi + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" + fi + + if [[ $Q_AGENT == "openvswitch" ]]; then + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + fi + else + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + # Create IPv4 private subnet + SUBNET_ID=$(_neutron_create_private_subnet_v4) + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Create IPv6 private subnet + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) + fi + fi + + if is_networking_extension_supported "router" && is_networking_extension_supported "external-net"; then + # Create a router, and add the private subnet as one of its interfaces + if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then + # 
create a tenant-owned router. + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" + else + # Plugin only supports creating a single router, which should be admin owned. + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" + fi + + EXTERNAL_NETWORK_FLAGS="--external" + if is_networking_extension_supported "auto-allocated-topology"; then + EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default" + fi + # Create an external network, and a subnet. Configure the external network as router gw + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) + else + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) + fi + die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + # Configure router for IPv4 public access + _neutron_configure_router_v4 + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Configure router for IPv6 public access + _neutron_configure_router_v6 + fi + fi +} + +# Create private IPv4 subnet +function _neutron_create_private_subnet_v4 { + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + local subnet_params="--ip-version 4 " + if [[ -n "$NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $NETWORK_GATEWAY " + fi + + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " + subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " + subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" + local subnet_id + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" + echo $subnet_id +} + +# Create private IPv6 subnet +function _neutron_create_private_subnet_v6 { + die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" + die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" + local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$FIXED_RANGE_V6 + fi + local subnet_params="--ip-version 6 " + if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + fi + subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} " + subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " + subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " + local ipv6_subnet_id + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" + echo $ipv6_subnet_id +} + +# Create public IPv4 subnet +function _neutron_create_public_subnet_v4 { + local subnet_params="--ip-version 4 " + 
subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " + if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + fi + subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " + subnet_params+="$PUBLIC_SUBNET_NAME" + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" + echo $id_and_ext_gw_ip +} + +# Create public IPv6 subnet +function _neutron_create_public_subnet_v6 { + local subnet_params="--ip-version 6 " + subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " + subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " + subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" + local ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" + echo $ipv6_id_and_ext_gw_ip +} + +# Configure neutron router for IPv4 public access +function _neutron_configure_router_v4 { + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + # Create a public subnet on the external network + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) + local ext_gw_ip + ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) + PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) + # Configure the external network as the default router gateway + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then + # Configure and enable public bridge + local ext_gw_interface="none" + if is_neutron_ovs_base_plugin; then + ext_gw_interface=$(_neutron_get_ext_gw_interface) + fi + if [[ "$ext_gw_interface" != "none" ]]; then + local cidr_len=${FLOATING_RANGE#*/} + local testcmd="ip -o link | grep -q $ext_gw_interface" + test_with_retry "$testcmd" "$ext_gw_interface creation failed" + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up + fi + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" + fi + _neutron_set_router_id + fi +} + +# Configure neutron router for IPv6 public access +function _neutron_configure_router_v6 { + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + # Create a public subnet on the external network + local ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) + local ipv6_ext_gw_ip + ipv6_ext_gw_ip=$(echo 
$ipv6_id_and_ext_gw_ip | get_field 2)
+    local ipv6_pub_subnet_id
+    ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
+
+    # If the external network has not already been set as the default router
+    # gateway when configuring an IPv4 public subnet, do so now
+    if [[ "$IP_VERSION" == "6" ]]; then
+        openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+    fi
+
+    # This logic is specific to using OVN or the l3-agent for layer 3
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
+        # If the Linux host considers itself to be a router, it will
+        # ignore all router advertisements, so ensure IPv6 RAs are
+        # accepted on interfaces with a default route.
+        # This is needed for neutron-based devstack clouds to work in
+        # IPv6-only clouds in the gate. Please do not remove this without
+        # talking to folks in Infra.
+        for d in $default_v6_route_devs; do
+            # Slashes must be used in this sysctl command because route devices
+            # can have dots in their names. If dots were used, dots in the
+            # device name would be reinterpreted as a slash, causing an error.
+            sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
+        done
+        # Ensure IPv6 forwarding is enabled on the host
+        sudo sysctl -w net.ipv6.conf.all.forwarding=1
+        # Configure and enable public bridge
+        # Override global IPV6_ROUTER_GW_IP with the true value from neutron
+        # NOTE(slaweq): when enforce scopes is enabled in Neutron, the router's
+        # gateway ports aren't visible in the API because such ports don't
+        # belong to any tenant. Because of that, at least temporarily, we need
+        # to find the IPv6 address of the router's gateway in a slightly
+        # different way. This can be reverted once bug
+        # https://bugs.launchpad.net/neutron/+bug/1959332 is fixed.
+        IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"')
+        die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
+
+        if is_neutron_ovs_base_plugin; then
+            local ext_gw_interface
+            ext_gw_interface=$(_neutron_get_ext_gw_interface)
+            local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
+
+            # Configure the interface for the public bridge by setting it
+            # to "up", in case the job is running entirely private network
+            # based testing.
+            sudo ip link set $ext_gw_interface up
+            sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
+            # Any IPv6 private subnet that uses the default IPV6 subnet pool
+            # and that is plugged into the default router (Q_ROUTER_NAME) will
+            # be reachable from the devstack node (ex: ipv6-private-subnet).
+            # Some scenario tests (such as octavia-tempest-plugin) rely heavily
+            # on this feature.
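+            # For illustration only: with the defaults above, the route
+            # command below ends up roughly like
+            #   ip -6 route replace fd12:3456:789a::/56 via 2001:db8::1 dev br-ex
+            # (the actual prefix, gateway IP and device depend on the
+            # generated IPV6_GLOBAL_ID and the deployment).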
+            local replace_range=${SUBNETPOOL_PREFIX_V6}
+            if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
+                replace_range=${FIXED_RANGE_V6}
+            fi
+            sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+        fi
+        _neutron_set_router_id
+    fi
+}
+
+function is_networking_extension_supported {
+    local extension=$1
+    # TODO(sc68cal) cache this instead of calling every time
+    EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
+    [[ $EXT_LIST =~ $extension ]] && return 0
+}
+
+function plugin_agent_add_l3_agent_extension {
+    local l3_agent_extension=$1
+    if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then
+        L3_AGENT_EXTENSIONS=$l3_agent_extension
+    elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then
+        L3_AGENT_EXTENSIONS+=",$l3_agent_extension"
+    fi
+}
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
deleted file mode 100644
index 78e7738345..0000000000
--- a/lib/neutron_plugins/services/loadbalancer
+++ /dev/null
@@ -1,51 +0,0 @@
-# Neutron loadbalancer plugin
-# ---------------------------
-
-# Save trace setting
-LB_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
-LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin
-
-function neutron_agent_lbaas_install_agent_packages {
-    if is_ubuntu || is_fedora; then
-        install_package haproxy
-    elif is_suse; then
-        ### FIXME: Find out if package can be pushed to Factory
-        echo "HAProxy packages can be installed from server:http project in OBS"
-    fi
-}
-
-function neutron_agent_lbaas_configure_common {
-    _neutron_service_plugin_class_add $LBAAS_PLUGIN
-}
-
-function neutron_agent_lbaas_configure_agent {
-    LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
-    mkdir -p $LBAAS_AGENT_CONF_PATH
-
-    LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
-
-    cp $NEUTRON_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
-
-    # ovs_use_veth needs to be set before the plugin configuration
-    # occurs to allow plugins to override the setting.
-    iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
-    neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
-
-    if is_fedora; then
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
-        iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
-    fi
-}
-
-function neutron_lbaas_stop {
-    pids=$(ps aux | awk '/haproxy/ { print $2 }')
-    [ ! -z "$pids" ] && sudo kill $pids
-}
-
-# Restore xtrace
-$LB_XTRACE
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 51123e2ff8..757a562ee6 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -1,8 +1,10 @@
+#!/bin/bash
+
 # Neutron metering plugin
 # ---------------------------
 
 # Save trace setting
-METER_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_METER=$(set +o | grep xtrace)
 set +o xtrace
 
@@ -10,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent"
 METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
 
 function neutron_agent_metering_configure_common {
-    _neutron_service_plugin_class_add $METERING_PLUGIN
+    neutron_service_plugin_class_add $METERING_PLUGIN
 }
 
 function neutron_agent_metering_configure_agent {
@@ -19,12 +21,13 @@ function neutron_agent_metering_configure_agent {
 
     METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini"
 
-    cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME
+    cp $NEUTRON_DIR/etc/metering_agent.ini.sample $METERING_AGENT_CONF_FILENAME
 }
 
 function neutron_metering_stop {
-    :
+    stop_process q-metering
 }
 
 # Restore xtrace
-$METER_XTRACE
+$_XTRACE_NEUTRON_METER
+
diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement
new file mode 100644
index 0000000000..3ec185bae6
--- /dev/null
+++ b/lib/neutron_plugins/services/placement
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+function configure_placement_service_plugin {
+    neutron_service_plugin_class_add "placement"
+}
+
+function configure_placement_neutron {
+    iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE"
+    iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
+    iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME"
+    iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD"
+    iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME"
+    iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $NEUTRON_CONF placement region_name "$REGION_NAME"
+}
+
+function configure_placement_extension {
+    configure_placement_service_plugin
+    configure_placement_neutron
+}
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
new file mode 100644
index 0000000000..c11c315586
--- /dev/null
+++ b/lib/neutron_plugins/services/qos
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+function configure_qos_service_plugin {
+    neutron_service_plugin_class_add "qos"
+}
+
+
+function configure_qos_core_plugin {
+    configure_qos_$Q_PLUGIN
+}
+
+
+function configure_qos_l2_agent {
+    plugin_agent_add_l2_agent_extension "qos"
+}
+
+
+function configure_qos {
+    configure_qos_service_plugin
+    configure_qos_core_plugin
+    configure_qos_l2_agent
+}
+
+function configure_l3_agent_extension_fip_qos {
+    plugin_agent_add_l3_agent_extension "fip_qos"
+}
+
+function configure_l3_agent_extension_gateway_ip_qos {
+    plugin_agent_add_l3_agent_extension "gateway_ip_qos"
+}
diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments
new file mode 100644
index 0000000000..08936bae49
--- /dev/null
+++ b/lib/neutron_plugins/services/segments
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+function configure_segments_service_plugin {
+    neutron_service_plugin_class_add segments
+}
+
+function configure_segments_extension {
+    configure_segments_service_plugin
+}
+
diff --git
a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- /dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn deleted file mode 100644 index d920ba621f..0000000000 --- a/lib/neutron_plugins/services/vpn +++ /dev/null @@ -1,33 +0,0 @@ -# Neutron VPN plugin -# --------------------------- - -# Save trace setting -VPN_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" -VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" -IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} - -function neutron_vpn_install_agent_packages { - install_package $IPSEC_PACKAGE -} - -function neutron_vpn_configure_common { - _neutron_service_plugin_class_add $VPN_PLUGIN -} - -function neutron_vpn_stop { - local ipsec_data_dir=$DATA_DIR/neutron/ipsec - local pids - if [ -d $ipsec_data_dir ]; then - pids=$(find $ipsec_data_dir -name 'pluto.pid' -exec cat {} \;) - fi - if [ -n "$pids" ]; then - sudo kill $pids - fi -} - -# Restore xtrace -$VPN_XTRACE diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx deleted file mode 100644 index f2f87355ef..0000000000 --- a/lib/neutron_plugins/vmware_nsx +++ /dev/null @@ -1,149 +0,0 @@ -# Neutron VMware NSX plugin -# ------------------------- - -# Save trace setting -NSX_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -function setup_integration_bridge { - _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Set manager to NSX controller (1st of list) - if [[ "$NSX_CONTROLLERS" != "" ]]; then - # Get the first controller - controllers=(${NSX_CONTROLLERS//,/ }) - OVS_MGR_IP=${controllers[0]} - else - die $LINENO "Error - No controller specified. Unable to set a manager for OVS" - fi - sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP -} - -function is_neutron_ovs_base_plugin { - # NSX uses OVS, but not the l3-agent - return 0 -} - -function neutron_plugin_create_nova_conf { - # if n-cpu is enabled, then setup integration bridge - if is_service_enabled n-cpu; then - setup_integration_bridge - fi -} - -function neutron_plugin_install_agent_packages { - # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents - _neutron_ovs_base_install_agent_packages -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware - Q_PLUGIN_CONF_FILENAME=nsx.ini - Q_DB_NAME="neutron_nsx" - Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin" -} - -function neutron_plugin_configure_debug_command { - sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" -} - -function neutron_plugin_configure_dhcp_agent { - setup_integration_bridge - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True - iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True -} - -function neutron_plugin_configure_l3_agent { - # VMware NSX plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" -} - -function neutron_plugin_configure_plugin_agent { - # VMware NSX plugin does not run L2 agent - die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
-} - -function neutron_plugin_configure_service { - if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS - fi - if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS - fi - if [[ "$FAILOVER_TIME" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME - fi - if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS - fi - - if [[ "$DEFAULT_TZ_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID - else - die $LINENO "The VMware NSX plugin won't work without a default transport zone." - fi - if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID - Q_L3_ENABLED=True - Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network - fi - if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID - fi - # NSX_CONTROLLERS must be a comma separated string - if [[ "$NSX_CONTROLLERS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS - else - die $LINENO "The VMware NSX plugin needs at least an NSX controller." - fi - if [[ "$NSX_USER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER - fi - if [[ "$NSX_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD - fi - if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT - fi - if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT - fi - if [[ "$NSX_RETRIES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES - fi - if [[ "$NSX_REDIRECTS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS - fi - if [[ "$AGENT_MODE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE - if [[ "$AGENT_MODE" == "agentless" ]]; then - if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID - else - die $LINENO "Agentless mode requires a service cluster." - fi - iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP - fi - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver -} - -function has_neutron_plugin_security_group { - # 0 means True here - return 0 -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$NSX_XTRACE diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md deleted file mode 100644 index 2460e5cac7..0000000000 --- a/lib/neutron_thirdparty/README.md +++ /dev/null @@ -1,39 +0,0 @@ -Neutron third party specific files -================================== -Some Neutron plugins require third party programs to function. -The files under the directory, ``lib/neutron_thirdparty/``, will be used -when their service are enabled. -Third party program specific configuration variables should be in this file. - -* filename: ```` - * The corresponding file name should be same to service name, ````. 
- -functions ---------- -``lib/neutron`` calls the following functions when the ```` is enabled - -functions to be implemented -* ``configure_``: - set config files, create data dirs, etc - e.g. - sudo python setup.py deploy - iniset $XXXX_CONF... - -* ``init_``: - initialize databases, etc - -* ``install_``: - collect source and prepare - e.g. - git clone xxx - -* ``start_``: - start running processes, including screen - e.g. - screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin" - -* ``stop_``: - stop running processes (non-screen) - -* ``check_``: - verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight deleted file mode 100644 index 033731e27c..0000000000 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ /dev/null @@ -1,52 +0,0 @@ -# Big Switch/FloodLight OpenFlow Controller -# ------------------------------------------ - -# Save trace setting -BS3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} -BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} - -function configure_bigswitch_floodlight { - : -} - -function init_bigswitch_floodlight { - install_neutron_agent_packages - - echo -n "Installing OVS managed by the openflow controllers:" - echo ${BS_FL_CONTROLLERS_PORT} - - # Create local OVS bridge and configure it - sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} - sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} - - ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do - ctrl=${ctrl%:*} - ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" - done - echo "Adding Network conttrollers: " ${ctrls} - sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} -} - -function install_bigswitch_floodlight { - : -} - -function start_bigswitch_floodlight { - : -} - -function stop_bigswitch_floodlight { - : -} - -function check_bigswitch_floodlight { - : -} - -# Restore xtrace -$BS3_XTRACE diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet deleted file mode 100644 index 099a66eb2d..0000000000 --- a/lib/neutron_thirdparty/midonet +++ /dev/null @@ -1,49 +0,0 @@ -# MidoNet -# ------- - -# This file implements functions required to configure MidoNet as the third-party -# system used with devstack's Neutron. 
To include this file, specify the following -# variables in localrc: -# -# * enable_service midonet -# - -# MidoNet devstack destination dir -MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} - -# MidoNet client repo -MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} -MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} -MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} - -# Save trace setting -MN3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -function configure_midonet { - : -} - -function init_midonet { - : -} - -function install_midonet { - git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH - export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH -} - -function start_midonet { - : -} - -function stop_midonet { - : -} - -function check_midonet { - : -} - -# Restore xtrace -$MN3_XTRACE diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu deleted file mode 100644 index bbe227eeb9..0000000000 --- a/lib/neutron_thirdparty/ryu +++ /dev/null @@ -1,78 +0,0 @@ -# Ryu OpenFlow Controller -# ----------------------- - -# Save trace setting -RYU3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -RYU_DIR=$DEST/ryu -# Ryu API Host -RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} -# Ryu API Port -RYU_API_PORT=${RYU_API_PORT:-8080} -# Ryu OFP Host -RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} -# Ryu OFP Port -RYU_OFP_PORT=${RYU_OFP_PORT:-6633} -# Ryu Applications -RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} - -function configure_ryu { - : -} - -function init_ryu { - RYU_CONF_DIR=/etc/ryu - if [[ ! -d $RYU_CONF_DIR ]]; then - sudo mkdir -p $RYU_CONF_DIR - fi - sudo chown $STACK_USER $RYU_CONF_DIR - RYU_CONF=$RYU_CONF_DIR/ryu.conf - sudo rm -rf $RYU_CONF - - # Ryu configuration - RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"[DEFAULT] -app_lists=$RYU_APPS -wsapi_host=$RYU_API_HOST -wsapi_port=$RYU_API_PORT -ofp_listen_host=$RYU_OFP_HOST -ofp_tcp_listen_port=$RYU_OFP_PORT -neutron_url=http://$Q_HOST:$Q_PORT -neutron_admin_username=$Q_ADMIN_USERNAME -neutron_admin_password=$SERVICE_PASSWORD -neutron_admin_tenant_name=$SERVICE_TENANT_NAME -neutron_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 -neutron_auth_strategy=$Q_AUTH_STRATEGY -neutron_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT -"} - echo "${RYU_CONF_CONTENTS}" > $RYU_CONF -} - -# install_ryu can be called multiple times as neutron_pluing/ryu may call -# this function for neutron-ryu-agent -# Make this function idempotent and avoid cloning same repo many times -# with RECLONE=yes -_RYU_INSTALLED=${_RYU_INSTALLED:-False} -function install_ryu { - if [[ "$_RYU_INSTALLED" == "False" ]]; then - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH - export PYTHONPATH=$RYU_DIR:$PYTHONPATH - _RYU_INSTALLED=True - fi -} - -function start_ryu { - screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF" -} - -function stop_ryu { - : -} - -function check_ryu { - : -} - -# Restore xtrace -$RYU3_XTRACE diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema deleted file mode 100644 index f829aa82ff..0000000000 --- a/lib/neutron_thirdparty/trema +++ /dev/null @@ -1,117 +0,0 @@ -# Trema Sliceable Switch -# ---------------------- - -# Trema is a Full-Stack OpenFlow Framework in Ruby and C -# https://github.com/trema/trema -# -# Trema Sliceable Switch is an OpenFlow controller which provides -# virtual layer-2 network slices. 
-# https://github.com/trema/apps/wiki - -# Trema Sliceable Switch (OpenFlow Controller) -TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git} -TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master} - -# Save trace setting -TREMA3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -TREMA_DIR=${TREMA_DIR:-$DEST/trema} -TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch" - -TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema} -TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc -TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db -TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script -TREMA_TMP_DIR=$TREMA_DATA_DIR/trema - -TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info} - -TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf -TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf - -# configure_trema - Set config files, create data dirs, etc -function configure_trema { - # prepare dir - for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do - sudo mkdir -p $d - sudo chown -R `whoami` $d - done - sudo mkdir -p $TREMA_TMP_DIR -} - -# init_trema - Initialize databases, etc. -function init_trema { - local _pwd=$(pwd) - - # Initialize databases for Sliceable Switch - cd $TREMA_SS_DIR - rm -f filter.db slice.db - ./create_tables.sh - mv filter.db slice.db $TREMA_SS_DB_DIR - # Make sure that apache cgi has write access to the databases - sudo chown -R www-data.www-data $TREMA_SS_DB_DIR - cd $_pwd - - # Setup HTTP Server for sliceable_switch - cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR - sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \ - $TREMA_SS_SCRIPT_DIR/config.cgi - - sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG - sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \ - $TREMA_SS_APACHE_CONFIG - sudo a2enmod rewrite actions - sudo a2ensite sliceable_switch.conf - - cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG - sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ - -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ - $TREMA_SS_CONFIG -} - -function gem_install { - [[ "$OFFLINE" = "True" ]] && return - [ -n "$RUBYGEMS_CMD" ] || get_gem_command - - local pkg=$1 - $RUBYGEMS_CMD list | grep "^${pkg} " && return - sudo $RUBYGEMS_CMD install $pkg -} - -function get_gem_command { - # Trema requires ruby 1.8, so gem1.8 is checked first - RUBYGEMS_CMD=$(which gem1.8 || which gem) - if [ -z "$RUBYGEMS_CMD" ]; then - echo "Warning: ruby gems command not found." - fi -} - -function install_trema { - # Trema - gem_install trema - # Sliceable Switch - git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH - make -C $TREMA_DIR/apps/topology - make -C $TREMA_DIR/apps/flow_manager - make -C $TREMA_DIR/apps/sliceable_switch -} - -function start_trema { - # APACHE_NAME is defined in init_horizon (in lib/horizon) - restart_service $APACHE_NAME - - sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \ - trema run -d -c $TREMA_SS_CONFIG -} - -function stop_trema { - sudo TREMA_TMP=$TREMA_TMP_DIR trema killall -} - -function check_trema { - : -} - -# Restore xtrace -$TREMA3_XTRACE diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx deleted file mode 100644 index 7a76570775..0000000000 --- a/lib/neutron_thirdparty/vmware_nsx +++ /dev/null @@ -1,86 +0,0 @@ -# VMware NSX -# ---------- - -# This third-party addition can be used to configure connectivity between a DevStack instance -# and an NSX Gateway in dev/test environments. 
In order to use this correctly, the following -# env variables need to be set (e.g. in your localrc file): -# -# * enable_service vmware_nsx --> to execute this third-party addition -# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex -# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway -# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24 - -# Save trace setting -NSX3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# This is the interface that connects the Devstack instance -# to an network that allows it to talk to the gateway for -# testing purposes -NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} -# Re-declare floating range as it's needed also in stop_vmware_nsx, which -# is invoked by unstack.sh -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} - -function configure_vmware_nsx { - : -} - -function init_vmware_nsx { - if ! is_set NSX_GATEWAY_NETWORK_CIDR; then - NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} - echo "The IP address to set on br-ex was not specified. " - echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR - fi - # Make sure the interface is up, but not configured - sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up - # Save and then flush the IP addresses on the interface - addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) - sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE - # Use the PUBLIC Bridge to route traffic to the NSX gateway - # NOTE(armando-migliaccio): if running in a nested environment this will work - # only with mac learning enabled, portsecurity and security profiles disabled - # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off - # Try to create it anyway - sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE - nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE - for address in $addresses; do - sudo ip addr add dev $PUBLIC_BRIDGE $address - done - sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR -} - -function install_vmware_nsx { - : -} - -function start_vmware_nsx { - : -} - -function stop_vmware_nsx { - if ! is_set NSX_GATEWAY_NETWORK_CIDR; then - NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} - echo "The IP address expected on br-ex was not specified. 
" - echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR - fi - sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE - # Save and then flush remaining addresses on the interface - addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) - sudo ip addr flush $PUBLIC_BRIDGE - # Try to detach physical interface from PUBLIC_BRIDGE - sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE - # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE - for address in $addresses; do - sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address - done -} - -function check_vmware_nsx { - neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini -} - -# Restore xtrace -$NSX3_XTRACE diff --git a/lib/nova b/lib/nova index c51d584728..460b4adc85 100644 --- a/lib/nova +++ b/lib/nova @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova # Functions to control the configuration and operation of the **Nova** service @@ -5,6 +7,7 @@ # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``LIBVIRT_TYPE`` must be defined # - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined @@ -21,62 +24,112 @@ # - cleanup_nova # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_NOVA=$(set +o | grep xtrace) set +o xtrace - # Defaults # -------- # Set up default directories +GITDIR["python-novaclient"]=$DEST/python-novaclient +GITDIR["os-vif"]=$DEST/os-vif NOVA_DIR=$DEST/nova -NOVACLIENT_DIR=$DEST/python-novaclient + +# Nova virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["nova"]=${NOVA_DIR}.venv + NOVA_BIN_DIR=${PROJECT_VENV["nova"]}/bin +else + NOVA_BIN_DIR=$(get_python_exec_prefix) +fi + NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} -NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf -NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf -NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} +NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf +NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf +NOVA_API_DB=${NOVA_API_DB:-nova_api} +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application +NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini +NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini + +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + +# The total number of cells we expect. Must be greater than one and doesn't +# count cell0. +NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} +# Our cell index, so we know what rabbit vhost to connect to. +# This should be in the range of 1-$NOVA_NUM_CELLS +NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. 
+NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} + +if is_service_enabled tls-proxy; then + NOVA_SERVICE_PROTOCOL="https" +fi + +# Whether to use TLS for comms between the VNC/SPICE/serial proxy +# services and the compute node +NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} + +# Validate configuration +if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" +fi + # Public facing bits NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Support entry points installation of console scripts -if [[ -d $NOVA_DIR/bin ]]; then - NOVA_BIN_DIR=$NOVA_DIR/bin +NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} +NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" else - NOVA_BIN_DIR=$(get_python_exec_prefix) + NOVA_MY_IP="$HOST_IP" fi -# Set the paths of certain binaries -NOVA_ROOTWRAP=$(get_rootwrap_location nova) - -# Allow rate limiting to be turned off for testing, like for Tempest -# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting -API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} - # Option to enable/disable config drive -# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive -FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"} +# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} -# Nova supports pluggable schedulers. The default ``FilterScheduler`` -# should work in most cases. -SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} +# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with +# the default filters. +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -GUEST_INTERFACE_DEFAULT=eth0 -FLAT_NETWORK_BRIDGE_DEFAULT=br100 +# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. +# In multi-node setups allows compute hosts to not run ``n-novnc``. +NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. 
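# (Illustration: a compute-only subnode that should still get VNC
# configuration without running the proxy itself might set, in local.conf:
#     NOVA_VNC_ENABLED=True
# The same pattern applies to the Spice and serial toggles below.)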
+NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -87,44 +140,36 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER fi +# Other Nova configurations +# ---------------------------- -# Nova Network Configuration -# -------------------------- +# ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external RESP API's like Neutron, Cinder +# and Glance. +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) -NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} -VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} -EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} +# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, +# where there are at least two nova-computes. +NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) -# If you are using the FlatDHCP network mode on multiple hosts, set the -# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already -# have an IP or you risk breaking things. -# -# **DHCP Warning**: If your flat interface device uses DHCP, there will be a -# hiccup while the network is moved from the flat interface to the flat network -# bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the VM launch will probably -# fail. -# -# If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set ``FLAT_INTERFACE=`` -# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. -FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} +# Enable debugging levels for iscsid service (goes from 0-8) +ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) +ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} -# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This -# allows network operations and routing for a VM to occur on the server that is -# running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=`trueorfalse False $MULTI_HOST` +# Format for notifications. Nova defaults to "unversioned" since Train. +# Other options include "versioned" and "both". +NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} -# Test floating pool and range are used for testing. They are defined -# here until the admin APIs can replace nova-manage -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Timeout for servers to gracefully shutdown the OS during operations +# like shelve, rescue, stop, rebuild. Defaults to 0 since the default +# image in devstack is CirrOS. +NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,nova +# Whether to use Keystone unified limits instead of legacy quota limits. 
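# (Illustration: unified limits are opt-in; a local.conf wanting to
# exercise them would set
#     NOVA_USE_UNIFIED_LIMITS=True
# which is intended to register default limits in keystone and switch nova
# to the unified limits quota driver via configure_nova_unified_limits
# further below.)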
+NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} # Functions # --------- @@ -132,14 +177,15 @@ TEMPEST_SERVICES+=,nova # Test if any Nova services are enabled # is_nova_enabled function is_nova_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"nova" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 return 1 } -# Test if any Nova Cell services are enabled -# is_nova_enabled -function is_n-cell_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0 +# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy +# service has TLS enabled +function is_nova_console_proxy_compute_tls_enabled { + [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0 return 1 } @@ -163,24 +209,33 @@ function cleanup_nova { clean_iptables # Destroy old instances + local instances instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 sudo virsh destroy || true - echo $instances | xargs -n1 sudo virsh undefine --managed-save || true + if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then + # Can't delete with nvram flags, then just try without this flag + xargs -n1 sudo virsh undefine --managed-save <<< $instances + fi fi # Logout and delete iscsi sessions + local tgts tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2) + local target for target in $tgts; do sudo iscsiadm --mode node -T $target --logout || true done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. sudo rm -rf $NOVA_INSTANCES_PATH/* fi - sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + sudo rm -rf $NOVA_STATE_PATH # NOTE(dtroyer): This really should be called from here but due to the way # nova abuses the _cleanup() function we're moving it @@ -188,64 +243,27 @@ function cleanup_nova { #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then # cleanup_nova_hypervisor #fi -} -# configure_nova_rootwrap() - configure Nova's rootwrap -function configure_nova_rootwrap { - # Deploy new rootwrap filters files (owned by root). 
- # Wipe any existing rootwrap.d files first - if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NOVA_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/nova/rootwrap.d - sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d - sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d - sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ - sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf - sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf - sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to nova-rootwrap - ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" - - # Set up the rootwrap sudoers for nova - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap + stop_process "n-api" + stop_process "n-api-meta" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME + fi } # configure_nova() - Set config files, create data dirs, etc function configure_nova { # Put config files in ``/etc/nova`` for everyone to find - if [[ ! -d $NOVA_CONF_DIR ]]; then - sudo mkdir -p $NOVA_CONF_DIR - fi - sudo chown $STACK_USER $NOVA_CONF_DIR - - cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR + sudo install -d -o $STACK_USER $NOVA_CONF_DIR - configure_nova_rootwrap - - if is_service_enabled n-api; then - # Remove legacy paste config if present - rm -f $NOVA_DIR/bin/nova-api-paste.ini + configure_rootwrap nova + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - - # Comment out the keystone configs in Nova's api-paste.ini. - # We are using nova.conf to configure this instead. - inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host - inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol - inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name - inicomment $NOVA_API_PASTE_INI filter:authtoken cafile - inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user - inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password - inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir fi if is_service_enabled n-cpu; then @@ -261,7 +279,9 @@ function configure_nova { if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu - if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem + if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on fi @@ -273,15 +293,10 @@ function configure_nova { # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then if is_ubuntu; then - if [[ ! "$DISTRO" > natty ]]; then - cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! 
mount -n | grep -q cgroup; then - sudo mount /cgroup - fi + # enable nbd for lxc unless you're using an lvm backend + # otherwise you can't boot instances + if [[ "$NOVA_BACKEND" != "LVM" ]]; then + sudo modprobe nbd fi fi fi @@ -291,8 +306,7 @@ function configure_nova { # ---------------- # Nova stores each instance in its own directory. - sudo mkdir -p $NOVA_INSTANCES_PATH - sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH + sudo install -d -o $STACK_USER $NOVA_INSTANCES_PATH # You can specify a different disk to be mounted and used for backing the # virtual machines. If there is a partition labeled nova-instances we @@ -303,9 +317,49 @@ function configure_nova { sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi - if is_suse; then - # iscsid is not started by default - start_service iscsid + + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi + + if [[ ${ISCSID_DEBUG} == "True" ]]; then + # Install an override that starts iscsid with debugging + # enabled. + cat > /tmp/iscsid.override <=v1.0.0 from source. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} + fi + iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} + iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if is_service_enabled n-novnc || is_service_enabled n-xvnc; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF DEFAULT vnc_enabled false + iniset $NOVA_CPU_CONF vnc enabled false fi - if is_service_enabled n-spice; then + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
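# (Illustration: in a multi-node setup the subnode's local.conf would
# typically carry
#     SPICESERVER_PROXYCLIENT_ADDRESS=$HOST_IP
# where $HOST_IP is that compute's management address, so the controller's
# html5 proxy can reach the instance consoles.)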
- SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" - else - iniset $NOVA_CONF spice enabled false + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + iniset $NOVA_CPU_CONF spice enabled true + iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" fi - iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" - iniset_rpc_backend nova $NOVA_CONF DEFAULT - iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $NOVA_CPU_CONF serial_console enabled True + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" + fi } -function init_nova_cells { - if is_service_enabled n-cell; then - cp $NOVA_CONF $NOVA_CELLS_CONF - iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` - iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell - iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF - iniset $NOVA_CELLS_CONF cells enable True - iniset $NOVA_CELLS_CONF cells cell_type compute - iniset $NOVA_CELLS_CONF cells name child - - iniset $NOVA_CONF cells enable True - iniset $NOVA_CONF cells cell_type api - iniset $NOVA_CONF cells name region - - if is_service_enabled n-api-meta; then - NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata +function configure_console_proxies { + # Use the provided config file path or default to $NOVA_CONF. + local conf=${1:-$NOVA_CONF} + local offset=${2:-0} + # Stagger the offset based on the total number of possible console proxies + # (novnc, spice, serial) so that their ports will not collide if + # all are enabled. + offset=$((offset * 3)) + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc novncproxy_port $((6080 + offset)) + + if is_nova_console_proxy_compute_tls_enabled ; then + iniset $conf vnc auth_schemes "vencrypt" + iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" + iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" + + sudo mkdir -p /etc/pki/nova-novnc + deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem + deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default, and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/nova-novnc to + # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read + # the key file. 
+ sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc + # This is needed to enable TLS in the proxy itself, example log: + # WebSocket server settings: + # - Listen on 0.0.0.0:6080 + # - Flash security policy server + # - Web server (no directory listings). Web root: /usr/share/novnc + # - SSL/TLS support + # - proxying from 0.0.0.0:6080 to None:None + iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem" fi + fi + + if is_service_enabled n-spice; then + iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf spice html5proxy_port $((6081 + offset)) + fi - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 - $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 + if is_service_enabled n-sproxy; then + iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf serial_console serialproxy_port $((6082 + offset)) fi } -# create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir { - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* +function configure_nova_unified_limits { + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. + # Note: disk quota is new in nova as of unified limits. 
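# (Illustration, not executed by this function: once these registered
# defaults exist, a per-project override is created as a plain limit, e.g.
#     openstack --os-cloud devstack-system-admin limit create \
#         --service nova --project demo --resource-limit 20 servers
# "demo" and the value 20 are placeholders.)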
+ openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope all + iniset $NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } -function create_nova_conf_nova_network { - iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" - iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" - iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" - iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" - fi +function init_nova_service_user_conf { + iniset $NOVA_CONF service_user send_service_user_token True + iniset $NOVA_CONF service_user auth_type password + iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF service_user username nova + iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" + iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" + iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" +} + +function conductor_conf { + local cell="$1" + echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" } # create_nova_keys_dir() - Part of the init_nova() process 
function create_nova_keys_dir { # Create keys dir - sudo mkdir -p ${NOVA_STATE_PATH}/keys - sudo chown -R $STACK_USER ${NOVA_STATE_PATH} + sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys +} + +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database $dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell } # init_nova() - Initialize databases, etc. @@ -613,58 +872,108 @@ function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then - # (Re)create nova database - # Explicitly use latin1: to avoid lp#829209, nova expects the database to - # use latin1 by default, and then upgrades the database to utf8 (see the - # 082_essex.py in nova) - recreate_database nova latin1 + # (Re)create nova databases + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies. in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + fi - # Migrate nova database - $NOVA_BIN_DIR/nova-manage db sync + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done - if is_service_enabled n-cell; then - recreate_database $NOVA_CELLS_DB latin1 - fi + recreate_database $NOVA_API_DB + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + + # map_cell0 will create the cell mapping record in the nova_api DB so + # this needs to come after the api_db sync happens. 
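# (Illustration: the resulting mappings can be inspected afterwards with
#     nova-manage cell_v2 list_cells --verbose
# which should show cell0 plus cell1..cellN with their transport and
# database URLs -- the same check start_nova runs later in this file.)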
+        $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
-        # (Re)create nova baremetal database
-        if is_baremetal; then
-            recreate_database nova_bm latin1
-            $NOVA_BIN_DIR/nova-baremetal-manage db sync
+        # Wait for DBs to finish from above
+        for i in $(seq 0 $NOVA_NUM_CELLS); do
+            async_wait nova-cell-$i
+        done
+
+        if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+            # We didn't db sync cell0 above, so run it now
+            $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
         fi
+
+        # Run online migrations on the new databases
+        # Needed for flavor conversion
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations
+
+        # create the cell1 cell for the main nova db where the hosts live
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i"
+        done
     fi
 
-    create_nova_cache_dir
     create_nova_keys_dir
+
+    if [[ "$NOVA_BACKEND" == "LVM" ]]; then
+        init_default_lvm_volume_group
+    fi
 }
 
 # install_novaclient() - Collect source and prepare
 function install_novaclient {
-    git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
-    setup_develop $NOVACLIENT_DIR
-    sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion
+    if use_library_from_git "python-novaclient"; then
+        git_clone_by_name "python-novaclient"
+        setup_dev_lib "python-novaclient"
+        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-novaclient"]}/tools/,/etc/bash_completion.d/}nova.bash_completion
+    fi
 }
 
 # install_nova() - Collect source and prepare
 function install_nova {
+
+    # Install os-vif
+    if use_library_from_git "os-vif"; then
+        git_clone_by_name "os-vif"
+        setup_dev_lib "os-vif"
+    fi
+
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         install_nova_hypervisor
     fi
 
     if is_service_enabled n-novnc; then
         # a websockets/html5 or flash powered VNC console for vm instances
-        NOVNC_FROM_PACKAGE=`trueorfalse False $NOVNC_FROM_PACKAGE`
+        NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
         if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
+            # Installing novnc on Debian bullseye breaks the global pip
+            # install. This happens because novnc pulls in distro cryptography
+            # which will be preferred by distro pip, but if anything has
+            # installed pyOpenSSL from pypi (keystone) that is not compatible
+            # with distro cryptography. Fix this by installing
+            # python3-openssl (pyOpenSSL) from the distro, which pip will
+            # prefer on Debian. Ubuntu has the inverse problem, so we only do
+            # this for Debian.
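# (Illustration: which branch runs here is driven from local.conf, e.g.
#     NOVNC_FROM_PACKAGE=True     # distro novnc package (this branch)
#     NOVNC_FROM_PACKAGE=False    # default: clone noVNC from $NOVNC_REPO
# )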
+ local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else - NOVNC_WEB_DIR=$DEST/noVNC + NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH fi fi if is_service_enabled n-spice; then # a websockets/html5 or flash powered SPICE console for vm instances - SPICE_FROM_PACKAGE=`trueorfalse True $SPICE_FROM_PACKAGE` + SPICE_FROM_PACKAGE=$(trueorfalse True SPICE_FROM_PACKAGE) if [ "$SPICE_FROM_PACKAGE" = "True" ]; then SPICE_WEB_DIR=/usr/share/spice-html5 install_package spice-html5 @@ -683,106 +992,307 @@ function install_nova { function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT + local service_protocol=$NOVA_SERVICE_PROTOCOL + local nova_url if is_service_enabled tls-proxy; then service_port=$NOVA_SERVICE_PORT_INT + service_protocol="http" fi - screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ + echo "Waiting for nova-api to start..." - if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then + if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then die $LINENO "nova-api did not start" fi - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & - fi + export PATH=$old_path } + # start_nova_compute() - Start the compute process function start_nova_compute { - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + local compute_cell_conf=$NOVA_CONF + + # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF... + cp $compute_cell_conf $NOVA_CPU_CONF + # ...and then adding/overriding anything explicitly set in $NOVA_CPU_CONF + merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF' + + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so + # skip these bits and use the normal config. + echo "Skipping multi-cell conductor fleet setup" else - local compute_cell_conf=$NOVA_CONF + # "${CELLSV2_SETUP}" is "superconductor" + # FIXME(danms): Should this be configurable? + iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True + # Since the nova-compute service cannot reach nova-scheduler over + # RPC, we also disable track_instance_changes. + iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False + iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + fi + + # Make sure we nuke any database config + inidelete $NOVA_CPU_CONF database connection + inidelete $NOVA_CPU_CONF api_database connection + + # Console proxies were configured earlier in create_nova_conf. Now that the + # nova-cpu.conf has been created, configure the console settings required + # by the compute process. + configure_console_compute + + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. 
Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 + + # Configure the OVSDB connection for os-vif + if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then + iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" + fi + + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. - # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" + # ``sg`` is used in run_process to execute nova-compute as a member of the + # **$LIBVIRT_GROUP** group. + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP + elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP + elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP elif [[ "$VIRT_DRIVER" = 'fake' ]]; then + local i for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')" + # Avoid process redirection of fake host configurations by + # creating or modifying real configurations. Each fake + # gets its own configuration and own log file. 
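# (Illustration: a scale-testing local.conf might request, e.g.
#     VIRT_DRIVER=fake
#     NUMBER_FAKE_NOVA_COMPUTE=10
# giving ten fake computes, each with its own $NOVA_FAKE_CONF-<i> and state
# path as set up below.)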
+ local fake_conf="${NOVA_FAKE_CONF}-${i}" + iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" fi + + export PATH=$old_path } -# start_nova() - Start running processes, including screen +# start_nova() - Start running processes function start_nova_rest { + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + local compute_cell_conf=$NOVA_CONF + + run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" + + export PATH=$old_path +} + +function enable_nova_console_proxies { + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + if is_service_enabled $srv; then + enable_service ${srv}-cell${i} + fi + done + done +} + +function start_nova_console_proxies { + # Hack to set the path for rootwrap + local old_path=$PATH + # This is needed to find the nova conf + export PATH=$NOVA_BIN_DIR:$PATH + local api_cell_conf=$NOVA_CONF - if is_service_enabled n-cell; then - local compute_cell_conf=$NOVA_CELLS_CONF + # console proxies run globally for singleconductor, else they run per cell + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" + run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" + run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" else - local compute_cell_conf=$NOVA_CONF + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" + run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" + run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" + done fi - # ``screen_it`` checks ``is_service_enabled``, it is not needed here - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" - screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" - screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" + export PATH=$old_path +} - screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" - screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" - screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata 
--config-file $compute_cell_conf" +function enable_nova_fleet { + if is_service_enabled n-cond; then + enable_service n-super-cond + for i in $(seq 1 $NOVA_NUM_CELLS); do + enable_service n-cond-cell${i} + done + fi +} - screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" - screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" - screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" +function start_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + echo "Starting nova-conductor in a cellsv1-compatible way" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + return + fi - # Starting the nova-objectstore only if swift3 service is not enabled. - # Swift will act as s3 objectstore. - is_service_enabled swift3 || \ - screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" + enable_nova_fleet + if is_service_enabled n-super-cond; then + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + if is_service_enabled n-cond-cell${i}; then + local conf + conf=$(conductor_conf $i) + run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" + fi + done +} + +function is_nova_ready { + # NOTE(sdague): with cells v2 all the compute services must be up + # and checked into the database before discover_hosts is run. This + # happens in all in one installs by accident, because > 30 seconds + # happen between here and the script ending. However, in multinode + # tests this can very often not be the case. So ensure that the + # compute is up before we move on. + wait_for_compute $NOVA_READY_TIMEOUT } function start_nova { - start_nova_compute start_nova_rest + start_nova_console_proxies + start_nova_conductor + start_nova_compute + if is_service_enabled n-api; then + # dump the cell mapping to ensure life is good + echo "Dumping cells_v2 mapping" + $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose + fi } function stop_nova_compute { - screen_stop n-cpu + if [ "$VIRT_DRIVER" == "fake" ]; then + local i + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + stop_process n-cpu-${i} + done + else + stop_process n-cpu + fi if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor fi } function stop_nova_rest { - # Kill the nova screen windows - # Some services are listed here twice since more than one instance - # of a service may be running in certain configs. 
- for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do - screen_stop $serv + # Kill the non-compute nova processes + for serv in n-api n-api-meta n-sch; do + stop_process $serv done } -# stop_nova() - Stop running processes (non-screen) +function stop_nova_console_proxies { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + for srv in n-novnc n-spice n-sproxy; do + stop_process $srv + done + else + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + stop_process ${srv}-cell${i} + done + done + fi +} + +function stop_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + stop_process n-cond + return + fi + + enable_nova_fleet + for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do + if is_service_enabled $srv; then + stop_process $srv + fi + done +} + +# stop_nova() - Stop running processes function stop_nova { stop_nova_rest + stop_nova_console_proxies + stop_nova_conductor stop_nova_compute } +# create_instance_types(): Create default flavors +function create_flavors { + if is_service_enabled n-api; then + if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then + # Note that danms hates these flavors and apologizes for sdague + openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256 + openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M + openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G + openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G + openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G + fi + + if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then + openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny + openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small + openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium + openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large + openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge + fi + fi +} # Restore xtrace -$XTRACE +$_XTRACE_LIB_NOVA # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index f435456e7f..c0713f9953 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/functions-libvirt # Common libvirt configuration functions @@ -6,51 +8,113 @@ # ``STACK_USER`` has to be defined # Save trace setting -LV_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_FN_LIBVIRT=$(set +o | grep xtrace) set +o xtrace # Defaults -# ------- +# -------- + +# Turn on selective debug log filters for libvirt. 
+# (NOTE: Enabling this by default, because the log filters enabled in +# 'configure_libvirt' function further below are _selective_ and not +# extremely verbose.) +DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) + +# Try to enable coredumps for libvirt +# Currently fairly specific to OpenStackCI hosts +DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) + +# Enable the Fedora Virtualization Preview Copr repo that provides the latest +# rawhide builds of QEMU, Libvirt and other virt tools. +ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO) + +# Enable coredumps for libvirt +# Bug: https://bugs.launchpad.net/nova/+bug/1643911 +function _enable_coredump { + local confdir=/etc/systemd/system/libvirtd.service.d + local conffile=${confdir}/coredump.conf + + # Create a coredump directory, and instruct the kernel to save to + # here + sudo mkdir -p /var/core + sudo chmod a+wrx /var/core + echo '/var/core/core.%e.%p.%h.%t' | \ + sudo tee /proc/sys/kernel/core_pattern + + # Drop a config file to up the core ulimit + sudo mkdir -p ${confdir} + sudo tee ${conffile} < + elif is_fedora; then + + # Optionally enable the virt-preview repo when on Fedora + if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then + # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/ + sudo dnf copr enable -y @virtmaint-sig/virt-preview + fi + + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + + # Note that in CentOS/RHEL this needs to come from the RDO + # repositories (qemu-kvm-ev ... which provides this package) + # as the base system version is too old. We should have + # pre-installed these + install_package $qemu_package + install_package libvirt libvirt-devel + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi + + if is_arch "aarch64"; then + install_package edk2-aarch64 + fi fi - # workaround for - # https://bugzilla.redhat.com/show_bug.cgi?id=1098376; if we see - # the empty Xen proc file then remove the xen/libxl plugin - # shared-libraries (yum remove would uninstall libvirt due to - # dependencies, so let's avoid that...) - if is_fedora && [ -f /proc/xen/capabilities ] && \ - [ $(stat -c '%s' /proc/xen/capabilities) -eq 0 ]; then - sudo rm -f /usr/lib64/libvirt/connection-driver/libvirt_driver_libxl.so - sudo rm -f /usr/lib64/libvirt/connection-driver/libvirt_driver_xen.so - - # another bug requires these to be restarted to avoid - # potential hang of libvirtd - # https://bugzilla.redhat.com/show_bug.cgi?id=1098866 - sudo service dbus restart - sudo service firewalld restart + if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then + _enable_coredump fi } # Configures the installed libvirt system so that is accessible by # STACK_USER via qemu:///system with management capabilities. function configure_libvirt { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + if is_service_enabled neutron && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat <= 1.2.3 + local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" + else + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" + fi local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! 
grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + if ! sudo grep -q "^log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf fi fi - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. - restart_service $LIBVIRT_DAEMON + if is_nova_console_proxy_compute_tls_enabled ; then + echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF + echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF + + sudo mkdir -p /etc/pki/libvirt-vnc + deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem + deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/libvirt-vnc to + # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key + # file. + sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc + fi + + # Service needs to be started on redhat/fedora -- do a restart for + # sanity after fiddling the config. + restart_service libvirtd + + # Restart virtlogd companion service to ensure it is running properly + # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455 + # https://bugzilla.redhat.com/show_bug.cgi?id=1290357 + # (not all platforms have it; libvirt 1.3+ only, thus the ignore) + restart_service virtlogd || true } # Restore xtrace -$LV_XTRACE +$_XTRACE_NOVA_FN_LIBVIRT # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal deleted file mode 100644 index 1d4d4144df..0000000000 --- a/lib/nova_plugins/hypervisor-baremetal +++ /dev/null @@ -1,93 +0,0 @@ -# lib/nova_plugins/hypervisor-baremetal -# Configure the baremetal hypervisor - -# Enable with: -# VIRT_DRIVER=baremetal - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} -PUBLIC_INTERFACE_DEFAULT=eth0 -FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} -FLAT_NETWORK_BRIDGE_DEFAULT=br100 -STUB_NETWORK=${STUB_NETWORK:-False} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - configure_baremetal_nova_dirs - - iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - iniset $NOVA_CONF DEFAULT compute_driver 
nova.virt.baremetal.driver.BareMetalDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager - iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 - iniset $NOVA_CONF baremetal flavor_extra_specs cpu_arch:$BM_CPU_ARCH - iniset $NOVA_CONF baremetal driver $BM_DRIVER - iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER - iniset $NOVA_CONF baremetal tftp_root /tftpboot - if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then - BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf - sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" - iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" - fi - - # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. - for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } - done -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # This function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index e7a833f806..39cb45ca67 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/hypervisor-fake # Configure the fake hypervisor @@ -15,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_VIRTFAKE=$(set +o | grep xtrace) set +o xtrace @@ -34,20 +36,9 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" # Disable arbitrary limits - iniset $NOVA_CONF DEFAULT quota_instances -1 - iniset $NOVA_CONF DEFAULT quota_cores -1 - iniset $NOVA_CONF DEFAULT quota_ram -1 - iniset $NOVA_CONF DEFAULT quota_floating_ips -1 - iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 - iniset $NOVA_CONF DEFAULT quota_metadata_items -1 - iniset $NOVA_CONF DEFAULT quota_injected_files -1 - iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 - iniset $NOVA_CONF DEFAULT quota_security_groups -1 - iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 - iniset $NOVA_CONF DEFAULT quota_key_pairs -1 - iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" + iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } # install_nova_hypervisor() - Install external components @@ -70,7 +61,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_VIRTFAKE # Local variables: # mode: shell-script
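Because the fake driver above swaps real quota enforcement for nova.quota.NoopQuotaDriver, it is handy for exercising the API and scheduler at scale without launching real guests. A hypothetical local.conf sketch (the fake-compute count variable is an assumption, not something this patch introduces):

    [[local|localrc]]
    VIRT_DRIVER=fake
    # Hypothetical knob: run several fake nova-compute services on one host
    NUMBER_FAKE_NOVA_COMPUTE=5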
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index e72f7c1dc0..9a39c798a8 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/hypervisor-ironic # Configure the ironic hypervisor @@ -15,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_HYP_IRONIC=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/nova_plugins/functions-libvirt @@ -34,28 +36,45 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - configure_libvirt - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - - # NOTE(adam_g): The ironic compute driver currently lives in the ironic - # tree. We purposely configure Nova to load it from there until it moves - # back into Nova proper. - iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager - iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + if ! is_ironic_hardware; then + configure_libvirt + fi + + iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver + # ironic section - iniset $NOVA_CONF ironic admin_username admin - iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD - iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - iniset $NOVA_CONF ironic admin_tenant_name demo - iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6385/v1 - iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm` + iniset $NOVA_CONF ironic auth_type password + iniset $NOVA_CONF ironic username admin + iniset $NOVA_CONF ironic password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI + if is_ironic_enforce_scope; then + iniset $NOVA_CONF ironic system_scope all + else + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic project_name demo + fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + + iniset $NOVA_CONF ironic user_domain_id default + iniset $NOVA_CONF ironic region_name $REGION_NAME + + # These are used with crufty legacy ironicclient + iniset $NOVA_CONF ironic api_max_retries 300 + iniset $NOVA_CONF ironic api_retry_interval 5 + # These are used with shiny new openstacksdk + iniset $NOVA_CONF ironic connect_retries 300 + iniset $NOVA_CONF ironic connect_retry_delay 5 + iniset $NOVA_CONF ironic status_code_retries 300 + iniset $NOVA_CONF ironic status_code_retry_delay 5 } # install_nova_hypervisor() - Install external components function install_nova_hypervisor { + if is_ironic_hardware; then + return + fi install_libvirt } @@ -71,9 +90,8 @@ function stop_nova_hypervisor { : } - # Restore xtrace -$MY_XTRACE +$_XTRACE_HYP_IRONIC # Local variables: # mode: shell-script
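Note that the two retry families configured above encode the same tolerance: 300 attempts at a 5 second interval means nova-compute will ride out an Ironic API outage of roughly 300 x 5 s = 1500 s (25 minutes) before giving up, whichever client library ends up being used.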
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 259bf15a27..4b44c1f86f 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/hypervisor-libvirt # Configure the libvirt hypervisor @@ -15,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_LIBVIRT=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/nova_plugins/functions-libvirt @@ -23,9 +25,6 @@ source $TOP_DIR/lib/nova_plugins/functions-libvirt # Defaults # -------- -# File injection is disabled by default in Nova. This will turn it back on. -ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} - # Entry Points # ------------ @@ -40,28 +39,50 @@ function configure_nova_hypervisor { configure_libvirt iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" - iniset $NOVA_CONF libvirt cpu_mode "none" - iniset $NOVA_CONF libvirt use_usb_tablet "False" + iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" + if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then + iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + fi + # Do not enable USB tablet input devices to avoid QEMU CPU overhead. + iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" + iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" # Power architecture currently does not support graphical consoles. if is_arch "ppc64"; then - iniset $NOVA_CONF DEFAULT vnc_enabled "false" + iniset $NOVA_CONF vnc enabled "false" + fi + + # arm64-specific configuration + if is_arch "aarch64"; then + iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. + inidelete $NOVA_CONF libvirt cpu_model + fi + + if isset ENABLE_FILE_INJECTION; then + if [ "$ENABLE_FILE_INJECTION" == "True" ]; then + # -1 means use libguestfs to inspect the guest OS image for the + # root partition to use for file injection. + iniset $NOVA_CONF libvirt inject_partition '-1' + fi fi - ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION) - if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then - # When libguestfs is available for file injection, enable using - # libguestfs to inspect the image and figure out the proper - # partition to inject into. - iniset $NOVA_CONF libvirt inject_partition '-1' - iniset $NOVA_CONF libvirt inject_key 'true' - else - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. 
- iniset $NOVA_CONF libvirt inject_partition '-2' + if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then + iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" + iniset $NOVA_CONF libvirt images_type "ploop" + iniset $NOVA_CONF DEFAULT force_raw_images "False" + iniset $NOVA_CONF vnc server_proxyclient_address $HOST_IP + iniset $NOVA_CONF vnc server_listen $HOST_IP + iniset $NOVA_CONF vnc keymap + elif [[ "$NOVA_BACKEND" == "LVM" ]]; then + iniset $NOVA_CONF libvirt images_type "lvm" + iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME + if isset LVM_VOLUME_CLEAR; then + iniset $NOVA_CONF libvirt volume_clear "$LVM_VOLUME_CLEAR" + fi fi } @@ -83,6 +104,24 @@ function install_nova_hypervisor { yum_install libcgroup-tools fi fi + + if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then + if is_ubuntu; then + install_package python3-guestfs + # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs: + # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725 + INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)" + for kernel in $INSTALLED_KERNELS; do + STAT_OVERRIDE="root root 644 ${kernel}" + # unstack won't remove the statoverride, so make this idempotent + if [[ ! $(dpkg-statoverride --list | grep "$STAT_OVERRIDE") ]]; then + sudo dpkg-statoverride --add --update $STAT_OVERRIDE + fi + done + elif is_fedora; then + install_package python3-libguestfs + fi + fi } # start_nova_hypervisor - Start any required external services @@ -99,7 +138,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_NOVA_LIBVIRT # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz index a1636adf9c..57dc45c1c5 100644 --- a/lib/nova_plugins/hypervisor-openvz +++ b/lib/nova_plugins/hypervisor-openvz @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/hypervisor-openvz # Configure the openvz hypervisor @@ -15,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_OPENVZ=$(set +o | grep xtrace) set +o xtrace @@ -36,8 +38,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" iniset $NOVA_CONF DEFAULT connection_type "openvz" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" } # install_nova_hypervisor() - Install external components @@ -60,7 +60,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_OPENVZ # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere index 9933a3c712..7c08bc945b 100644 --- a/lib/nova_plugins/hypervisor-vsphere +++ b/lib/nova_plugins/hypervisor-vsphere @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/nova_plugins/hypervisor-vsphere # Configure the vSphere hypervisor @@ -15,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_VSPHERE=$(set +o | grep xtrace) set +o xtrace @@ -40,9 +42,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - if is_service_enabled neutron; then - iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE - fi } # 
install_nova_hypervisor() - Install external components @@ -65,7 +64,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_NOVA_VSPHERE # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver deleted file mode 100644 index c37969b9e8..0000000000 --- a/lib/nova_plugins/hypervisor-xenserver +++ /dev/null @@ -1,117 +0,0 @@ -# lib/nova_plugins/hypervisor-xenserver -# Configure the XenServer hypervisor - -# Enable with: -# VIRT_DRIVER=xenserver - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -PUBLIC_INTERFACE_DEFAULT=eth2 -GUEST_INTERFACE_DEFAULT=eth1 -# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args -FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) -if is_service_enabled neutron; then - XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) -fi - -VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" - iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - # Need to avoid crash due to new firewall support - XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" - - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # Find where the plugins should go in dom0 - xen_functions=`cat $TOP_DIR/tools/xen/functions` - PLUGIN_DIR=`$ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location"` - - # install nova plugins to dom0 - tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ | - $ssh_dom0 "tar -xzf - -C $PLUGIN_DIR && chmod a+x $PLUGIN_DIR/*" - - # install console logrotate script - tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | - $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' - - # Create a cron job that will rotate guest logs - $ssh_dom0 crontab - << CRONTAB -* * * * * /root/rotate_xen_guest_logs.sh -CRONTAB - - # Create directories for kernels and images - { - echo "set -eux" - cat $TOP_DIR/tools/xen/functions - echo "create_directory_for_images" - echo "create_directory_for_kernels" - } | $ssh_dom0 - -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # This function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/opendaylight b/lib/opendaylight deleted file mode 100644 index be3db6e685..0000000000 --- a/lib/opendaylight +++ /dev/null @@ -1,181 +0,0 @@ -# lib/opendaylight -# Functions to control the configuration and operation of the opendaylight service - -# Dependencies: -# -# - ``functions`` file -# # ``DEST`` must be defined -# # ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - is_opendaylight_enabled -# - is_opendaylight-compute_enabled -# - install_opendaylight -# - install_opendaylight-compute -# - configure_opendaylight -# - init_opendaylight -# - start_opendaylight -# - stop_opendaylight-compute -# - stop_opendaylight -# - cleanup_opendaylight - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# For OVS_BRIDGE and PUBLIC_BRIDGE -source $TOP_DIR/lib/neutron_plugins/ovs_base - -# Defaults -# -------- - -# The IP address of ODL. Set this in local.conf. 
-# ODL_MGR_IP= -ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST} - -# The ODL endpoint URL -ODL_ENDPOINT=${ODL_ENDPOINT:-http://${ODL_MGR_IP}:8080/controller/nb/v2/neutron} - -# The ODL username -ODL_USERNAME=${ODL_USERNAME:-admin} - -# The ODL password -ODL_PASSWORD=${ODL_PASSWORD:-admin} - -# -ODL_DIR=$DEST/opendaylight - -# The OpenDaylight Package, currently using 'Hydrogen' release -ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip} - -# The OpenDaylight URL -ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1} - -# Default arguments for OpenDaylight. This is typically used to set -# Java memory options. -# ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m -ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"} - -# How long to pause after ODL starts to let it complete booting -ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60} - -# Set up default directories - - -# Entry Points -# ------------ - -# Test if OpenDaylight is enabled -# is_opendaylight_enabled -function is_opendaylight_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 - return 1 -} - -# cleanup_opendaylight() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_opendaylight { - : -} - -# configure_opendaylight() - Set config files, create data dirs, etc -function configure_opendaylight { - # Remove simple forwarder - rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding* - - # Configure OpenFlow 1.3 - echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini -} - -function configure_ml2_odl { - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl url=$ODL_ENDPOINT - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl username=$ODL_USERNAME - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl password=$ODL_PASSWORD -} - -# init_opendaylight() - Initialize databases, etc. -function init_opendaylight { - # clean up from previous (possibly aborted) runs - # create required data files - : -} - -# install_opendaylight() - Collect source and prepare -function install_opendaylight { - local _pwd=$(pwd) - - if is_ubuntu; then - install_package maven openjdk-7-jre openjdk-7-jdk - else - yum_install maven java-1.7.0-openjdk - fi - - # Download OpenDaylight - mkdir -p $ODL_DIR - cd $ODL_DIR - wget -N $ODL_URL/$ODL_PKG - unzip -u $ODL_PKG -} - -# install_opendaylight-compute - Make sure OVS is install -function install_opendaylight-compute { - local kernel_version - # Install deps - # FIXME add to ``files/apts/neutron``, but don't install if not needed! - if is_ubuntu; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - elif is_fedora; then - install_package openvswitch - # Ensure that the service is started - restart_service openvswitch - elif is_suse; then - install_package openvswitch-switch - restart_service openvswitch-switch - fi -} - -# start_opendaylight() - Start running processes, including screen -function start_opendaylight { - if is_ubuntu; then - JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64 - else - JHOME=/usr/lib/jvm/java-1.7.0-openjdk - fi - - # The flags to ODL have the following meaning: - # -of13: runs ODL using OpenFlow 1.3 protocol support. 
- # -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support - screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb" - - # Sleep a bit to let OpenDaylight finish starting up - sleep $ODL_BOOT_WAIT -} - -# stop_opendaylight() - Stop running processes (non-screen) -function stop_opendaylight { - screen_stop odl-server -} - -# stop_opendaylight-compute() - Remove OVS bridges -function stop_opendaylight-compute { - # remove all OVS ports that look like Neutron created ports - for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do - sudo ovs-vsctl del-port ${port} - done - - # remove all OVS bridges created by Neutron - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do - sudo ovs-vsctl del-br ${bridge} - done -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..7c8bee3744 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,23 @@ +#!/bin/bash + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + else + echo "False" + fi +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +}
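Since OS_VIF_OVS_ISOLATE_VIF above only defaults to True when Q_AGENT is openvswitch, other OVS-based setups have to opt in explicitly. An illustrative local.conf override, assuming the usual localrc section:

    [[local|localrc]]
    # Force VIF isolation on, e.g. for a custom ml2/ovs-like agent
    OS_VIF_OVS_ISOLATE_VIF=True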
diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 3cf72186a1..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,83 +0,0 @@ -# lib/oslo -# -# Functions to install oslo libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. - -# Dependencies: -# -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - install_oslo - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- -CLIFF_DIR=$DEST/cliff -OSLOCFG_DIR=$DEST/oslo.config -OSLOMSG_DIR=$DEST/oslo.messaging -OSLORWRAP_DIR=$DEST/oslo.rootwrap -OSLOVMWARE_DIR=$DEST/oslo.vmware -PYCADF_DIR=$DEST/pycadf -STEVEDORE_DIR=$DEST/stevedore -TASKFLOW_DIR=$DEST/taskflow - -# Entry Points -# ------------ - -# install_oslo() - Collect source and prepare -function install_oslo { - # TODO(sdague): remove this once we get to Icehouse, this just makes - # for a smoother transition of existing users. - cleanup_oslo - - git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH - setup_install $CLIFF_DIR - - git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH - setup_install $OSLOCFG_DIR - - git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH - setup_install $OSLOMSG_DIR - - git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH - setup_install $OSLORWRAP_DIR - - git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH - setup_install $OSLOVMWARE_DIR - - git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH - setup_install $PYCADF_DIR - - git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH - setup_install $STEVEDORE_DIR - - git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH - setup_install $TASKFLOW_DIR -} - -# cleanup_oslo() - purge possibly old versions of oslo -function cleanup_oslo { - # this means we've got an old oslo installed, lets get rid of it - if ! python -c 'import oslo.config' 2>/dev/null; then - echo "Found old oslo.config... removing to ensure consistency" - local PIP_CMD=$(get_pip_command) - pip_install oslo.config - sudo $PIP_CMD uninstall -y oslo.config - fi -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/placement b/lib/placement new file mode 100644 index 0000000000..03aaa0344b --- /dev/null +++ b/lib/placement @@ -0,0 +1,151 @@ +#!/bin/bash +# +# lib/placement +# Functions to control the configuration and operation of the **Placement** service +# + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_placement +# - cleanup_placement +# - configure_placement +# - init_placement +# - start_placement +# - stop_placement + +# Save trace setting +_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +PLACEMENT_DIR=$DEST/placement +PLACEMENT_CONF_DIR=/etc/placement +PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf +PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone} +# Placement virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv + PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin +else + PLACEMENT_BIN_DIR=$(get_python_exec_prefix) +fi +PLACEMENT_UWSGI=placement.wsgi.api:application +PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini + +if is_service_enabled tls-proxy; then + PLACEMENT_SERVICE_PROTOCOL="https" +fi + +# Public facing bits +PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By default, these flags are False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + +# Functions +# --------- + +# Test if any placement services are enabled +# is_placement_enabled +function is_placement_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"placement-api" ]] && return 0 + return 1 +} + +# cleanup_placement() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_placement { + sudo rm -f $(apache_site_config_for placement-api) + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" +} + +# create_placement_conf() - Write config +function create_placement_conf { + rm -f $PLACEMENT_CONF + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY + configure_keystone_authtoken_middleware $PLACEMENT_CONF placement + setup_logging $PLACEMENT_CONF +} + +# configure_placement() - Set config files, create data dirs, etc +function configure_placement { + sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR + create_placement_conf + + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True + else + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False + fi +} + +# create_placement_accounts() - Set up required placement accounts +# and service and endpoints. +function create_placement_accounts { + create_service_user "placement" "admin" + local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement" + get_or_create_service "placement" "placement" "Placement Service" + get_or_create_endpoint \ + "placement" \ + "$REGION_NAME" \ + "$placement_api_url" +} + +# init_placement() - Create service user and endpoints +function init_placement { + recreate_database placement + $PLACEMENT_BIN_DIR/placement-manage db sync + create_placement_accounts +} + +# install_placement() - Collect source and prepare +function install_placement { + # Install the openstackclient placement client plugin for CLI + pip_install_gr osc-placement + git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH + setup_develop $PLACEMENT_DIR +} + +# start_placement_api() - Start the API processes ahead of other things +function start_placement_api { + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" + + echo "Waiting for placement-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then + die $LINENO "placement-api did not start" + fi +} + +function start_placement { + start_placement_api +} + +# stop_placement() - Disable the api service and stop it. 
+function stop_placement { + stop_process "placement-api" +} + +# Restore xtrace +$_XTRACE_LIB_PLACEMENT + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End:
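With the uwsgi layout above, the API is served under the /placement prefix on the main web server rather than on its own port. A minimal smoke test of a fresh deployment might look like this, purely as a sketch (the host is whatever SERVICE_HOST resolved to):

    # Expect an HTTP 200 with a small JSON version document
    curl -s "$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement" | head -c 200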
diff --git a/lib/rpc_backend b/lib/rpc_backend index e922daa078..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -1,92 +1,49 @@ +#!/bin/bash +# # lib/rpc_backend -# Interface for interactig with different rpc backend -# rpc backend settings +# Interface for installing RabbitMQ on the system # Dependencies: # # - ``functions`` file -# - ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used +# - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used # ``stack.sh`` calls the entry points in this order: # # - check_rpc_backend # - install_rpc_backend # - restart_rpc_backend -# - iniset_rpc_backend +# - iniset_rpc_backend (stable interface) +# +# Note: if implementing an out of tree plugin for an RPC backend, you +# should install all services through normal plugin methods, then +# redefine ``iniset_rpc_backend`` in your code. That's the one portion +# of this file which is a standard interface. # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_RPC_BACKEND=$(set +o | grep xtrace) set +o xtrace +RABBIT_USERID=${RABBIT_USERID:-stackrabbit} +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} +fi # Functions # --------- - -# Make sure we only have one rpc backend enabled. -# Also check the specified rpc backend is available on your platform. -function check_rpc_backend { - local rpc_needed=1 - # We rely on the fact that filenames in lib/* match the service names - # that can be passed as arguments to is_service_enabled. - # We check for a call to iniset_rpc_backend in these files, meaning - # the service needs a backend. - rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}') - for c in ${rpc_candidates}; do - if is_service_enabled $c; then - rpc_needed=0 - break - fi - done - local rpc_backend_cnt=0 - for svc in qpid zeromq rabbit; do - is_service_enabled $svc && - ((rpc_backend_cnt++)) - done - if [ "$rpc_backend_cnt" -gt 1 ]; then - echo "ERROR: only one rpc backend may be enabled," - echo " set only one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." - elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then - echo "ERROR: at least one rpc backend must be enabled," - echo " set one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." - fi - - if is_service_enabled qpid && ! qpid_is_supported; then - die $LINENO "Qpid support is not available for this version of your distribution." - fi -} - # clean up after rpc backend - eradicate all traces so changing backends # produces a clean switch function cleanup_rpc_backend { if is_service_enabled rabbit; then # Obliterate rabbitmq-server uninstall_package rabbitmq-server - sudo killall epmd || sudo killall -9 epmd + # in case it's not actually running, /bin/true at the end + sudo killall epmd || sudo killall -9 epmd || /bin/true if is_ubuntu; then # And the Erlang runtime too apt_get purge -y erlang* fi - elif is_service_enabled qpid; then - if is_fedora; then - uninstall_package qpid-cpp-server - elif is_ubuntu; then - uninstall_package qpidd - else - exit_distro_not_supported "qpid installation" - fi - elif is_service_enabled zeromq; then - if is_fedora; then - uninstall_package zeromq python-zmq redis - elif is_ubuntu; then - uninstall_package libzmq1 python-zmq redis-server - elif is_suse; then - uninstall_package libzmq1 python-pyzmq redis - else - exit_distro_not_supported "zeromq installation" - fi fi } @@ -94,43 +51,15 @@ function cleanup_rpc_backend { function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - install_package rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" - elif is_service_enabled qpid; then + install_package rabbitmq-server if is_fedora; then - install_package qpid-cpp-server - if [[ $DISTRO =~ (rhel6) ]]; then - # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to - # be no or you get GSS authentication errors as it - # attempts to default to this. - sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf - fi - elif is_ubuntu; then - install_package qpidd - sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf - sudo chmod o+r /etc/qpid/qpidd.sasldb - else - exit_distro_not_supported "qpid installation" + # NOTE(jangutter): If rabbitmq is not running (as in a fresh + # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with + # socket activation. This fails the first time and does not get + # cleared. It is benign, but the workaround is to start rabbitmq a + # bit earlier for RPM based distros. + sudo systemctl --now enable rabbitmq-server fi - elif is_service_enabled zeromq; then - # NOTE(ewindisch): Redis is not strictly necessary - # but there is a matchmaker driver that works - # really well & out of the box for multi-node. - if is_fedora; then - install_package zeromq python-zmq redis - elif is_ubuntu; then - install_package libzmq1 python-zmq redis-server - elif is_suse; then - install_package libzmq1 python-pyzmq redis - else - exit_distro_not_supported "zeromq installation" - fi - # Necessary directory for socket location. - sudo mkdir -p /var/run/openstack - sudo chown $STACK_USER /var/run/openstack fi }
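The early-enable workaround above is observable on an RPM-based host; an illustrative sanity check (expected output assumes a healthy install):

    systemctl is-enabled rabbitmq-server   # expect: enabled
    systemctl is-active rabbitmq-server    # expect: active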
@@ -141,70 +70,109 @@ function restart_rpc_backend { echo_summary "Starting RabbitMQ" # NOTE(bnemec): Retry initial rabbitmq configuration to deal with # the fact that sometimes it fails to start properly. - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 - for i in `seq 10`; do - if is_fedora || is_suse; then - # service is not started by default + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1144100 + # NOTE(tonyb): Extend the original retry logic to only restart rabbitmq + # every second time around the loop. + # See: https://bugs.launchpad.net/devstack/+bug/1449056 for details on + # why this is needed. This can be seen on vivid and Debian unstable + (May 2015) + # TODO(tonyb): Remove this when Debian and Ubuntu have a fixed systemd + service file. + local i + for i in `seq 20`; do + local rc=0 + + [[ $i -eq "20" ]] && die $LINENO "Failed to set rabbitmq password" + + if [[ $(( i % 2 )) == "0" ]] ; then restart_service rabbitmq-server fi + + rabbit_setuser "$RABBIT_USERID" "$RABBIT_PASSWORD" || rc=$? + if [ $rc -ne 0 ]; then + continue + fi + # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD && break - [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" - done - if is_service_enabled n-cell; then - # Add partitioned access for the child cell - if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then - sudo rabbitmqctl add_vhost child_cell - sudo rabbitmqctl set_permissions -p child_cell guest ".*" ".*" ".*" + sudo rabbitmqctl change_password \ + $RABBIT_USERID $RABBIT_PASSWORD || rc=$? + if [ $rc -ne 0 ]; then + continue; fi + + break + done + # NOTE(frickler): Remove the default guest user + sudo rabbitmqctl delete_user guest || true + fi +} + +# adds a vhost to the rpc backend +function rpc_backend_add_vhost { + local vhost="$1" + if is_service_enabled rabbit; then + if [ -z `sudo rabbitmqctl list_vhosts | grep $vhost` ]; then + sudo rabbitmqctl add_vhost $vhost + sudo rabbitmqctl set_permissions -p $vhost $RABBIT_USERID ".*" ".*" ".*" fi - elif is_service_enabled qpid; then - echo_summary "Starting qpid" - restart_service qpidd + else + echo 'RPC backend does not support vhosts' + return 1 fi } -# iniset cofiguration +# Returns the address of the RPC backend in URL format. +function get_transport_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +} + +# Returns the address of the Notification backend in URL format. This +# should be used to set the transport_url option in the +# oslo_messaging_notifications group. +function get_notification_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +}
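For illustration, both URL helpers above emit the same AMQP shape. With the stackrabbit default user, a hypothetical password and host, and a cell vhost, the output would look like this:

    # get_transport_url nova_cell1 (password and host are illustrative):
    #   rabbit://stackrabbit:secretpass@192.0.2.10:5672/nova_cell1
    # An empty vhost argument leaves the path empty, selecting RabbitMQ's
    # default "/" vhost.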
+ +# iniset configuration function iniset_rpc_backend { local package=$1 local file=$2 - local section=$3 - if is_service_enabled zeromq; then - iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq - iniset $file $section rpc_zmq_matchmaker \ - ${package}.openstack.common.rpc.matchmaker_redis.MatchMakerRedis - # Set MATCHMAKER_REDIS_HOST if running multi-node. - MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} - iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST - elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then - iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid - iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST} - if is_ubuntu; then - QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1` - iniset $file $section qpid_password $QPID_PASSWORD - iniset $file $section qpid_username admin + local section=${3:-DEFAULT} + local virtual_host=$4 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + iniset $file $section transport_url $(get_transport_url "$virtual_host") + if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then + iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD + fi + if [ -n "$RABBIT_HEARTBEAT_RATE" ]; then + iniset $file oslo_messaging_rabbit heartbeat_rate $RABBIT_HEARTBEAT_RATE fi - elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then - iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu - iniset $file $section rabbit_hosts $RABBIT_HOST - iniset $file $section rabbit_password $RABBIT_PASSWORD fi } -# Check if qpid can be used on the current distro. -# qpid_is_supported -function qpid_is_supported { - if [[ -z "$DISTRO" ]]; then - GetDistro +function rabbit_setuser { + local user="$1" pass="$2" found="" out="" + out=$(sudo rabbitmqctl list_users) || + { echo "failed to list users" 1>&2; return 1; } + found=$(echo "$out" | awk '$1 == user { print $1 }' "user=$user") + if [ "$found" = "$user" ]; then + sudo rabbitmqctl change_password "$user" "$pass" || + { echo "failed changing pass for '$user'" 1>&2; return 1; } + else + sudo rabbitmqctl add_user "$user" "$pass" || + { echo "failed adding user '$user'" 1>&2; return 1; } fi - - # Qpid is not in openSUSE - ( ! 
is_suse ) + sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*" } - # Restore xtrace -$XTRACE +$_XTRACE_RPC_BACKEND # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/sahara b/lib/sahara deleted file mode 100644 index d56cf1b444..0000000000 --- a/lib/sahara +++ /dev/null @@ -1,184 +0,0 @@ -# lib/sahara - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_sahara -# configure_sahara -# start_sahara -# stop_sahara -# cleanup_sahara - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default repos -SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git} -SAHARA_BRANCH=${SAHARA_BRANCH:-master} - -# Set up default directories -SAHARA_DIR=$DEST/sahara -SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} -SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf - -SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} -SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} -SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} - -SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,fake} - -# Support entry points installation of console scripts -if [[ -d $SAHARA_DIR/bin ]]; then - SAHARA_BIN_DIR=$SAHARA_DIR/bin -else - SAHARA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,sahara - -# Functions -# --------- - -# create_sahara_accounts() - Set up common required sahara accounts -# -# Tenant User Roles -# ------------------------------ -# service sahara admin -function create_sahara_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - SAHARA_USER=$(openstack user create \ - sahara \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email sahara@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $SAHARA_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAHARA_SERVICE=$(openstack service create \ - sahara \ - --type=data_processing \ - --description="Sahara Data Processing" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $SAHARA_SERVICE \ - --region RegionOne \ - --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" - fi -} - -# cleanup_sahara() - Remove residual data files, anything left over from -# previous runs that would need to clean up. -function cleanup_sahara { - - # Cleanup auth cache dir - sudo rm -rf $SAHARA_AUTH_CACHE_DIR -} - -# configure_sahara() - Set config files, create data dirs, etc -function configure_sahara { - - if [[ ! -d $SAHARA_CONF_DIR ]]; then - sudo mkdir -p $SAHARA_CONF_DIR - fi - sudo chown $STACK_USER $SAHARA_CONF_DIR - - # Copy over sahara configuration file and configure common parameters. 
- cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE - - # Create auth cache dir - sudo mkdir -p $SAHARA_AUTH_CACHE_DIR - sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR - rm -rf $SAHARA_AUTH_CACHE_DIR/* - - # Set obsolete keystone auth configs for backward compatibility - iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_AUTH_HOST - iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_AUTH_PORT - iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara - iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - - # Set actual keystone auth configs - iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $SAHARA_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $SAHARA_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $SAHARA_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara - iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR - iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA - - iniset $SAHARA_CONF_FILE DEFAULT verbose True - iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS - - iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara` - - if is_service_enabled neutron; then - iniset $SAHARA_CONF_FILE DEFAULT use_neutron true - iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true - fi - - if is_service_enabled heat; then - iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat - else - iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct - fi - - iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $SAHARA_CONF_FILE DEFAULT - fi - - recreate_database sahara utf8 - $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head -} - -# install_sahara() - Collect source and prepare -function install_sahara { - git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH - setup_develop $SAHARA_DIR -} - -# start_sahara() - Start running processes, including screen -function start_sahara { - screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" -} - -# stop_sahara() - Stop running processes -function stop_sahara { - # Kill the Sahara screen windows - screen -S $SCREEN_NAME -p sahara -X kill -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard deleted file mode 100644 index a81df0f7a8..0000000000 --- a/lib/sahara-dashboard +++ /dev/null @@ -1,72 +0,0 @@ -# lib/sahara-dashboard - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# - ``SERVICE_HOST`` - -# ``stack.sh`` calls the entry points in this order: -# -# - install_sahara_dashboard -# - configure_sahara_dashboard -# - cleanup_sahara_dashboard - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source 
$TOP_DIR/lib/horizon - -# Defaults -# -------- - -# Set up default repos -SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git} -SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master} - -SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git} -SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master} - -# Set up default directories -SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard -SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient - -# Functions -# --------- - -function configure_sahara_dashboard { - - echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - - if is_service_enabled neutron; then - echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - fi -} - -# install_sahara_dashboard() - Collect source and prepare -function install_sahara_dashboard { - install_python_saharaclient - git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH - setup_develop $SAHARA_DASHBOARD_DIR -} - -function install_python_saharaclient { - git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH - setup_develop $SAHARA_PYTHONCLIENT_DIR -} - -# Cleanup file settings.py from Sahara -function cleanup_sahara_dashboard { - sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py -} - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: - diff --git a/lib/stack b/lib/stack new file mode 100644 index 0000000000..bada26f1c2 --- /dev/null +++ b/lib/stack @@ -0,0 +1,40 @@ +#!/bin/bash +# +# lib/stack +# +# These functions are code snippets pulled out of ``stack.sh`` for easier +# re-use by Grenade. They can assume the same environment is available +# as in the lower part of ``stack.sh``, namely a valid stackrc has been sourced +# as well as all of the ``lib/*`` files for the services have been sourced. +# +# For clarity, all functions declared here that came from ``stack.sh`` +# shall be named with the prefix ``stack_``. + + +# Functions +# --------- + +# Generic service install handles venv creation if configured for service +# stack_install_service service +function stack_install_service { + local service=$1 + if type install_${service} >/dev/null 2>&1; then + # FIXME(dhellmann): Needs to be python3-aware at some point. + if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then + rm -rf ${PROJECT_VENV[$service]} + source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} ${ADDITIONAL_VENV_PACKAGES//,/ } + export PIP_VIRTUAL_ENV=${PROJECT_VENV[$service]:-} + + # Install other OpenStack prereqs that might come from source repos + install_oslo + install_keystonemiddleware + fi + install_${service} + if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then + unset PIP_VIRTUAL_ENV + fi + else + echo "No function declared with name 'install_${service}'." + exit 1 + fi +} diff --git a/lib/stackforge b/lib/stackforge deleted file mode 100644 index e6528afc59..0000000000 --- a/lib/stackforge +++ /dev/null @@ -1,68 +0,0 @@ -# lib/stackforge -# -# Functions to install stackforge libraries that we depend on so -# that we can try their git versions during devstack gate. 
-# -# This is appropriate for python libraries that release to pypi and are -# expected to be used beyond OpenStack like, but are requirements -# for core services in global-requirements. -# -# * wsme -# * pecan -# -# This is not appropriate for stackforge projects which are early stage -# OpenStack tools - -# Dependencies: -# ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# install_stackforge - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- -WSME_DIR=$DEST/wsme -PECAN_DIR=$DEST/pecan - -# Entry Points -# ------------ - -# install_stackforge() - Collect source and prepare -function install_stackforge { - # TODO(sdague): remove this once we get to Icehouse, this just makes - # for a smoother transition of existing users. - cleanup_stackforge - - git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH - setup_package $WSME_DIR - - git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH - setup_package $PECAN_DIR -} - -# cleanup_stackforge() - purge possibly old versions of stackforge libraries -function cleanup_stackforge { - # this means we've got an old version installed, lets get rid of it - # otherwise python hates itself - for lib in wsme pecan; do - if ! python -c "import $lib" 2>/dev/null; then - echo "Found old $lib... removing to ensure consistency" - local PIP_CMD=$(get_pip_command) - pip_install $lib - sudo $PIP_CMD uninstall -y $lib - fi - done -} - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/swift b/lib/swift index 1e24c2cbb8..862927437d 100644 --- a/lib/swift +++ b/lib/swift @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/swift # Functions to control the configuration and operation of the **Swift** service @@ -5,7 +7,7 @@ # # - ``functions`` file # - ``apache`` file -# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# - ``DEST``, `SWIFT_HASH` must be defined # - ``STACK_USER`` must be defined # - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined # - ``lib/keystone`` file @@ -22,19 +24,36 @@ # - _cleanup_swift_apache_wsgi # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_SWIFT=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- +if is_service_enabled tls-proxy; then + SWIFT_SERVICE_PROTOCOL="https" +fi + # Set up default directories +GITDIR["python-swiftclient"]=$DEST/python-swiftclient SWIFT_DIR=$DEST/swift -SWIFTCLIENT_DIR=$DEST/python-swiftclient -SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} + +# Swift virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["swift"]=${SWIFT_DIR}.venv + SWIFT_BIN_DIR=${PROJECT_VENV["swift"]}/bin +else + SWIFT_BIN_DIR=$(get_python_exec_prefix) +fi + SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} -SWIFT3_DIR=$DEST/swift3 + +SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} +SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} +SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} +SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} # TODO: add logging to different location. @@ -45,23 +64,33 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # Set ``SWIFT_CONF_DIR`` to the location of the configuration files. # Default is ``/etc/swift``. 
-# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly -SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}} +SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} -if is_service_enabled s-proxy && is_service_enabled swift3; then - # If we are using swift3, we can default the s3 port to swift instead +if is_service_enabled s-proxy && is_service_enabled s3api; then + # If we are using ``s3api``, we can default the S3 port to swift instead # of nova-objectstore - S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} + S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT} +fi + +if is_service_enabled g-api; then + # Minimum Cinder volume size is 1G so if Swift backend for Glance is + # only 1G we can not upload volume to image. + # Increase Swift disk size up to 2G + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=2G + SWIFT_MAX_FILE_SIZE_DEFAULT=1073741824 # 1G +else + # DevStack will create a loop-back disk formatted as XFS to store the + # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in + # kilobytes. + # Default is 1 gigabyte. + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G + SWIFT_MAX_FILE_SIZE_DEFAULT=536870912 # 512M fi -# DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in -# kilobytes. -# Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G # if tempest enabled the default size is 6 Gigabyte. if is_service_enabled tempest; then SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G} + SWIFT_MAX_FILE_SIZE_DEFAULT=5368709122 # Swift default 5G fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} @@ -72,13 +101,13 @@ SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb} # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at # the end of the pipeline. -SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST:-} # Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extras middlewares that need to be at # the beginning of the pipeline, before authentication middlewares. SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain} -# The ring uses a configurable number of bits from a path’s MD5 hash as +# The ring uses a configurable number of bits from a path's MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition # power indicates the partition count. Partitioning the full MD5 hash @@ -96,6 +125,11 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} SWIFT_REPLICAS=${SWIFT_REPLICAS:-1} SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) +# Set ``SWIFT_START_ALL_SERVICES`` to control whether all Swift +# services (including the *-auditor, *-replicator, *-reconstructor, etc. +# daemons) should be started. +SWIFT_START_ALL_SERVICES=$(trueorfalse True SWIFT_START_ALL_SERVICES) + # Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth # token should be placed in the logs. When keystone is used with PKI tokens, # the token values can be huge, seemingly larger the 2K, at the least. We @@ -103,20 +137,32 @@ SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) # trace through the logs when looking for its use. 
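As the partition-power comment above says, the ring carries 2^SWIFT_PARTITION_POWER_SIZE partitions, so the default of 9 yields 512. The arithmetic, checked in shell:

# Sketch: partition count implied by the default partition power.
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
echo "partition count: $(( 2 ** SWIFT_PARTITION_POWER_SIZE ))"    # -> 512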
SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12} -# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximun length of headers in +# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in # Swift API SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384} +# Set ``SWIFT_MAX_FILE_SIZE`` to configure the maximum file size in Swift API +# Default 500MB because the loopback file used for swift could be 1 or 2 GB +SWIFT_MAX_FILE_SIZE=${SWIFT_MAX_FILE_SIZE:-$SWIFT_MAX_FILE_SIZE_DEFAULT} + # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` -# Port bases used in port number calclution for the service "nodes" -# The specified port number will be used, the additinal ports calculated by +# Port bases used in port number calculation for the service "nodes" +# The specified port number will be used, the additional ports calculated by # base_port + node_num * 10 -OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} -CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} -ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} +OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613} +CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611} +ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612} + +# Enable tempurl feature +SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False} +SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,swift +# Toggle for deploying Swift under HTTPD + mod_wsgi +SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} + +# A space-separated list of storage node IPs that +# should be used to create the Swift rings +SWIFT_STORAGE_IPS=${SWIFT_STORAGE_IPS:-} # Functions @@ -125,6 +171,7 @@ TEMPEST_SERVICES+=,swift # Test if any Swift services are enabled # is_swift_enabled function is_swift_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"swift" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 return 1 } @@ -132,14 +179,11 @@ function is_swift_enabled { # cleanup_swift() - Remove residual data files function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + rm -rf ${SWIFT_DATA_DIR}/run/ - if is_apache_enabled_service swift; then + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _cleanup_swift_apache_wsgi fi } @@ -148,11 +192,12 @@ function cleanup_swift { function _cleanup_swift_apache_wsgi { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi disable_apache_site proxy-server + local node_number type for node_number in ${SWIFT_REPLICAS_SEQ}; do for type in object container account; do - site_name=${type}-server-${node_number} + local site_name=${type}-server-${node_number} disable_apache_site ${site_name} - sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site_name} + sudo rm -f $(apache_site_config_for ${site_name}) done done } @@ -160,18 +205,17 @@ function _cleanup_swift_apache_wsgi { # _config_swift_apache_wsgi() - Set WSGI config files of Swift function _config_swift_apache_wsgi { sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} - local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR - local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} # copy proxy vhost and wsgi file - sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template ${apache_vhost_dir}/proxy-server + sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template 
$(apache_site_config_for proxy-server) sudo sed -e " /^#/d;/^$/d; s/%PORT%/$proxy_port/g; s/%SERVICENAME%/proxy-server/g; s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; - " -i ${apache_vhost_dir}/proxy-server + " -i $(apache_site_config_for proxy-server) enable_apache_site proxy-server sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi @@ -181,18 +225,22 @@ function _config_swift_apache_wsgi { " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi # copy apache vhost file and set name and port + local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)] - container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)] - account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)] - - sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template ${apache_vhost_dir}/object-server-${node_number} + local object_port + object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) + local container_port + container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) + local account_port + account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) + + sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number}) sudo sed -e " s/%PORT%/$object_port/g; s/%SERVICENAME%/object-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; - " -i ${apache_vhost_dir}/object-server-${node_number} + " -i $(apache_site_config_for object-server-${node_number}) enable_apache_site object-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi @@ -201,14 +249,14 @@ function _config_swift_apache_wsgi { s/%SERVICECONF%/object-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi - sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template ${apache_vhost_dir}/container-server-${node_number} + sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template $(apache_site_config_for container-server-${node_number}) sudo sed -e " /^#/d;/^$/d; s/%PORT%/$container_port/g; s/%SERVICENAME%/container-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; - " -i ${apache_vhost_dir}/container-server-${node_number} + " -i $(apache_site_config_for container-server-${node_number}) enable_apache_site container-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi @@ -217,14 +265,14 @@ function _config_swift_apache_wsgi { s/%SERVICECONF%/container-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi - sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number} + sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template $(apache_site_config_for account-server-${node_number}) sudo sed -e " /^#/d;/^$/d; s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; - " -i ${apache_vhost_dir}/account-server-${node_number} + " -i $(apache_site_config_for account-server-${node_number}) enable_apache_site account-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi @@ -237,14 +285,14 @@ function 
_config_swift_apache_wsgi { # This function generates an object/container/account configuration # emulating 4 nodes on different ports -function generate_swift_config { +function generate_swift_config_services { local swift_node_config=$1 local node_id=$2 local bind_port=$3 local server_type=$4 - log_facility=$[ node_id - 1 ] - node_path=${SWIFT_DATA_DIR}/${node_number} + log_facility=$(( node_id - 1 )) + local node_path=${SWIFT_DATA_DIR}/${node_number} iniuncomment ${swift_node_config} DEFAULT user iniset ${swift_node_config} DEFAULT user ${STACK_USER} @@ -262,7 +310,7 @@ function generate_swift_config { iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} iniuncomment ${swift_node_config} DEFAULT workers - iniset ${swift_node_config} DEFAULT workers 1 + iniset ${swift_node_config} DEFAULT workers ${API_WORKERS:-1} iniuncomment ${swift_node_config} DEFAULT disable_fallocate iniset ${swift_node_config} DEFAULT disable_fallocate true @@ -270,10 +318,13 @@ function generate_swift_config { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes -} + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" + # Using a sed and not iniset/iniuncomment because we want to a global + # modification and make sure it works for new sections. + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} +} # configure_swift() - Set config files, create data dirs and loop image function configure_swift { @@ -283,10 +334,10 @@ function configure_swift { local swift_log_dir # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true - sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server - sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR} + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR} + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. @@ -298,7 +349,7 @@ function configure_swift { # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. 
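The per-node ports computed in the vhost setup above follow the base_port + 10 * (node_number - 1) rule described earlier. A quick sketch with the defaults from this file:

# Sketch: per-node object server ports for a 3-replica setup.
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613}
for node_number in 1 2 3; do
    echo "node ${node_number}: object server port $(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))"
done
# -> ports 6613, 6623 and 6633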
sed -e " - s/%GROUP%/${USER_GROUP}/; + s/%GROUP%/$(id -g -n ${STACK_USER})/; s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf @@ -311,8 +362,14 @@ function configure_swift { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf - cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${SWIFT_CONF_DIR}/container-sync-realms.conf + # To run container sync feature introduced in Swift ver 1.12.0, + # container sync "realm" is added in container-sync-realms.conf + local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf + cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile} + iniset ${csyncfile} realm1 key realm1key + iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/" iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} @@ -326,147 +383,181 @@ function configure_swift { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - - # Devstack is commonly run in a small slow environment, so bump the - # timeouts up. - # node_timeout is how long between read operations a node takes to - # respond to the proxy server - # conn_timeout is all about how long it takes a connect() system call to - # return + if is_service_enabled tls-proxy; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT} + else + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT} + fi + + # DevStack is commonly run in a small slow environment, so bump the timeouts up. 
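The rsyncd.conf rendering above is plain sed templating: %TOKEN% placeholders in a shipped template are substituted before the result is installed. The same technique on a throwaway file (file names here are hypothetical):

# Sketch: substitute %USER% / %GROUP% style placeholders in a template.
cat > /tmp/example.conf.in <<'EOF'
uid = %USER%
gid = %GROUP%
EOF
sed -e "
    s/%USER%/$(id -u -n)/;
    s/%GROUP%/$(id -g -n)/;
" /tmp/example.conf.in > /tmp/example.conf
cat /tmp/example.conf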
+ # ``node_timeout`` is the node read operation response time to the proxy server + # ``conn_timeout`` is how long it takes a connect() system call to return iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 - # Skipped due to bug 1294789 - ## Configure Ceilometer - #if is_service_enabled ceilometer; then - # iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" - # SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" - #fi + # Versioned Writes + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + + # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_notification_url) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi - # Restrict the length of auth tokens in the swift proxy-server logs. + # Restrict the length of auth tokens in the Swift ``proxy-server`` logs. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} - # By default Swift will be installed with keystone and tempauth middleware - # and add the swift3 middleware if its configured for it. The token for + # By default Swift will be installed with Keystone and tempauth middleware + # and add the s3api middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the # token for keystoneauth would have the standard reseller_prefix `AUTH_` - if is_service_enabled swift3;then - swift_pipeline+=" swift3 s3token " + if is_service_enabled s3api;then + swift_pipeline+=" s3api" fi - swift_pipeline+=" authtoken keystoneauth tempauth " + if is_service_enabled keystone; then + swift_pipeline+=" authtoken" + if is_service_enabled s3api;then + swift_pipeline+=" s3token" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true + fi + swift_pipeline+=" keystoneauth" + fi + + swift_pipeline+=" tempauth " + sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server allow_account_management true # Configure Crossdomain iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" - # Configure Keystone - sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR - # This causes the authtoken middleware to use the same python logging - # adapter provided by the swift proxy-server, so that request transaction + # Configure authtoken middleware to use the same Python logging + # adapter provided by the Swift ``proxy-server``, so that request transaction # IDs will included in all of its log messages. 
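The pipeline assembly above works by string-splicing: the `tempauth` token in the sample config's `pipeline =` line is replaced with the accumulated middleware list. A self-contained sketch of that sed against a scratch file (the middleware names are illustrative):

# Sketch: splice extra middleware into a paste pipeline line.
swift_pipeline=" s3api authtoken keystoneauth tempauth "
echo 'pipeline = catch_errors tempauth proxy-server' > /tmp/pipeline.conf
sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} formpost staticweb/ ; }" /tmp/pipeline.conf
cat /tmp/pipeline.conf
# -> pipeline = catch_errors  s3api authtoken keystoneauth tempauth  formpost staticweb proxy-server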
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory + configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False + + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - if is_service_enabled swift3; then - cat <>${SWIFT_CONFIG_PROXY_SERVER} -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. -[filter:s3token] -paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory -auth_port = ${KEYSTONE_AUTH_PORT} -auth_host = ${KEYSTONE_AUTH_HOST} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -cafile = ${KEYSTONE_SSL_CA} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} - -[filter:swift3] -use = egg:swift3#swift3 -EOF - fi + # Configure Tempauth. In the sample config file Keystoneauth is commented + # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth + # otherwise, this code also sets the reseller_prefix for Keystoneauth. + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + + # Allow both reseller prefixes to be used with domain_remap + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH" cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE} + iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_file_size ${SWIFT_MAX_FILE_SIZE} + local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf + local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object + generate_swift_config_services ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache - # Using a sed and not iniset/iniuncomment because we want to a global - # modification and make sure it works for new sections. 
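The global sed mentioned above (now performed inside generate_swift_config_services) uncomments every `# recon_cache_path` occurrence in one pass, which section-scoped helpers like iniset/iniuncomment cannot do. Its effect on a sample fragment:

# Sketch: the recon_cache_path rewrite applied to a scratch config.
cat > /tmp/recon.conf <<'EOF'
[object-auditor]
# recon_cache_path = /var/cache/swift
[object-replicator]
# recon_cache_path = /var/cache/swift
EOF
sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = /opt/stack/data/swift/cache," /tmp/recon.conf
cat /tmp/recon.conf
# both sections now read: recon_cache_path = /opt/stack/data/swift/cache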
- sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container - iniuncomment ${swift_node_config} app:container-server allow_versions - iniset ${swift_node_config} app:container-server allow_versions "true" - sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} + generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account - sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} + generate_swift_config_services ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} done - # Set new accounts in tempauth to match keystone tenant/user (to make testing easier) - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest1 "testing .admin" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest2_swiftusertest2 "testing2 .admin" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest3 "testing3 .admin" + # Set new accounts in tempauth to match keystone project/user (to make testing easier) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest1 "testing .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest2_swiftusertest2 "testing2 .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest3 "testing3 .admin" testfile=${SWIFT_CONF_DIR}/test.conf cp ${SWIFT_DIR}/test/sample.conf ${testfile} # Set accounts for functional tests - iniset ${testfile} func_test account swifttenanttest1 + iniset ${testfile} func_test account swiftprojecttest1 iniset ${testfile} func_test username swiftusertest1 iniset ${testfile} func_test username3 swiftusertest3 - iniset ${testfile} func_test account2 swifttenanttest2 + iniset ${testfile} func_test account2 swiftprojecttest2 iniset ${testfile} func_test username2 swiftusertest2 + iniset ${testfile} func_test account4 swiftprojecttest4 + iniset ${testfile} func_test username4 swiftusertest4 + iniset ${testfile} func_test password4 testing4 + iniset ${testfile} func_test domain4 swift_test - if is_service_enabled key;then + if is_service_enabled keystone; then iniuncomment ${testfile} func_test auth_version + local auth_vers + auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} - iniset ${testfile} func_test auth_prefix /v2.0/ + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then + iniset ${testfile} func_test auth_port 443 + else + iniset ${testfile} 
func_test auth_port 80 + fi + iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI} + if [[ "$auth_vers" == "3" ]]; then + iniset ${testfile} func_test auth_prefix /identity/v3/ + else + iniset ${testfile} func_test auth_prefix /identity/v2.0/ + fi + if is_service_enabled tls-proxy; then + iniset ${testfile} func_test cafile ${SSL_BUNDLE_FILE} + iniset ${testfile} func_test web_front_end apache2 + fi fi - swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R ${STACK_USER}:adm ${swift_log_dir} + local user_group + user_group=$(id -g ${STACK_USER}) + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR} + + local swift_log_dir=${SWIFT_DATA_DIR}/logs + sudo rm -rf ${swift_log_dir} + local swift_log_group=adm + sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf + echo "MaxMessageSize 6k" | sudo tee /etc/rsyslog.d/99-maxsize.conf # restart syslog to take the changes sudo killall -HUP rsyslogd fi - if is_apache_enabled_service swift; then + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _config_swift_apache_wsgi fi } @@ -478,122 +569,107 @@ function create_swift_disk { # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. - USER_GROUP=$(id -g ${STACK_USER}) - sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} - sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR} + local user_group + user_group=$(id -g ${STACK_USER}) + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DISK_IMAGE} - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DISK_IMAGE} - sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} - - truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} - - # Make a fresh XFS filesystem - /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 - fi + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} # Create a link to the above mount and # create all of the directories needed to emulate a few different servers + local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; - drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} - node=${SWIFT_DATA_DIR}/${node_number}/node - node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive - sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device - sudo chown -R ${STACK_USER}: ${node} + # node_devices must match *.conf devices option + local node_devices=${SWIFT_DATA_DIR}/${node_number} + local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number + sudo ln -sf $real_devices $node_devices; + local device=${real_devices}/sdb1 + [[ -d $device ]] && continue + sudo install -o ${STACK_USER} -g $user_group -d $device done } -# create_swift_accounts() - Set up standard swift accounts and extra + +# create_swift_accounts() - Set up standard Swift accounts and extra # one for tests we do this by attaching all words in the account name # since we want to make it compatible with tempauth which use # underscores for separators. -# Tenant User Roles -# ------------------------------------------------------------------ -# service swift service -# swifttenanttest1 swiftusertest1 admin -# swifttenanttest1 swiftusertest3 anotherrole -# swifttenanttest2 swiftusertest2 admin +# Project User Roles Domain +# ------------------------------------------------------------------- +# service swift service default +# swiftprojecttest1 swiftusertest1 admin default +# swiftprojecttest1 swiftusertest3 anotherrole default +# swiftprojecttest2 swiftusertest2 admin default +# swiftprojecttest4 swiftusertest4 admin swift_test function create_swift_accounts { - # Defines specific passwords used by tools/create_userrc.sh - SWIFTUSERTEST1_PASSWORD=testing - SWIFTUSERTEST2_PASSWORD=testing2 - SWIFTUSERTEST3_PASSWORD=testing3 - - KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - SWIFT_USER=$(openstack user create \ - swift \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email=swift@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $SWIFT_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SWIFT_SERVICE=$(openstack service create \ - swift \ - --type="object-store" \ - --description="Swift Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $SWIFT_SERVICE \ - --region RegionOne \ - --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8080" \ - --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" - fi - - SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2) - die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \ - --project 
"$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2) + # Defines specific passwords used by ``tools/create_userrc.sh`` + # As these variables are used by ``create_userrc.sh,`` they must be exported + # The _password suffix is expected by ``create_userrc.sh``. + export swiftusertest1_password=testing + export swiftusertest2_password=testing2 + export swiftusertest3_password=testing3 + export swiftusertest4_password=testing4 + + local another_role + another_role=$(get_or_create_role "anotherrole") + + # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses + # temp urls, which break when uploaded by a non-admin role + create_service_user "swift" "admin" + + get_or_create_service "swift" "object-store" "Swift Service" + get_or_create_endpoint \ + "object-store" \ + "$REGION_NAME" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" + + local swift_project_test1 + swift_project_test1=$(get_or_create_project swiftprojecttest1 default) + die_if_not_set $LINENO swift_project_test1 "Failure creating swift_project_test1" + SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \ + "default" "test@example.com") die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" - openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE - - SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \ - --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2) - die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" - openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE - - SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2) - die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - - SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \ - --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2) - die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" - openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE + get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_project_test1 + + local swift_user_test3 + swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \ + "default" "test3@example.com") + die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3" + get_or_add_user_project_role $another_role $swift_user_test3 $swift_project_test1 + + local swift_project_test2 + swift_project_test2=$(get_or_create_project swiftprojecttest2 default) + die_if_not_set $LINENO swift_project_test2 "Failure creating swift_project_test2" + + local swift_user_test2 + swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \ + "default" "test2@example.com") + die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2" + get_or_add_user_project_role admin $swift_user_test2 $swift_project_test2 + + local swift_domain + swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing') + die_if_not_set $LINENO swift_domain "Failure creating swift_test domain" + + local swift_project_test4 + swift_project_test4=$(get_or_create_project swiftprojecttest4 $swift_domain) + die_if_not_set $LINENO swift_project_test4 
"Failure creating swift_project_test4" + + local swift_user_test4 + swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \ + $swift_domain "test4@example.com") + die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4" + get_or_add_user_project_role admin $swift_user_test4 $swift_project_test4 } # init_swift() - Initialize rings function init_swift { local node_number # Make sure to kill all swift processes first - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # Forcibly re-create the backing filesystem create_swift_disk @@ -604,40 +680,75 @@ function init_swift { rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + + # The ring will be created on each node, and because the order of + # nodes is identical we can use a seed for rebalancing, making it + # possible to get a ring on each node that uses the same partition + # assignment. + if [[ -n $SWIFT_STORAGE_IPS ]]; then + local node_number + node_number=1 + + for node in ${SWIFT_STORAGE_IPS}; do + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 + let "node_number=node_number+1" + done - for node_number in ${SWIFT_REPLICAS_SEQ}; do - swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 - swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 - swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 - done - swift-ring-builder object.builder rebalance - swift-ring-builder container.builder rebalance - swift-ring-builder account.builder rebalance - } && popd >/dev/null + else - # Create cache dir - sudo mkdir -p $SWIFT_AUTH_CACHE_DIR - sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR - rm -f $SWIFT_AUTH_CACHE_DIR/* + for node_number in ${SWIFT_REPLICAS_SEQ}; do + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + done + fi + + # We use a seed for rebalancing. 
Doing this allows us to create
+    # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same
+    $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42
+    $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42
+    $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42
+    } && popd >/dev/null
 }

 function install_swift {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
-    setup_develop $SWIFT_DIR
-    if is_apache_enabled_service swift; then
+    # keystonemiddleware needs to be installed via keystone extras as defined
+    # in setup.cfg, see bug #1909018 for more details.
+    setup_develop $SWIFT_DIR keystone
+    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         install_apache_wsgi
     fi
 }

 function install_swiftclient {
-    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
-    setup_develop $SWIFTCLIENT_DIR
+    if use_library_from_git "python-swiftclient"; then
+        git_clone_by_name "python-swiftclient"
+        setup_dev_lib "python-swiftclient"
+    fi
 }

-# start_swift() - Start running processes, including screen
+# install_ceilometermiddleware() - Collect source and prepare
+# note that this doesn't really have anything to do with ceilometer;
+# though ceilometermiddleware has ceilometer in its name as an
+# artifact of history, it is not a ceilometer specific tool. It
+# simply generates pycadf-based notifications about requests and
+# responses on the swift proxy
+function install_ceilometermiddleware {
+    if use_library_from_git "ceilometermiddleware"; then
+        git_clone_by_name "ceilometermiddleware"
+        setup_dev_lib "ceilometermiddleware"
+    else
+        pip_install_gr ceilometermiddleware
+    fi
+}
+
+# start_swift() - Start running processes
 function start_swift {
     # (re)start memcached to make sure we have a clean memcache.
     restart_service memcached
@@ -651,62 +762,102 @@ function start_swift {
         start_service rsyncd
     fi

-    if is_apache_enabled_service swift; then
+    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
+        # Apache should serve the "PACO" a.k.a "main" services
         restart_apache_server
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
-        screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
-        if [[ ${SWIFT_REPLICAS} == 1 ]]; then
-            for type in object container account; do
-                screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
-            done
-        fi
+        # The rest of the services should be started in the background
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
         return 0
     fi
-    # By default with only one replica we are launching the proxy,
-    # container, account and object server in screen in foreground and
-    # other services in background. If we have SWIFT_REPLICAS set to something
-    # greater than one we first spawn all the swift services then kill the proxy
-    # service so we can run it in foreground in screen. ``swift-init ...
-    # {stop|restart}`` exits with '1' if no servers are running, ignore it just
-    # in case
-    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
-    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
-        todo="object container account"
-    fi
-    for type in proxy ${todo}; do
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
-    done
-    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+
+    # By default with only one replica we are launching the proxy, container,
+    # account and object server in screen in foreground. Then, the rest of
+    # the services are optionally started.
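The fixed seed (42) passed to rebalance above makes partition assignment deterministic, so rings built independently on several nodes from the same device list come out identical. A standalone sketch (requires swift-ring-builder on PATH; the device string is illustrative):

# Sketch: deterministic ring rebalance using a fixed seed.
cd "$(mktemp -d)"
swift-ring-builder object.builder create 9 1 1
swift-ring-builder object.builder add z1-127.0.0.1:6613/sdb1 1
swift-ring-builder object.builder rebalance 42
# Re-running these exact commands on another node produces an identical ring.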
+ # + # If we have ``SWIFT_REPLICAS`` set to something greater than one + # we first spawn *all* the Swift services then kill the proxy service + # so we can run it in foreground in screen. + # + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are + # running, ignore it just in case if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + local foreground_services type + + foreground_services="object container account" + for type in ${foreground_services}; do + run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" done + + if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + else + # The container-sync daemon is strictly needed to pass the container + # sync Tempest tests. + enable_service s-container-sync + run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf" + fi + else + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true + fi + + if is_service_enabled tls-proxy; then + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} + start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE + fi + run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + + # We also started the storage services, but proxy started last and + # will take the longest to start, so by the time it comes up, we're + # probably fine. + echo "Waiting for swift proxy to start..." + if ! wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then + die $LINENO "swift proxy did not start" + fi + + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + swift_configure_tempurls fi } -# stop_swift() - Stop running processes (non-screen) +# stop_swift() - Stop running processes function stop_swift { + local type - if is_apache_enabled_service swift; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 fi - # screen normally killed by unstack.sh - if type -p swift-init >/dev/null; then - swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + # screen normally killed by ``unstack.sh`` + if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers - # Maintain the iteration as screen_stop() has some desirable side-effects + # Maintain the iteration as stop_process() has some desirable side-effects for type in proxy object container account; do - screen_stop s-${type} + stop_process s-${type} done # Blast out any stragglers - pkill -f swift- + pkill -f swift- || true +} + +function swift_configure_tempurls { + # note we are using swift credentials! 
+ openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ + object store account \ + set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } # Restore xtrace -$XTRACE +$_XTRACE_LIB_SWIFT # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/tcpdump b/lib/tcpdump new file mode 100644 index 0000000000..16e8269d02 --- /dev/null +++ b/lib/tcpdump @@ -0,0 +1,43 @@ +#!/bin/bash +# +# lib/tcpdump +# Functions to start and stop a tcpdump + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_tcpdump +# - stop_tcpdump + +# Save trace setting +_XTRACE_TCPDUMP=$(set +o | grep xtrace) +set +o xtrace + +TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap} + +# e.g. for iscsi +# "-i any tcp port 3260" +TCPDUMP_ARGS=${TCPDUMP_ARGS:-""} + +# start_tcpdump() - Start running processes +function start_tcpdump { + # Run a tcpdump with given arguments and save the packet capture + if is_service_enabled tcpdump; then + if [[ -z "${TCPDUMP_ARGS}" ]]; then + die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set" + fi + touch ${TCPDUMP_OUTPUT} + run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root + fi +} + +# stop_tcpdump() stop tcpdump process +function stop_tcpdump { + stop_process tcpdump +} + +# Restore xtrace +$_XTRACE_TCPDUMP diff --git a/lib/tempest b/lib/tempest index fb9971c3a7..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/tempest # Install and configure Tempest @@ -9,34 +11,34 @@ # - ``DEST``, ``FILES`` # - ``ADMIN_PASSWORD`` # - ``DEFAULT_IMAGE_NAME`` +# - ``DEFAULT_IMAGE_FILE_NAME`` # - ``S3_SERVICE_PORT`` # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares # - ``PUBLIC_NETWORK_NAME`` -# - ``Q_USE_NAMESPACE`` -# - ``Q_ROUTER_NAME`` -# - ``Q_L3_ENABLED`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # -# - ``ALT_*`` (similar vars exists in keystone_data.sh) +# - ``ALT_*`` # - ``LIVE_MIGRATION_AVAILABLE`` # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # - ``DEFAULT_INSTANCE_TYPE`` # - ``DEFAULT_INSTANCE_USER`` -# - ``CINDER_MULTI_LVM_BACKEND`` +# - ``DEFAULT_INSTANCE_ALT_USER`` +# - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` +# - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: # # - install_tempest # - configure_tempest -# - init_tempest # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_TEMPEST=$(set +o | grep xtrace) set +o xtrace @@ -49,28 +51,164 @@ TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} -NOVA_SOURCE_DIR=$DEST/nova - -BUILD_INTERVAL=1 -BUILD_TIMEOUT=196 +# This is the timeout that tempest will wait for a VM to change state, +# spawn, delete, etc. +# The default is set to 196 seconds. +BUILD_TIMEOUT=${BUILD_TIMEOUT:-196} +# This must be False on stable branches, as master tempest +# deps do not match stable branch deps. Set this to True to +# have tempest installed in DevStack by default. 
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"} -BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}" +# This variable is passed directly to pip install inside the common tox venv +# that is created +TEMPEST_PLUGINS=${TEMPEST_PLUGINS:-0} # Cinder/Volume variables TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} -TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} -TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} +TEMPEST_DEFAULT_VOLUME_VENDOR="Open Source" +TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} +TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" +TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} + +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} # Neutron/Network variables -IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) +IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) +IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) + +# Do we want to make a configuration where Tempest has admin on +# the cloud. We don't always want to so that we can ensure Tempest +# would work on a public cloud. +TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) + +# Credential provider configuration option variables +TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} +TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) + +# The number of workers tempest is expected to be run with. This is used for +# generating a accounts.yaml for running with test-accounts. 
This is also the +# same variable that devstack-gate uses to specify the number of workers that +# it will run tempest with +TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} + +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) # Functions # --------- +# remove_disabled_extension - removes disabled extensions from the list of extensions +# to test for a given service +function remove_disabled_extensions { + local extensions_list=$1 + shift + local disabled_exts=$* + remove_disabled_services "$extensions_list" "$disabled_exts" +} + +# image_size_in_gib - converts an image size from bytes to GiB, rounded up +# Takes an image ID parameter as input +function image_size_in_gib { + local size + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) + echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" +} + +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. For detail problem, + # refer to the https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + else + echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." + cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c + # NOTE: setting both tox env var and once Tempest start using new var + # TOX_CONSTRAINTS_FILE then we can remove the old one. + export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + fi +} + +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes 3 arguments, an array and two +# variables. The array will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# set as the value img_id ($2) parameters. +function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + + # start with a fresh array in case we are called multiple times + img_array=() + + # NOTE(gmaan): Most of the iso image require ssh to be enabled explicitly + # and if we set those iso images in image_ref and image_ref_alt that can + # cause test to fail because many tests using image_ref and image_ref_alt + # to boot server also perform ssh. We skip to set iso image in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE. 
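get_active_images above returns its results through bash namerefs (`declare -n`, bash 4.3+): the caller passes variable names, and the function fills them in place. The mechanism in isolation (names below are hypothetical):

# Sketch: returning values via nameref parameters, as get_active_images does.
function fill_list {
    declare -n out_array=$1
    declare -n out_first=$2
    out_array=(img-aaa img-bbb img-ccc)
    out_first=${out_array[0]}
}

declare -a my_images
my_default=""
fill_list my_images my_default
echo "${#my_images[@]} images, first: ${my_default}"    # -> 3 images, first: img-aaa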
+ while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { - setup_develop $TEMPEST_DIR + if [[ "$INSTALL_TEMPEST" == "True" ]]; then + setup_develop $TEMPEST_DIR + else + # install testr since its used to process tempest logs + pip_install_gr testrepository + fi + + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images @@ -82,11 +220,12 @@ function configure_tempest { local available_flavors local flavors_ref local flavor_lines + local flavor_ref_size + local flavor_ref_alt_size local public_network_id local public_router_id - local tenant_networks_reachable - local boto_instance_type="m1.tiny" - local ssh_connect_method="fixed" + local ssh_connect_method="floating" + local disk # Save IFS ifs=$IFS @@ -101,137 +240,168 @@ function configure_tempest { # ... Also ensure we only take active images, so we don't get snapshots in process declare -a images - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" - fi - images+=($IMAGE_UUID) - done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') - - case "${#images[*]}" in - 0) - echo "Found no valid images to use!" - exit 1 - ;; - 1) - if [ -z "$image_uuid" ]; then - image_uuid=${images[0]} - image_uuid_alt=${images[0]} - fi - ;; - *) - if [ -z "$image_uuid" ]; then - image_uuid=${images[0]} - image_uuid_alt=${images[1]} - fi - ;; - esac - - # Create tempest.conf from tempest.conf.sample - # copy every time, because the image UUIDS are going to change - if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then - sudo mkdir -p $TEMPEST_CONFIG_DIR - fi - sudo chown $STACK_USER $TEMPEST_CONFIG_DIR - cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG - chmod 644 $TEMPEST_CONFIG - - password=${ADMIN_PASSWORD:-secrete} - - # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo - # user and tenant are set up... - ADMIN_USERNAME=${ADMIN_USERNAME:-admin} - ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} - ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-Default} - TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo} - TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo} - ALT_USERNAME=${ALT_USERNAME:-alt_demo} - ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} - - # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior - # Tempest creates instane types for himself - if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - available_flavors=$(nova flavor-list) - if [[ ! 
( $available_flavors =~ 'm1.nano' ) ]]; then - if is_arch "ppc64"; then - # qemu needs at least 128MB of memory to boot on ppc64 - nova flavor-create m1.nano 42 128 0 1 - else - nova flavor-create m1.nano 42 64 0 1 + if is_service_enabled glance; then + get_active_images images image_uuid + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi fi - flavor_ref=42 - boto_instance_type=m1.nano - if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - if is_arch "ppc64"; then - nova flavor-create m1.micro 84 256 0 1 - else - nova flavor-create m1.micro 84 128 0 1 - fi - fi - flavor_ref_alt=84 - else - # Check Nova for existing flavors and, if set, look for the - # ``DEFAULT_INSTANCE_TYPE`` and use that. - boto_instance_type=$DEFAULT_INSTANCE_TYPE - flavor_lines=`nova flavor-list` - IFS=$'\r\n' - flavors="" - for line in $flavor_lines; do - f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") - flavors="$flavors $f" - done - for line in $flavor_lines; do - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - done + case "${#images[*]}" in + 0) + echo "Found no valid images to use!" + exit 1 + ;; + 1) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + fi + image_uuid_alt=$image_uuid + ;; + *) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in ${images[@]}; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + break + fi + done + fi + ;; + esac + fi - IFS=" " - flavors=($flavors) - num_flavors=${#flavors[*]} - echo "Found $num_flavors flavors" - if [[ $num_flavors -eq 0 ]]; then - echo "Found no valid flavors to use!" - exit 1 - fi - flavor_ref=${flavors[0]} - flavor_ref_alt=$flavor_ref - - # ensure flavor_ref and flavor_ref_alt have different values - # some resize instance in tempest tests depends on this. - for f in ${flavors[@]:1}; do - if [[ $f -ne $flavor_ref ]]; then - flavor_ref_alt=$f - break + # (Re)create ``tempest.conf`` + # Create every time because the image UUIDS are going to change + sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR + rm -f $TEMPEST_CONFIG + + local password=${ADMIN_PASSWORD:-secret} + + # See ``lib/keystone`` where these users and tenants are set up + local admin_username=${ADMIN_USERNAME:-admin} + local admin_project_name=${ADMIN_TENANT_NAME:-admin} + local admin_domain_name=${ADMIN_DOMAIN_NAME:-Default} + local alt_username=${ALT_USERNAME:-alt_demo} + local alt_project_name=${ALT_TENANT_NAME:-alt_demo} + local admin_project_id + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") + + if is_service_enabled nova; then + # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior + # Tempest creates its own instance types + available_flavors=$(openstack --os-cloud devstack-admin flavor list) + if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then + if [[ ! 
( $available_flavors =~ 'm1.nano' ) ]]; then + # Determine the flavor disk size based on the image size. + disk=$(image_size_in_gib $image_uuid) + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi + flavor_ref=42 + if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then + # Determine the alt flavor disk size based on the alt image size. + disk=$(image_size_in_gib $image_uuid_alt) + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + fi + flavor_ref_alt=84 + else + # Check Nova for existing flavors and, if ``DEFAULT_INSTANCE_TYPE`` is set, use it. + IFS=$'\r\n' + flavors="" + for line in $available_flavors; do + f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") + flavors="$flavors $f" + done + + for line in $available_flavors; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + done + + IFS=" " + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then + echo "Found no valid flavors to use!" + exit 1 + fi + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") + + # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. + # Some instance resize tests in tempest depend on this. + for f in ${flavors[@]:1}; do + if [[ "$f" != "$flavor_ref" ]]; then + # + # NOTE(sdatko): Resize is only possible when target flavor + # is not smaller than the original one. For + # Tempest tests, in case there was a bigger + # flavor selected as default, e.g. m1.small, + # we need to perform an additional check. + # + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") + if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then + continue + fi + + flavor_ref_alt=$f + break + fi + done + fi fi - if [ "$Q_USE_NAMESPACE" != "False" ]; then - tenant_networks_reachable=false - if ! is_service_enabled n-net; then - ssh_connect_method="floating" + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ -n "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True fi - else - tenant_networks_reachable=true + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} - if [ "$Q_L3_ENABLED" = "True" ]; then - public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ - awk '{print $2}') - if [ "$Q_USE_NAMESPACE" == "False" ]; then - # If namespaces are disabled, devstack will create a single - # public router that tempest should be configured to use. - public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ - { print \$2 }") - fi
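The flavor_ref_alt disk-size guard above matters because nova rejects a resize to a flavor with a smaller disk. A standalone sketch of the same comparison, assuming openstack CLI access; the flavor names here are examples only:

```bash
# Hypothetical check: is the destination a valid resize target for the source?
# Resize requires the target disk to be at least as large as the source disk.
src_disk=$(openstack flavor show --format value --column disk m1.small)
dst_disk=$(openstack flavor show --format value --column disk m1.medium)
if [[ "$dst_disk" -lt "$src_disk" ]]; then
    echo "m1.medium has a smaller disk than m1.small; resize would fail"
fi
```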
+ # the public network (for floating ip access) is only available + # if the extension is enabled. + # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created + # and the public_network_id should not be set. + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) + # make sure shared network presence does not confuse the tempest tests + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi + iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG + # Oslo - iniset $TEMPEST_CONFIG DEFAULT lock_path $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH iniset $TEMPEST_CONFIG DEFAULT use_stderr False iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log @@ -240,134 +410,314 @@ function configure_tempest { # Timeouts iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG compute build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONFIG volume build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONFIG boto build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONFIG boto http_socket_timeout 5 # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" - iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" - iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME - iniset $TEMPEST_CONFIG identity password "$password" - iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME - iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME - iniset $TEMPEST_CONFIG identity alt_password "$password" - iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME - iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME - iniset $TEMPEST_CONFIG identity admin_password "$password" - iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME - iniset $TEMPEST_CONFIG identity admin_domain_name $ADMIN_DOMAIN_NAME - iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} + iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" + iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT + if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then + iniset $TEMPEST_CONFIG auth admin_username $admin_username + iniset $TEMPEST_CONFIG auth admin_password "$password" + iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name + iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name + fi + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} + if is_service_enabled tls-proxy; then + iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE + fi + + # Identity Features + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True + fi + + # When LDAP is enabled, domain-specific drivers are also enabled, and the users + # and groups identity tests must adapt to this scenario
+ if is_service_enabled ldap; then + iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True + fi + + # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike + # as this is supported in Queens and beyond. + iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True + + # In Queens and later, application credentials are enabled by default + # so remove this once Tempest no longer supports Pike. + iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True + + # In Train and later, access rules for application credentials are enabled + # by default so remove this once Tempest no longer supports Stein. + iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True # Image - # for the gate we want to be able to override this variable so we aren't - # doing an HTTP fetch over the wide internet for this test + # We want to be able to override this variable in the gate to avoid + # doing an external HTTP fetch for this test. if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi + iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW + iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True + if is_service_enabled g-api-r; then + iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote + fi # Compute - iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED - iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME - iniset $TEMPEST_CONFIG compute ip_version_for_ssh 4 - iniset $TEMPEST_CONFIG compute ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG compute image_ref $image_uuid - iniset $TEMPEST_CONFIG compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method + iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method + if ! is_service_enabled neutron; then + iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME + fi + + # Set the service catalog entry for Tempest to run on. Typically + # used to try different compute API version targets. The tempest + # default is 'compute', which is typically valid, so only set this + # if you want to change it. + if [[ -n "$TEMPEST_COMPUTE_TYPE" ]]; then + iniset $TEMPEST_CONFIG compute catalog_type $TEMPEST_COMPUTE_TYPE + fi # Compute Features + # Set the microversion range for compute tests. + # This is used to run the Nova microversions tests. + # Setting the [None, latest] microversion range allows Tempest to run all microversion tests. + # NOTE: to avoid microversion test failures on stable branches, "tempest_compute_max_microversion" + # must be changed on each release from "latest" to the max version supported by that stable release. + local tempest_compute_min_microversion=${TEMPEST_COMPUTE_MIN_MICROVERSION:-None} + local tempest_compute_max_microversion=${TEMPEST_COMPUTE_MAX_MICROVERSION:-"latest"}
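The `${VAR:-None}` expansions above are how a user override, the "None" sentinel, and "latest" interact; a quick illustration of the semantics (the values shown are examples only):

```bash
# ${VAR:-default} keeps VAR when it is set and non-empty, otherwise it
# substitutes the default without assigning anything to VAR.
unset TEMPEST_COMPUTE_MIN_MICROVERSION
echo "${TEMPEST_COMPUTE_MIN_MICROVERSION:-None}"     # -> None (unset, default used)

TEMPEST_COMPUTE_MAX_MICROVERSION="2.95"
echo "${TEMPEST_COMPUTE_MAX_MICROVERSION:-latest}"   # -> 2.95 (explicit override wins)
```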
+ # Reset microversions to None when the v2.0 API (compute_legacy) is in use, as it does not support microversions. + # "None" for both means no microversion testing. + if [[ "$TEMPEST_COMPUTE_TYPE" == "compute_legacy" ]]; then + tempest_compute_min_microversion=None + tempest_compute_max_microversion=None + fi + if [ "$tempest_compute_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute min_microversion + else + iniset $TEMPEST_CONFIG compute min_microversion $tempest_compute_min_microversion + fi + if [ "$tempest_compute_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute max_microversion + else + iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion + fi + + iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} - # Compute admin - iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME - iniset $TEMPEST_CONFIG "compute-admin" password "$password" - iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME + # Starting Wallaby, nova sanitizes instance hostnames by replacing freeform characters with dashes iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True - # Network - iniset $TEMPEST_CONFIG network api_version 2.0 - iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" + if [[ -n "$NOVA_FILTERS" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} + fi + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True + fi + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True + fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi + + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
+ local nova_policy_roles="admin,manager,member,reader,service" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles + + # Network + iniset $TEMPEST_CONFIG network project_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY - # boto - iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" - iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" - iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" - iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-x86_64-initrd.manifest.xml - iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-x86_64-blank.img.manifest.xml - iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-x86_64-vmlinuz.manifest.xml - iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" - iniset $TEMPEST_CONFIG boto http_socket_timeout 30 - iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - - # Orchestration Tests - if is_service_enabled heat; then - if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then - iniset $TEMPEST_CONFIG orchestration image_ref $(basename "$HEAT_CFN_IMAGE_URL" ".qcow2") - fi - # build a specialized heat flavor that is likely to be fast - available_flavors=$(nova flavor-list) - if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then - nova flavor-create m1.heat 451 1024 0 2 - fi - iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" - iniset $TEMPEST_CONFIG orchestration build_timeout 900 - fi + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" # Scenario - iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-x86_64-uec" - iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-x86_64-blank.img" - iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-x86_64-initrd" - iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-x86_64-vmlinuz" + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} + iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + + # Since version 0.6.0 cirros uses the dhcpcd dhcp client by default. Prior to + # 0.6.0, cirros used udhcpc (the only client available at that time), which is also tempest's default. + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use the dhcpcd client when the version is greater than or equal to 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi - # Large Ops Number - iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + # If using provider networking, use the physical network for validation rather than private + TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME + if is_provider_network; then + TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK + fi + # Validation + iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True} + iniset $TEMPEST_CONFIG 
validation ip_version_for_ssh 4 + iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros} + iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER} + iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume + # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) + # Only turn on TEMPEST_VOLUME_MANAGE_VOLUME by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME) + # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends + # in Cinder and the libvirt driver in Nova. + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then + TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) + # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi + local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} + local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} + if [ "$tempest_volume_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume min_microversion + else + iniset $TEMPEST_CONFIG volume min_microversion $tempest_volume_min_microversion + fi + + if [ "$tempest_volume_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume max_microversion + else + iniset $TEMPEST_CONFIG volume max_microversion $tempest_volume_max_microversion + fi + if ! 
is_service_enabled c-bak; then iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi - CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) - if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then + + # Using ``CINDER_ENABLED_BACKENDS`` + # Cinder uses a comma separated list with "type:backend_name": + # CINDER_ENABLED_BACKENDS = ceph:cephBE1,lvm:lvmBE2,foo:my_foo + if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then + # We have at least 2 backends iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" - iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" - iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" + local add_comma_separator=0 + local backends_list='' + local be + # Tempest uses a comma separated list of backend_names: + # backend_names = BACKEND_1,BACKEND_2 + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + if [ "$add_comma_separator" -eq "1" ]; then + backends_list+=,${be##*:} + else + # first element in the list + backends_list+=${be##*:} + add_comma_separator=1 + fi + done + iniset $TEMPEST_CONFIG volume "backend_names" "$backends_list" fi - if [ $TEMPEST_VOLUME_DRIVER != "default" ]; then - iniset $TEMPEST_CONFIG volume vendor_name $TEMPEST_VOLUME_VENDOR - iniset $TEMPEST_CONFIG volume storage_protocol $TEMPEST_STORAGE_PROTOCOL + if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ + "$TEMPEST_VOLUME_VENDOR" != "$TEMPEST_DEFAULT_VOLUME_VENDOR" ]; then + iniset $TEMPEST_CONFIG volume vendor_name "$TEMPEST_VOLUME_VENDOR" + fi + if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ + "$TEMPEST_STORAGE_PROTOCOL" != "$TEMPEST_DEFAULT_STORAGE_PROTOCOL" ]; then + iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi - # Dashboard - iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" - iniset $TEMPEST_CONFIG dashboard login_url "http://$SERVICE_HOST/auth/login/" - - # cli - iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi - # Networking - iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
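The backend_names loop above leans on two bash expansions: `${CINDER_ENABLED_BACKENDS//,/ }` to split the comma list and `${be##*:}` to strip the "type:" prefix. A compact sketch of the same transformation; the `${out:+,}` idiom shown is an alternative to the separator flag used above:

```bash
# Turn devstack's "type:name" comma list into tempest's name-only list.
CINDER_ENABLED_BACKENDS="ceph:cephBE1,lvm:lvmBE2"
out=""
for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
    # ${be##*:} drops the longest prefix matching "*:", leaving the name;
    # ${out:+,} expands to "," only once out is already non-empty.
    out+=${out:+,}${be##*:}
done
echo "$out"   # -> cephBE1,lvmBE2
```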
+ # Placement Features + # Set the microversion range for placement. + # Setting the [None, latest] microversion range allows Tempest to run all microversion tests. + # NOTE: to avoid microversion test failures on stable branches, "tempest_placement_max_microversion" + # must be changed on each release from "latest" to the max version supported by that stable release. + local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None} + local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"} + if [ "$tempest_placement_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement min_microversion + else + iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion + fi + if [ "$tempest_placement_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG placement max_microversion + else + iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion + fi # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then - iniset $TEMPEST_CONFIG baremetal driver_enabled True + iniset $TEMPEST_CONFIG compute-feature-enabled change_password False + iniset $TEMPEST_CONFIG compute-feature-enabled console_output False + iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False + iniset $TEMPEST_CONFIG compute-feature-enabled live_migration False + iniset $TEMPEST_CONFIG compute-feature-enabled pause False + iniset $TEMPEST_CONFIG compute-feature-enabled rescue False + iniset $TEMPEST_CONFIG compute-feature-enabled resize False + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False + iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi - # service_available - for service in ${TEMPEST_SERVICES//,/ }; do + # Libvirt + if [ "$VIRT_DRIVER" = "libvirt" ]; then + # Libvirt-LXC + if [ "$LIBVIRT_TYPE" = "lxc" ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled rescue False + iniset $TEMPEST_CONFIG compute-feature-enabled resize False + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False + iniset $TEMPEST_CONFIG compute-feature-enabled suspend False + else + iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True + iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True + iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True + fi + fi + + # ``service_available`` + # + # This tempest service list needs to be the services that + # tempest owns, otherwise we can end up with an erroneous set of + # defaults (something defaulting to true in Tempest, but not listed here). + # Services tested by tempest plugins need to be set on the service's devstack + # plugin side, as devstack cannot keep track of all the tempest plugins' + # services. Refer to Bug#1743688 for more details. + # 'horizon' is also kept here as there is no devstack plugin for horizon. + local service + local tempest_services="key,glance,nova,neutron,cinder,swift,horizon" + for service in ${tempest_services//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" else @@ -375,66 +725,166 @@ function configure_tempest { fi done - # Restore IFS - IFS=$ifs -}
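The service loop above uses bash pattern substitution to iterate a comma list; a tiny sketch of the idiom on its own:

```bash
# ${var//,/ } replaces every comma with a space, so the unquoted
# expansion word-splits into individual service names.
tempest_services="key,glance,nova"
for service in ${tempest_services//,/ }; do
    echo "checking $service"
done
# prints: checking key / checking glance / checking nova
```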
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + fi + + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi + + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then + # libvirt-lxc does not support boot from volume or attaching volumes + # so basically anything with cinder is out of the question. + iniset $TEMPEST_CONFIG service_available cinder "False" + fi + + # Run tempest configuration utilities. This must be done last during configuration to + # ensure as complete a config as possible already exists + + # NOTE(mtreinish): Respect constraints on tempest verify-config venv + local tmp_cfg_file + tmp_cfg_file=$(mktemp) + cd $TEMPEST_DIR + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi + tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt + rm -f $tmp_u_c_m + + # Auth: + if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then + if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml + else + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml + fi + iniset $TEMPEST_CONFIG auth use_dynamic_credentials False + iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml" + elif [[ $TEMPEST_HAS_ADMIN == "False" ]]; then + iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + + else + iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + fi + + # API Extensions + # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints + # NOTE(mtreinish): This must be done after auth settings are added to the tempest config + tox -evenv -- tempest verify-config -uro $tmp_cfg_file + + # Neutron API Extensions + + # disable metering if we didn't enable the service + if ! 
is_service_enabled q-metering neutron-metering; then + DISABLE_NETWORK_API_EXTENSIONS+=", metering" + fi -# create_tempest_accounts() - Set up common required tempest accounts - -# Project User Roles -# ------------------------------------------------------------------ -# alt_demo alt_demo Member - -# Migrated from keystone_data.sh -function create_tempest_accounts { - if is_service_enabled tempest; then - # Tempest has some tests that validate various authorization checks - # between two regular users in separate tenants - openstack project create \ - alt_demo - openstack user create \ - --project alt_demo \ - --password "$ADMIN_PASSWORD" \ - alt_demo - openstack role add \ - --project alt_demo \ - --user alt_demo \ - Member + # disable l3_agent_scheduler if we didn't enable L3 agent + if ! is_service_enabled q-l3 neutron-l3; then + DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi + + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} + if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then + # Enabled extensions are either the ones explicitly specified or those available on the API endpoint + network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")} + # Remove disabled extensions + network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS) + fi + if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then + network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS" + fi + iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions + # Swift API Extensions + local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} + if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then + # Enabled extensions are either the ones explicitly specified or those available on the API endpoint + object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")} + # Remove disabled extensions + object_storage_api_extensions=$(remove_disabled_extensions $object_storage_api_extensions $DISABLE_OBJECT_STORAGE_API_EXTENSIONS) + fi + iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions + # Cinder API Extensions + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} + if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then + # Enabled extensions are either the ones explicitly specified or those available on the API endpoint + volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} + # Remove disabled extensions + volume_api_extensions=$(remove_disabled_extensions $volume_api_extensions $DISABLE_VOLUME_API_EXTENSIONS) + fi + iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions $volume_api_extensions + + # Restore IFS + IFS=$ifs }
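The extension filtering just above funnels through remove_disabled_extensions (and thus devstack's remove_disabled_services). A rough standalone re-implementation of the filtering it performs, for illustration only; the function name filter_extensions is hypothetical:

```bash
# Drop every disabled name from a comma-separated extension list.
function filter_extensions {
    local enabled=$1
    local disabled=${2// /}    # tolerate entries appended as ", metering"
    local out="" ext
    for ext in ${enabled//,/ }; do
        if [[ ",$disabled," == *",$ext,"* ]]; then
            continue           # extension is disabled: skip it
        fi
        out+=${out:+,}$ext     # keep it, comma-separated
    done
    echo "$out"
}

filter_extensions "metering,router,qos" "metering, l3_agent_scheduler"
# -> router,qos
```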
# install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - pip_install "tox<1.7" + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # was released after zed and has some incompatible changes, + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # branch jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + pip_install 'tox!=2.8.0,<4.0.0' + pushd $TEMPEST_DIR + # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH + # is a tag name, not master. git_clone would not check out the tag + # because TEMPEST_DIR already exists unless RECLONE is true. + git checkout $TEMPEST_BRANCH + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + + tox -r --notest -efull + # NOTE(mtreinish): Respect constraints in the tempest full venv; things that + # use a tox job other than full will not respect constraints and will instead + # run pip install -U on tempest requirements + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt + PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m + popd } -# init_tempest() - Initialize ec2 images -function init_tempest { - local base_image_name=cirros-${CIRROS_VERSION}-x86_64 - # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-uec - local image_dir="$FILES/images/${base_image_name}-uec" - local kernel="$image_dir/${base_image_name}-vmlinuz" - local ramdisk="$image_dir/${base_image_name}-initrd" - local disk_image="$image_dir/${base_image_name}-blank.img" - # if the cirros uec downloaded and the system is uec capable - if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ - -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then - echo "Prepare aki/ari/ami Images" - ( #new namespace - # tenant:demo ; user: demo - source $TOP_DIR/accrc/demo/demo - euca-bundle-image -r x86_64 -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -r x86_64 -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -r x86_64 -i "$disk_image" -d "$BOTO_MATERIALS_PATH" - ) 2>&1 @@ -19,7 +21,7 @@ # - cleanup_XXXX # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_TEMPLATE=$(set +o | grep xtrace) set +o xtrace @@ -33,12 +35,13 @@ XXXX_DIR=$DEST/XXXX XXX_CONF_DIR=/etc/XXXX -# Entry Points -# ------------ +# Functions +# --------- # Test if any XXXX services are enabled # is_XXXX_enabled function is_XXXX_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"XXXX" ]] && return 1 [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 return 1 } @@ -60,6 +63,11 @@ function configure_XXXX { : } +# create_XXXX_accounts() - Create required service accounts +function create_XXXX_accounts { + : +} + # init_XXXX() - Initialize databases, etc. function init_XXXX { # clean up from previous (possibly aborted) runs @@ -73,20 +81,24 @@ function install_XXXX { : } -# start_XXXX() - Start running processes, including screen +# start_XXXX() - Start running processes function start_XXXX { - # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" + # The quoted command must be a single command and not include any + # shell metacharacters, redirections or shell builtins. + # run_process XXXX "$XXXX_DIR/bin/XXXX-bin" : } -# stop_XXXX() - Stop running processes (non-screen) +# stop_XXXX() - Stop running processes function stop_XXXX { - # FIXME(dtroyer): stop only our screen screen window? 
+ # for serv in serv-a serv-b; do + # stop_process $serv + # done : } # Restore xtrace -$XTRACE +$_XTRACE_TEMPLATE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/tls b/lib/tls index 88e5f60473..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/tls # Functions to control the configuration and operation of the TLS proxy service @@ -18,27 +20,27 @@ # - configure_proxy # - start_tls_proxy -# - make_root_ca -# - make_int_ca -# - new_cert $INT_CA_DIR int-server "abc" +# - stop_tls_proxy +# - cleanup_CA + +# - make_root_CA +# - make_int_CA +# - make_cert ca-dir cert-name "common-name" ["alt-name" ...] # - start_tls_proxy HOST_IP 5000 localhost 5000 # - ensure_certificates # - is_ssl_enabled_service +# - enable_mod_ssl + # Defaults # -------- if is_service_enabled tls-proxy; then # TODO(dtroyer): revisit this below after the search for HOST_IP has been done - TLS_IP=${TLS_IP:-$SERVICE_IP} - - # Set the default ``SERVICE_PROTOCOL`` for TLS - SERVICE_PROTOCOL=https + TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)} fi -# Make up a hostname for cert purposes -# will be added to /etc/hosts? -DEVSTACK_HOSTNAME=secure.devstack.org +DEVSTACK_HOSTNAME=$(hostname -f) DEVSTACK_CERT_NAME=devstack-cert DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem @@ -65,9 +67,9 @@ function configure_CA { # build common config file # Verify ``TLS_IP`` is good - if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then + if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then # auto-discover has changed the IP - TLS_IP=$HOST_IP + TLS_IP=$(ipv6_unquote $SERVICE_HOST) fi } @@ -81,6 +83,7 @@ function create_CA_base { return 0 fi + local i for i in certs crl newcerts private; do mkdir -p $ca_dir/$i done @@ -89,7 +92,6 @@ function create_CA_base { cp /dev/null $ca_dir/index.txt } - # Create a new CA configuration file # create_CA_config ca-dir common-name function create_CA_config { @@ -111,11 +113,11 @@ new_certs_dir = \$dir/newcerts certificate = \$dir/cacert.pem private_key = \$dir/private/cacert.key RANDFILE = \$dir/private/.rand -default_md = default +default_md = sha256 [ req ] -default_bits = 1024 -default_md = sha1 +default_bits = 2048 +default_md = sha256 prompt = no distinguished_name = ca_distinguished_name @@ -167,7 +169,7 @@ default_md = default [ req ] default_bits = 1024 -default_md = sha1 +default_md = sha256 prompt = no distinguished_name = req_distinguished_name @@ -205,6 +207,16 @@ function init_CA { # Create the CA bundle cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem + cat $INT_CA_DIR/ca-chain.pem >> $SSL_BUNDLE_FILE + + if is_fedora; then + sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt + sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi } # Create an initial server cert @@ -212,8 +224,10 @@ function init_CA { function init_cert { if [[ ! 
-r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then - # Lie to let incomplete match routines work - TLS_IP="DNS:$TLS_IP" + TLS_IP="IP:$TLS_IP" + if [[ -n "$HOST_IPV6" ]]; then + TLS_IP="$TLS_IP,IP:$HOST_IPV6" + fi fi make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" @@ -222,7 +236,6 @@ function init_cert { fi } - # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] function make_cert { @@ -231,33 +244,45 @@ function make_cert { local common_name=$3 local alt_names=$4 - # Generate a signing request - $OPENSSL req \ - -sha1 \ - -newkey rsa \ - -nodes \ - -keyout $ca_dir/private/$cert_name.key \ - -out $ca_dir/$cert_name.csr \ - -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" - - if [[ -z "$alt_names" ]]; then - alt_names="DNS:${common_name}" - else - alt_names="DNS:${common_name},${alt_names}" + if [ "$common_name" != "$SERVICE_HOST" ]; then + if is_ipv4_address "$SERVICE_HOST" ; then + if [[ -z "$alt_names" ]]; then + alt_names="IP:$SERVICE_HOST" + else + alt_names="$alt_names,IP:$SERVICE_HOST" + fi + fi fi - # Sign the request valid for 1 year - SUBJECT_ALT_NAME="$alt_names" \ - $OPENSSL ca -config $ca_dir/signing.conf \ - -extensions req_extensions \ - -days 365 \ - -notext \ - -in $ca_dir/$cert_name.csr \ - -out $ca_dir/$cert_name.crt \ - -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \ - -batch -} + # Only generate the certificate if it doesn't exist yet on the disk + if [ ! -r "$ca_dir/$cert_name.crt" ]; then + # Generate a signing request + $OPENSSL req \ + -sha256 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/$cert_name.key \ + -out $ca_dir/$cert_name.csr \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" + + if [[ -z "$alt_names" ]]; then + alt_names="DNS:${common_name}" + else + alt_names="DNS:${common_name},${alt_names}" + fi + # Sign the request valid for 1 year + SUBJECT_ALT_NAME="$alt_names" \ + $OPENSSL ca -config $ca_dir/signing.conf \ + -extensions req_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/$cert_name.csr \ + -out $ca_dir/$cert_name.crt \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \ + -batch + fi +} # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir @@ -270,23 +295,25 @@ function make_int_CA { create_CA_config $ca_dir 'Intermediate CA' create_signing_config $ca_dir - # Create a signing certificate request - $OPENSSL req -config $ca_dir/ca.conf \ - -sha1 \ - -newkey rsa \ - -nodes \ - -keyout $ca_dir/private/cacert.key \ - -out $ca_dir/cacert.csr \ - -outform PEM - - # Sign the intermediate request valid for 1 year - $OPENSSL ca -config $signing_ca_dir/ca.conf \ - -extensions ca_extensions \ - -days 365 \ - -notext \ - -in $ca_dir/cacert.csr \ - -out $ca_dir/cacert.pem \ - -batch + if [ ! 
-r "$ca_dir/cacert.pem" ]; then + # Create a signing certificate request + $OPENSSL req -config $ca_dir/ca.conf \ + -sha256 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.csr \ + -outform PEM + + # Sign the intermediate request valid for 1 year + $OPENSSL ca -config $signing_ca_dir/ca.conf \ + -extensions ca_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/cacert.csr \ + -out $ca_dir/cacert.pem \ + -batch + fi } # Make a root CA to sign other CAs @@ -298,45 +325,86 @@ function make_root_CA { create_CA_base $ca_dir create_CA_config $ca_dir 'Root CA' - # Create a self-signed certificate valid for 5 years - $OPENSSL req -config $ca_dir/ca.conf \ - -x509 \ - -nodes \ - -newkey rsa \ - -days 21360 \ - -keyout $ca_dir/private/cacert.key \ - -out $ca_dir/cacert.pem \ - -outform PEM + if [ ! -r "$ca_dir/cacert.pem" ]; then + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM + fi } +# Deploy the service cert & key to a service specific +# location +function deploy_int_cert { + local cert_target_file=$1 + local key_target_file=$2 -# Certificate Input Configuration -# =============================== + sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file" + sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file" +} -# check to see if the service(s) specified are to be SSL enabled. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# Uses global ``SSL_ENABLED_SERVICES`` +# Deploy the intermediate CA cert bundle file to a service +# specific location +function deploy_int_CA { + local ca_target_file=$1 + + sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file" +} + +# If a non-system python-requests is installed then it will use the +# built-in CA certificate store rather than the distro-specific +# CA certificate store. Detect this and symlink to the correct +# one. If the value for the CA is not rooted in /etc then we know +# we need to change it. +function fix_system_ca_bundle_path { + if is_service_enabled tls-proxy; then + local capath + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi + if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then + if is_fedora; then + sudo rm -f $capath + sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath + elif is_ubuntu; then + sudo rm -f $capath + sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath + else + echo "Don't know how to set the CA bundle, expect the install to fail." + fi + fi + fi +} + + +# Only for compatibility, return if the tls-proxy is enabled function is_ssl_enabled_service { - services=$@ - for service in ${services}; do - [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - done - return 1 + return is_service_enabled tls-proxy } +# Certificate Input Configuration +# =============================== # Ensure that the certificates for a service are in place. This function does # not check that a service is SSL enabled, this should already have been # completed. 
+# Certificate Input Configuration +# =============================== # Ensure that the certificates for a service are in place. This function does # not check that a service is SSL enabled; this should already have been # completed. # # The function expects to find a certificate, key and CA certificate in the -# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For -# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and -# KEYSTONE_SSL_CA. If it does not find these certificates the program will -# quit. +# variables ``{service}_SSL_CERT``, ``{service}_SSL_KEY`` and ``{service}_SSL_CA``. For +# example for keystone this would be ``KEYSTONE_SSL_CERT``, ``KEYSTONE_SSL_KEY`` and +# ``KEYSTONE_SSL_CA``. +# +# If it does not find these certificates then the DevStack-issued server +# certificate, key and CA certificate will be associated with the service. +# +# If only some of the variables are provided then the function will quit. function ensure_certificates { local service=$1 @@ -348,7 +416,15 @@ function ensure_certificates { local key=${!key_var} local ca=${!ca_var} - if [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then + if [[ -z "$cert" && -z "$key" && -z "$ca" ]]; then + local cert="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + local key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + local ca="$INT_CA_DIR/ca-chain.pem" + eval ${service}_SSL_CERT=\$cert + eval ${service}_SSL_KEY=\$key + eval ${service}_SSL_CA=\$ca + return # the CA certificate is already in the bundle + elif [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \ "variable to enable SSL for ${service}" fi @@ -356,21 +432,184 @@ function ensure_certificates { cat $ca >> $SSL_BUNDLE_FILE } +# Enable the mod_ssl plugin in Apache +function enable_mod_ssl { + echo "Enabling mod_ssl" + + if is_ubuntu; then + sudo a2enmod ssl + elif is_fedora; then + # Fedora enables mod_ssl by default + : + fi + if ! sudo `which httpd || which apache2ctl` -M | grep -w -q ssl_module; then + die $LINENO "mod_ssl is not enabled in apache2/httpd, please check for it manually and run stack.sh again" + fi +}
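One detail worth calling out before the proxy helpers below: the connection tuning written out by tune_apache_connections caps Apache at MaxClients = ServerLimit × ThreadsPerChild, and Apache complains if the three values drift apart. A quick sanity check of the arithmetic used there:

```bash
# The config below sets ServerLimit 8 and ThreadsPerChild 32, so
# MaxClients must be their product or apache warns and clamps it.
server_limit=8
threads_per_child=32
echo $(( server_limit * threads_per_child ))   # -> 256, matching MaxClients
```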
# Proxy Functions # =============== +function tune_apache_connections { + local should_restart=$1 + local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf + if ! [ -f $tuning_file ] ; then + sudo bash -c "cat > $tuning_file" << EOF +# worker MPM +# StartServers: initial number of server processes to start +# MinSpareThreads: minimum number of worker threads which are kept spare +# MaxSpareThreads: maximum number of worker threads which are kept spare +# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a +# graceful restart. ThreadLimit can only be changed by stopping +# and starting Apache. +# ThreadsPerChild: constant number of worker threads in each server process +# MaxClients: maximum number of simultaneous client connections +# MaxRequestsPerChild: maximum number of requests a server process serves +# +# We want to be memory thrifty so tune down apache to allow 256 total +# connections. This should still be plenty for a dev env yet lighter than +# apache defaults. +<IfModule mpm_worker_module> +# Note that the next three conf values must be changed together. +# MaxClients = ServerLimit * ThreadsPerChild +ServerLimit 8 +ThreadsPerChild 32 +MaxClients 256 +StartServers 2 +MinSpareThreads 32 +MaxSpareThreads 96 +ThreadLimit 64 +MaxRequestsPerChild 0 +</IfModule> + +<IfModule mpm_event_module> +# Note that the next three conf values must be changed together. +# MaxClients = ServerLimit * ThreadsPerChild +ServerLimit 8 +ThreadsPerChild 32 +MaxClients 256 +StartServers 2 +MinSpareThreads 32 +MaxSpareThreads 96 +ThreadLimit 64 +MaxRequestsPerChild 0 +</IfModule> +EOF + if [ "$should_restart" != "norestart" ] ; then + # Only restart the apache server if we know we really want to + # do so. Too many restarts in a short period of time is treated + # as an error by systemd. + restart_apache_server + fi + fi +} + # Starts the TLS proxy for the given IP/ports -# start_tls_proxy front-host front-port back-host back-port +# start_tls_proxy service-name front-host front-port back-host back-port function start_tls_proxy { - local f_host=$1 - local f_port=$2 - local b_host=$3 - local b_port=$4 + local b_service="$1-tls-proxy" + local f_host=$2 + local f_port=$3 + local b_host=$4 + local b_port=$5 + # 8190 is the default apache limit. + local f_header_size=${6:-8190} + + # We don't restart apache here as we'll do it at the end of the function. + tune_apache_connections norestart + + local config_file + config_file=$(apache_site_config_for $b_service) + local listen_string + # Default apache configs on ubuntu and centos listen on 80 and 443 + # newer apache seems fine with duplicate listen directive but older + # apache does not so special case 80 and 443. + if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then + listen_string="" + elif [[ "$f_host" == '*' ]] ; then + listen_string="Listen $f_port" + else + listen_string="Listen $f_host:$f_port" + fi + sudo bash -c "cat >$config_file" << EOF +$listen_string + +<VirtualHost $f_host:$f_port> + SSLEngine On + SSLCertificateFile $DEVSTACK_CERT + SSLProtocol -all +TLSv1.3 +TLSv1.2 + + # Disable KeepAlive to fix bug #1630664 a.k.a the + # ('Connection aborted.', BadStatusLine("''",)) error + KeepAlive Off + + # This increase in allowed request header sizes is required + # for swift functional testing to work with tls enabled. It is 2 bytes + # larger than the apache default of 8190. + LimitRequestFieldSize $f_header_size + RequestHeader set X-Forwarded-Proto "https" + + # Avoid races (at the cost of performance) to re-use a pooled connection + # where the connection is closed (bug 1807518). + # Set acquire=1 to disable waiting for connection pool members so that + # we can determine when apache is overloaded (returns 503). + SetEnv proxy-initial-not-pooled + + <Location /> + ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1 + ProxyPassReverse http://$b_host:$b_port/ + </Location> + ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log + ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" + LogLevel info + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined +</VirtualHost> +EOF + for mod in headers ssl proxy proxy_http; do + # We don't need to restart here as we will restart once at the end + # of the function. + enable_apache_mod $mod norestart + done + enable_apache_site $b_service + restart_apache_server +} - stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null +# Cleanup Functions +# ================= + +# Stops the apache service. This should be done only after all services +# using tls configuration are down. +function stop_tls_proxy { + stop_apache_server + + # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but + # necessary so that we can restart after an unstack. 
A better + # solution would be to ensure that each service calling + # start_tls_proxy will call stop_tls_proxy with the same + # parameters on shutdown so we can use the disable_apache_site + # function and remove individual files there. + if is_ubuntu; then + sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf + else + for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do + sudo mv $i $i.disabled + done + fi } +# Clean up the CA files +# cleanup_CA +function cleanup_CA { + if is_fedora; then + sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt + sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi + + rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT" +} # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/trove b/lib/trove deleted file mode 100644 index 86314700bf..0000000000 --- a/lib/trove +++ /dev/null @@ -1,233 +0,0 @@ -# lib/trove -# Functions to control the configuration and operation of the **Trove** service - -# Dependencies: -# ``functions`` file -# ``DEST``, ``STACK_USER`` must be defined -# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_trove -# configure_trove -# init_trove -# start_trove -# stop_trove -# cleanup_trove - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- -if is_service_enabled neutron; then - TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} -else - TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -fi - -# Set up default configuration -TROVE_DIR=$DEST/trove -TROVECLIENT_DIR=$DEST/python-troveclient -TROVE_CONF_DIR=/etc/trove -TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove -TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} - -# Support entry points installation of console scripts -if [[ -d $TROVE_DIR/bin ]]; then - TROVE_BIN_DIR=$TROVE_DIR/bin -else - TROVE_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,trove - - -# Functions -# --------- - -# Test if any Trove services are enabled -# is_trove_enabled -function is_trove_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 - return 1 -} - -# setup_trove_logging() - Adds logging configuration to conf files -function setup_trove_logging { - local CONF=$1 - iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - setup_colorized_logging $CONF DEFAULT tenant user - fi -} - -# create_trove_accounts() - Set up common required trove accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service trove admin # if enabled - -function create_trove_accounts { - # Trove - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then - TROVE_USER=$(openstack user create \ - trove \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email trove@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $SERVICE_ROLE \ - --project $SERVICE_TENANT \ - --user $TROVE_USER - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(openstack service create \ - trove \ - --type=database \ - 
--description="Trove Service" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $TROVE_SERVICE \ - --region RegionOne \ - --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" - fi - fi -} - -# stack.sh entry points -# --------------------- - -# cleanup_trove() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_trove { - #Clean up dirs - rm -fr $TROVE_AUTH_CACHE_DIR/* - rm -fr $TROVE_CONF_DIR/* -} - -# configure_troveclient() - Set config files, create data dirs, etc -function configure_troveclient { - setup_develop $TROVECLIENT_DIR -} - -# configure_trove() - Set config files, create data dirs, etc -function configure_trove { - setup_develop $TROVE_DIR - - # Create the trove conf dir and cache dirs if they don't exist - sudo mkdir -p ${TROVE_CONF_DIR} - sudo mkdir -p ${TROVE_AUTH_CACHE_DIR} - sudo chown -R $STACK_USER: ${TROVE_CONF_DIR} - sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR} - - # Copy api-paste file over to the trove conf dir and configure it - cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini - TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini - iniset $TROVE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $TROVE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $TROVE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $TROVE_API_PASTE_INI filter:authtoken cafile $KEYSTONE_SSL_CA - iniset $TROVE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $TROVE_API_PASTE_INI filter:authtoken admin_user trove - iniset $TROVE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - iniset $TROVE_API_PASTE_INI filter:authtoken signing_dir $TROVE_AUTH_CACHE_DIR - - # (Re)create trove conf files - rm -f $TROVE_CONF_DIR/trove.conf - rm -f $TROVE_CONF_DIR/trove-taskmanager.conf - rm -f $TROVE_CONF_DIR/trove-conductor.conf - - iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True - iniset $TROVE_CONF_DIR/trove.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2 - iniset $TROVE_CONF_DIR/trove.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1 - iniset $TROVE_CONF_DIR/trove.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_ - - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD - sed -i "s/localhost/$TROVE_HOST_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample - - setup_trove_logging $TROVE_CONF_DIR/trove.conf - setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample - - # (Re)create trove taskmanager conf file if needed - if is_service_enabled tr-tmgr; then - TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION - - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager - iniset 
$TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2 - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1 - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_ - iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf - fi - - # (Re)create trove conductor conf file if needed - if is_service_enabled tr-cond; then - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_tenant_name trove - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove - setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf - fi -} - -# install_troveclient() - Collect source and prepare -function install_troveclient { - git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH -} - -# install_trove() - Collect source and prepare -function install_trove { - git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH -} - -# init_trove() - Initializes Trove Database as a Service -function init_trove { - #(Re)Create trove db - recreate_database trove utf8 - - #Initialize the trove database - $TROVE_BIN_DIR/trove-manage db_sync -} - -# start_trove() - Start running processes, including screen -function start_trove { - screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" - screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" - screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" -} - -# stop_trove() - Stop running processes -function stop_trove { - # Kill the trove screen windows - for serv in tr-api tr-tmgr tr-cond; do - screen_stop $serv - done -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/openrc b/openrc index fc066ad82c..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -1,20 +1,17 @@ #!/usr/bin/env bash # -# source openrc [username] [tenantname] +# source openrc [username] [projectname] # -# Configure a set of credentials for $TENANT/$USERNAME: -# Set OS_TENANT_NAME to override the default tenant 'demo' +# Configure a set of credentials for $PROJECT/$USERNAME: +# Set OS_PROJECT_NAME to override the default project 'demo' # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. 
- if [[ -n "$1" ]]; then OS_USERNAME=$1 fi if [[ -n "$2" ]]; then - OS_TENANT_NAME=$2 + OS_PROJECT_NAME=$2 fi # Find the other rc files @@ -29,61 +26,49 @@ source $RC_DIR/stackrc # Load the last env variables if available if [[ -r $RC_DIR/.stackenv ]]; then source $RC_DIR/.stackenv + export OS_CACERT fi # Get some necessary configuration source $RC_DIR/lib/tls -# The introduction of Keystone to the OpenStack ecosystem has standardized the -# term **tenant** as the entity that owns resources. In some places references -# still exist to the original Nova term **project** for this use. Also, -# **tenant_name** is preferred to **tenant_id**. -export OS_TENANT_NAME=${OS_TENANT_NAME:-demo} - -# In addition to the owning entity (tenant), nova stores the entity performing -# the action as the **user**. +# Minimal configuration +export OS_AUTH_TYPE=password +export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} export OS_USERNAME=${OS_USERNAME:-demo} +export OS_PASSWORD=${ADMIN_PASSWORD:-secret} +export OS_REGION_NAME=${REGION_NAME:-RegionOne} + +# Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION +# is 4, else HOST_IPV6 if it's 6. SERVICE_HOST may also be used to specify the +# endpoint, which is convenient for some localrc configurations. Additionally, +# some exercises call Glance directly. On a single-node installation, Glance +# should be listening on a local IP address, depending on the setting of +# SERVICE_IP_VERSION. If its running elsewhere, it can be set here. +if [[ $SERVICE_IP_VERSION == 6 ]]; then + HOST_IPV6=${HOST_IPV6:-::1} + SERVICE_HOST=${SERVICE_HOST:-[$HOST_IPV6]} + GLANCE_HOST=${GLANCE_HOST:-[$HOST_IPV6]} +else + HOST_IP=${HOST_IP:-127.0.0.1} + SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +fi -# With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. -export OS_PASSWORD=${ADMIN_PASSWORD:-secrete} - -# Don't put the key into a keyring by default. Testing for development is much -# easier with this off. -export OS_NO_CACHE=${OS_NO_CACHE:-1} - -# Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, -# which is convenient for some localrc configurations. -HOST_IP=${HOST_IP:-127.0.0.1} -SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} -SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} - -# Some exercises call glance directly. On a single-node installation, Glance -# should be listening on HOST_IP. If its running elsewhere, it can be set here -GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} - -# Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} +# If you don't have a working .stackenv, this is the backup position +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/tenant has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the 2.0 *identity api*. -# -export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} +export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Set the pointer to our CA certificate chain. Harmless if TLS is not used. 
-export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem} +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -# Currently novaclient needs you to specify the *compute api* version. This -# needs to match the config of your catalog returned by Keystone. -export NOVA_VERSION=${NOVA_VERSION:-1.1} -# In the future this will change names: -export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. -export CINDER_VERSION=${CINDER_VERSION:-2} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} +# Set OS_CACERT to a default CA certificate chain if it exists. +if [[ ! -v OS_CACERT ]] ; then + DEFAULT_OS_CACERT=$INT_CA_DIR/ca-chain.pem + # If the file does not exist, this may confuse preflight sanity checks + if [ -e $DEFAULT_OS_CACERT ] ; then + export OS_CACERT=$DEFAULT_OS_CACERT + fi +fi diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml new file mode 100644 index 0000000000..d0906380ab --- /dev/null +++ b/playbooks/devstack.yaml @@ -0,0 +1,7 @@ +- hosts: all + # This is the default strategy; however, since orchestrate-devstack requires + # "linear", it is safer to enforce it in case this is running in an + # environment configured with a different default strategy. + strategy: linear + roles: + - orchestrate-devstack diff --git a/playbooks/post.yaml b/playbooks/post.yaml new file mode 100644 index 0000000000..0047d78ea5 --- /dev/null +++ b/playbooks/post.yaml @@ -0,0 +1,41 @@ +- hosts: all + become: True + vars: + devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" + devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}" + tasks: + # NOTE(andreaf) If the tempest service is enabled, a tempest.log is + # generated as part of lib/tempest, as a result of verify_tempest_config + - name: Check if a tempest log exists + stat: + path: "{{ devstack_conf_dir }}/tempest.log" + register: tempest_log + - name: Link post-devstack tempest.log + file: + src: "{{ devstack_conf_dir }}/tempest.log" + dest: "{{ stage_dir }}/verify_tempest_conf.log" + state: hard + when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes + roles: + - export-devstack-journal + - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data + - devstack-project-conf + # capture-system-logs should be the last role before stage-output + - capture-system-logs + - role: stage-output + # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job + # starts pulling logs for us from {{ ansible_user_dir }}/logs. 
+ Meanwhile we already store things in ansible_user_dir and use + fetch-devstack-log-dir setting devstack_base_dir + - role: fetch-devstack-log-dir + devstack_base_dir: "{{ ansible_user_dir }}" diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml new file mode 100644 index 0000000000..68cb1d8c7a --- /dev/null +++ b/playbooks/pre.yaml @@ -0,0 +1,37 @@ +- hosts: all + pre_tasks: + - name: Fix the permissions of the zuul home directory + # Make sure that the zuul home can be traversed, + # so that all users can access the sources placed there. + # Some distributions create it with 700 by default. + file: + path: "{{ ansible_user_dir }}" + mode: a+x + - name: Gather minimum local MTU + set_fact: + local_mtu: > + {% set mtus = [] -%} + {% for interface in ansible_interfaces -%} + {% set interface_variable = 'ansible_' + interface -%} + {% if interface_variable in hostvars[inventory_hostname] -%} + {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%} + {% endif -%} + {% endfor -%} + {{- mtus|min -}} + - name: Calculate external_bridge_mtu + # 30 bytes is overhead for vxlan (which is greater than GRE), + # allowing us to use either overlay option with this MTU. + # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay. + # TODO(andreaf) This should work, but it may have to be reconciled with + # the MTU setting used by the multinode setup roles in multinode pre.yaml + set_fact: + external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}" + roles: + - configure-swap + - setup-stack-user + - setup-tempest-user + - setup-devstack-source-dirs + - setup-devstack-log-dir + - setup-devstack-cache + - start-fresh-logging + - write-devstack-local-conf diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml new file mode 100644 index 0000000000..7f0cb19824 --- /dev/null +++ b/playbooks/tox/post.yaml @@ -0,0 +1,4 @@ +- hosts: all + roles: + - fetch-tox-output + - fetch-subunit-output diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml new file mode 100644 index 0000000000..68d5254251 --- /dev/null +++ b/playbooks/tox/pre.yaml @@ -0,0 +1,14 @@ +- hosts: all + roles: + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + branches jobs. 
We can continue testing the stable/zed and lower + branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml new file mode 100644 index 0000000000..e4043d8231 --- /dev/null +++ b/playbooks/tox/run-both.yaml @@ -0,0 +1,11 @@ +- hosts: all + roles: + - run-devstack + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + - ensure-tox + - get-devstack-os-environment + - tox diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml new file mode 100644 index 0000000000..0d065c6ca2 --- /dev/null +++ b/playbooks/tox/run.yaml @@ -0,0 +1,4 @@ +- hosts: all + roles: + - get-devstack-os-environment + - tox diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml new file mode 100644 index 0000000000..cfa1676378 --- /dev/null +++ b/playbooks/unit-tests/pre.yaml @@ -0,0 +1,13 @@ +- hosts: all + + tasks: + + - name: Install prerequisites + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc + ./tools/install_prereqs.sh diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml new file mode 100644 index 0000000000..181521f072 --- /dev/null +++ b/playbooks/unit-tests/run.yaml @@ -0,0 +1,12 @@ +- hosts: all + + tasks: + + - name: Run run_tests.sh + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + ./run_tests.sh diff --git a/rejoin-stack.sh b/rejoin-stack.sh deleted file mode 100755 index 30b7bab1cc..0000000000 --- a/rejoin-stack.sh +++ /dev/null @@ -1,24 +0,0 @@ -#! /usr/bin/env bash - -# This script rejoins an existing screen, or re-creates a -# screen session from a previous run of stack.sh. - -TOP_DIR=`dirname $0` - -# Import common functions in case the localrc (loaded via stackrc) -# uses them. -source $TOP_DIR/functions - -source $TOP_DIR/stackrc - -# if screenrc exists, run screen -if [[ -e $TOP_DIR/stack-screenrc ]]; then - if screen -ls | egrep -q "[0-9].stack"; then - echo "Attaching to already started screen session.." - exec screen -r stack - fi - exec screen -c $TOP_DIR/stack-screenrc -fi - -echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?" -exit 1 diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst new file mode 100644 index 0000000000..eccee403a5 --- /dev/null +++ b/roles/apache-logs-conf/README.rst @@ -0,0 +1,12 @@ +Prepare apache configs and logs for staging + +Make sure apache config files and log files are available in a linux flavor +independent location. Note that this relies on hard links, so the staging +directory must be in the same partition where the logs and configs are. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. 
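Since the staging is done with hard links, a job can sanity-check the same-partition assumption before invoking the role. A minimal pre-task sketch, assuming the Debian/SuSE log path; the task itself is illustrative and not part of the role::

    - name: Assert stage_dir and the apache logs share a filesystem (sketch)
      # Hard links cannot cross filesystems, so the device ids must match.
      shell: test "$(stat -c %d /var/log/apache2)" = "$(stat -c %d {{ stage_dir }})"
      changed_when: false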
diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/apache-logs-conf/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml new file mode 100644 index 0000000000..6b7ea37857 --- /dev/null +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -0,0 +1,90 @@ +- name: Ensure {{ stage_dir }}/apache exists + file: + path: "{{ stage_dir }}/apache" + state: directory + +- name: Link apache logs on Debian/SuSE + block: + - name: Find logs + find: + path: "/var/log/apache2" + file_type: any + register: debian_suse_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ debian_suse_apache_logs.files }}" + register: debian_suse_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ debian_suse_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family in ('Debian', 'Suse') + no_log: true + +- name: Link apache logs on RedHat + block: + - name: Find logs + find: + path: "/var/log/httpd" + file_type: any + register: redhat_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ redhat_apache_logs.files }}" + register: redhat_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ redhat_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family == 'RedHat' + no_log: true + +- name: Ensure {{ stage_dir }}/apache_config exists + file: + path: "{{ stage_dir }}/apache_config" + state: directory + +- name: Define config paths + set_fact: + apache_config_paths: + 'Debian': '/etc/apache2/sites-enabled/' + 'Suse': '/etc/apache2/conf.d/' + 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' + +- name: Discover configurations + find: + path: "{{ apache_config_paths[ansible_os_family] }}" + file_type: any + register: apache_configs + no_log: true + +- name: Dereference configurations + stat: + path: "{{ item.path }}" + with_items: "{{ apache_configs.files }}" + register: apache_configs_deref + no_log: true + +- name: Link configurations + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ apache_configs_deref.results }}" + when: item.stat.isreg or item.stat.islnk + no_log: true diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. 
zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..51a11b60bc --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,18 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} + ignore_errors: yes diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst new file mode 100644 index 0000000000..1376f63bfc --- /dev/null +++ b/roles/capture-system-logs/README.rst @@ -0,0 +1,21 @@ +Stage a number of system type logs + +Stage a number of different logs / reports: +- snapshot of iptables +- disk space available +- pip[2|3] freeze +- installed packages (dpkg/rpm) +- ceph, openswitch, gluster +- coredumps +- dns resolver +- listen53 +- services +- unbound.log +- deprecation messages + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/capture-system-logs/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml new file mode 100644 index 0000000000..4b5ec4836b --- /dev/null +++ b/roles/capture-system-logs/tasks/main.yaml @@ -0,0 +1,59 @@ +# TODO(andreaf) Make this into proper Ansible +- name: Stage various logs and reports + shell: + executable: /bin/bash + cmd: | + sudo iptables-save > {{ stage_dir }}/iptables.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt + + for py_ver in 2 3; do + if [[ `which python${py_ver}` ]]; then + python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt + fi + done + + if [ `command -v dpkg` ]; then + dpkg -l > {{ stage_dir }}/dpkg-l.txt + fi + if [ `command -v rpm` ]; then + rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt + fi + + # Services status + sudo systemctl status --all > {{ stage_dir }}/services.txt 2>/dev/null + + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU + # failed to start due to denials from SELinux — useful for CentOS + # and Fedora machines. 
For Ubuntu (which runs AppArmor), DevStack + already captures the contents of /var/log/kern.log (via + `journalctl -t kernel` redirected into syslog.txt.gz), which + contains AppArmor-related messages. + if [ -f /var/log/audit/audit.log ] ; then + sudo cp /var/log/audit/audit.log {{ stage_dir }}/audit.log && + sudo chmod +r {{ stage_dir }}/audit.log; + fi + + # gzip and save any coredumps in /var/core + if [ -d /var/core ]; then + sudo gzip -r /var/core + sudo cp -r /var/core {{ stage_dir }}/ + fi + + sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt + + # NOTE(andreaf) Service logs are already in logs/ thanks to the + # export-devstack-journal role. Apache logs are under apache/ thanks to the + # apache-logs-conf role. + grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \ + sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' | \ + sed -r 's/\[.*\]/ /g' | \ + sed -r 's/\s[0-9]+\s/ /g' | \ + awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst new file mode 100644 index 0000000000..3bddf5ea60 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -0,0 +1,16 @@ +Verify all addresses in IPv6-only deployments + +This role needs to be invoked from a playbook that +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail +the job early. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml new file mode 100644 index 0000000000..59d3b79bc1 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml @@ -0,0 +1,4 @@ +- name: Verify the ipv6-only deployments + become: true + become_user: stack + shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh" diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst new file mode 100644 index 0000000000..3f2d4c9697 --- /dev/null +++ b/roles/devstack-project-conf/README.rst @@ -0,0 +1,11 @@ +Prepare OpenStack project configurations for staging + +Prepare all relevant config files for staging. +This is helpful to avoid staging the entire /etc. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. 
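For reference, a minimal sketch of wiring this role up on its own; the play wrapper is hypothetical, while playbooks/post.yaml above is the real consumer::

    - hosts: all
      roles:
        - devstack-project-conf
      vars:
        # Where /etc/<project> trees are copied to (role default shown)
        stage_dir: "{{ ansible_user_dir }}"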
diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml new file mode 100644 index 0000000000..f8fb8deac9 --- /dev/null +++ b/roles/devstack-project-conf/defaults/main.yaml @@ -0,0 +1 @@ +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml new file mode 100644 index 0000000000..917cdbc370 --- /dev/null +++ b/roles/devstack-project-conf/tasks/main.yaml @@ -0,0 +1,25 @@ +- name: Ensure {{ stage_dir }}/etc exists + file: + path: "{{ stage_dir }}/etc" + state: directory + +- name: Check which projects have a config folder + stat: + path: "/etc/{{ item.value.short_name }}" + with_dict: "{{ zuul.projects }}" + register: project_configs + no_log: true + +- name: Copy configuration files + command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }} + when: item.stat.exists + with_items: "{{ project_configs.results }}" + +- name: Check if openstack has a config folder + stat: + path: "/etc/openstack" + register: openstack_configs + +- name: Copy configuration files + command: cp -pRL /etc/openstack {{ stage_dir }}/etc/ + when: openstack_configs.stat.exists diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst new file mode 100644 index 0000000000..9e3c919627 --- /dev/null +++ b/roles/export-devstack-journal/README.rst @@ -0,0 +1,25 @@ +Export journal files from devstack services + +This performs a number of logging collection services + +* Export the systemd journal in native format +* For every devstack service, export logs to text in a file named + ``screen-*`` to maintain legacy compatibility when devstack services + used to run in a screen session and were logged separately. +* Export a syslog-style file with kernel and sudo messages for legacy + compatibility. + +Writes the output to the ``logs/`` subdirectory of ``stage_dir``. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. This is used to obtain the + ``log-start-timestamp.txt``, used to filter the systemd journal. + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. 
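A usage sketch with the role defaults spelled out explicitly; the play wrapper is hypothetical, and the values mirror the defaults listed above::

    - hosts: all
      roles:
        - role: export-devstack-journal
          devstack_base_dir: /opt/stack
          stage_dir: "{{ ansible_user_dir }}"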
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/export-devstack-journal/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml new file mode 100644 index 0000000000..db38b10a44 --- /dev/null +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -0,0 +1,54 @@ +# NOTE(andreaf) This bypasses the stage-output role +- name: Ensure {{ stage_dir }}/logs exists + become: true + file: + path: "{{ stage_dir }}/logs" + state: directory + owner: "{{ ansible_user }}" + +- name: Export legacy stack screen log files + become: true + shell: + cmd: | + u="" + name="" + for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do + name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') + journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt + done + +- name: Export legacy syslog.txt + become: true + shell: + # The journal contains everything running under systemd, we'll + # build an old school version of the syslog with just the + # kernel and sudo messages. + cmd: | + journalctl \ + -t kernel \ + -t sudo \ + --no-pager \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + > {{ stage_dir }}/logs/syslog.txt + +# TODO: convert this to ansible +# - make a list of the above units +# - iterate the list here +- name: Export journal + become: true + shell: + # Export the journal in export format to make it downloadable + # for later searching. It can then be rewritten to a journal native + # format locally using systemd-journal-remote. This makes a class of + # debugging much easier. We don't do the native conversion here as + # some distros do not package that tooling. + cmd: | + journalctl -o export \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + | gzip > {{ stage_dir }}/logs/devstack.journal.gz + +- name: Save journal README + become: true + template: + src: devstack.journal.README.txt.j2 + dest: '{{ stage_dir }}/logs/devstack.journal.README.txt' diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 new file mode 100644 index 0000000000..30519f63d7 --- /dev/null +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -0,0 +1,33 @@ +Devstack systemd journal +======================== + +The devstack.journal file is a copy of the systemd journal during the +devstack run. + +To use it, you will need to convert it so journalctl can read it +locally. After downloading the file: + + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal + +Note this binary is not in the regular path. On Debian/Ubuntu +platforms, you will need to have the "systemd-journal-remote" package +installed. + +It should result in something like: + + Finishing after writing entries + +You can then use journalctl to examine this file. 
For example, to see + all devstack services try: + + $ journalctl --file ./output.journal -u 'devstack@*' + +To see just cinder API server logs, restrict the match with + + $ journalctl --file ./output.journal -u 'devstack@c-api' + +There may be many types of logs available in the journal; a command like + + $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u + +can help you find interesting things to filter on. \ No newline at end of file diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst new file mode 100644 index 0000000000..360a2e3dd0 --- /dev/null +++ b/roles/fetch-devstack-log-dir/README.rst @@ -0,0 +1,10 @@ +Fetch content from the devstack log directory + +Copy logs from every host back to the zuul executor. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/fetch-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..276c4e0eb5 --- /dev/null +++ b/roles/fetch-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,10 @@ +# as the user in the guest may not exist on the executor +# we do not preserve the group or owner of the copied logs. + +- name: Collect devstack logs + synchronize: + dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}" + mode: pull + src: "{{ devstack_base_dir }}/logs" + group: no + owner: no diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst new file mode 100644 index 0000000000..68ddce8b5a --- /dev/null +++ b/roles/get-devstack-os-environment/README.rst @@ -0,0 +1,40 @@ +Reads the OS_* variables set by devstack through openrc +for the specified user and project and exports them as +the os_env_vars fact. + +**WARNING**: this role is meant to be used as a porting aid +for the non-unified python-client jobs which +are already around, as those clients do not use clouds.yaml +as openstackclient does. +When those clients and their jobs are deprecated and removed, +or when the new code is able to read from clouds.yaml +directly, this role should be removed as well. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: openrc_file + :default: {{ devstack_base_dir }}/devstack/openrc + + The location of the generated openrc file. + +.. zuul:rolevar:: openrc_user + :default: admin + + The user whose credentials should be retrieved. + +.. zuul:rolevar:: openrc_project + :default: admin + + The project (which openrc_user is part of) whose + access data should be retrieved. + +.. zuul:rolevar:: openrc_enable_export + :default: false + + Set it to true to export os_env_vars. 
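The tox run playbook above shows the intended pairing; with the export switched on, it would look like the following sketch::

    - hosts: all
      roles:
        # Read the OS_* variables from openrc and expose them to tox
        - role: get-devstack-os-environment
          openrc_enable_export: true
        - tox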
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml new file mode 100644 index 0000000000..f68ea560d0 --- /dev/null +++ b/roles/get-devstack-os-environment/defaults/main.yaml @@ -0,0 +1,6 @@ +devstack_base_dir: "/opt/stack" +openrc_file: "{{ devstack_base_dir }}/devstack/openrc" +openrc_user: admin +openrc_project: admin +openrc_enable_export: false +tox_environment: {} diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml new file mode 100644 index 0000000000..b2c5e93ed4 --- /dev/null +++ b/roles/get-devstack-os-environment/tasks/main.yaml @@ -0,0 +1,14 @@ +- when: openrc_enable_export + block: + - name: Extract the OS_ environment variables + shell: + cmd: | + source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null + env | awk -F= 'BEGIN {print "---" } /^OS_/ { print " "$1": \""$2"\""} ' + args: + executable: "/bin/bash" + register: env_os + + - name: Append the OS_ environment variables to tox_environment + set_fact: + tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}" diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst new file mode 100644 index 0000000000..097dcea55e --- /dev/null +++ b/roles/orchestrate-devstack/README.rst @@ -0,0 +1,25 @@ +Orchestrate a devstack + +Runs devstack in a multinode scenario, with one controller node +and a group of subnodes. + +The reason for this role is so that jobs in other repositories may +run devstack in their plays with no need for re-implementing the +orchestration logic. + +The "run-devstack" role is available to run devstack with no +orchestration. + +This role sets up the controller and CA first; it then pushes CA +data to sub-nodes and runs devstack there. The only requirement for +this role is for the controller inventory_hostname to be "controller" +and for all sub-nodes to be defined in a group called "subnode". + +This role needs to be invoked from a playbook that uses a "linear" strategy. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. 
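The playbooks/devstack.yaml added by this change is the reference invocation, pinning the strategy this role requires::

    - hosts: all
      # orchestrate-devstack relies on "linear" so the controller
      # finishes before the subnodes start.
      strategy: linear
      roles:
        - orchestrate-devstack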
diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/orchestrate-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml new file mode 100644 index 0000000000..b8ee7e35a7 --- /dev/null +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -0,0 +1,50 @@ +- name: Run devstack on the controller + include_role: + name: run-devstack + when: inventory_hostname == 'controller' + +- name: Setup devstack on sub-nodes + any_errors_fatal: true + block: + + - name: Distribute the build sshkey for the user "stack" + include_role: + name: copy-build-sshkey + vars: + copy_sshkey_target_user: 'stack' + + - name: Sync CA data to subnodes (when any) + # Only do this if the tls-proxy service is defined and enabled + include_role: + name: sync-devstack-data + when: devstack_services['tls-proxy']|default(false) + + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + + - name: Run devstack on the sub-nodes + include_role: + name: run-devstack + when: inventory_hostname in groups['subnode'] + + - name: Discover hosts + # Discovers compute nodes (subnodes) and maps them to cells. Only run + # on the controller node. + # NOTE(mriedem): We want to remove this if/when nova supports + # auto-registration of computes with cells, but that's not happening in + # Ocata. + # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts + # function in devstack gate. Since this is now in devstack, which is + # branched, we know that the discover_hosts tool exists. + become: true + become_user: stack + shell: ./tools/discover_hosts.sh + args: + chdir: "{{ devstack_base_dir }}/devstack" + when: inventory_hostname == 'controller' + + when: + - '"controller" in hostvars' + - '"subnode" in groups' diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst new file mode 100644 index 0000000000..a8447d2355 --- /dev/null +++ b/roles/process-stackviz/README.rst @@ -0,0 +1,22 @@ +Generate stackviz report. + +Generate stackviz report using subunit and dstat data, using +the stackviz archive embedded in test images. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: stage_dir + :default: "{{ ansible_user_dir }}" + + The stage directory where the input data can be found and + the output will be produced. + +.. zuul:rolevar:: zuul_work_dir + :default: {{ devstack_base_dir }}/tempest + + Directory to work in. It has to be a fully qualified path. 
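A usage sketch pointing the role at a non-default tempest checkout; the play wrapper and the path override are illustrative only::

    - hosts: tempest
      roles:
        - role: process-stackviz
          # Directory containing testrepository.subunit (assumed path)
          zuul_work_dir: /opt/stack/tempest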
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml new file mode 100644 index 0000000000..f3bc32b149 --- /dev/null +++ b/roles/process-stackviz/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" +zuul_work_dir: "{{ devstack_base_dir }}/tempest" diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml new file mode 100644 index 0000000000..3ba3d9c2e6 --- /dev/null +++ b/roles/process-stackviz/tasks/main.yaml @@ -0,0 +1,73 @@ +- name: Process Stackviz + block: + + - name: Check if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive + + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists + + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input + + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U + + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists + + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists + + ignore_errors: yes diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst new file mode 100644 index 0000000000..d77eb15e99 --- /dev/null +++ b/roles/run-devstack/README.rst @@ -0,0 +1,8 @@ +Run devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/run-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml new file mode 100644 index 0000000000..f58b31d477 --- /dev/null +++ b/roles/run-devstack/tasks/main.yaml @@ -0,0 +1,11 @@ +- name: Run devstack + shell: + cmd: | + ./stack.sh 2>&1 + rc=$? 
+ echo "*** FINISHED ***" + exit $rc + args: + chdir: "{{devstack_base_dir}}/devstack" + become: true + become_user: stack diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst new file mode 100644 index 0000000000..b8938c3dea --- /dev/null +++ b/roles/setup-devstack-cache/README.rst @@ -0,0 +1,15 @@ +Set up the devstack cache directory + +If the node has a cache of devstack image files, copy it into place. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_cache_dir + :default: /opt/cache + + The directory with the cached files. diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml new file mode 100644 index 0000000000..c56720b4f5 --- /dev/null +++ b/roles/setup-devstack-cache/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_cache_dir: /opt/cache diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml new file mode 100644 index 0000000000..3adff17d5d --- /dev/null +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Copy cached devstack files + # This uses hard links to avoid using extra space. + command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" + become: true + ignore_errors: yes + +- name: Set ownership of cached files + file: + path: '{{ devstack_base_dir }}/devstack/files' + state: directory + recurse: true + owner: stack + group: stack + mode: a+r + become: yes diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst new file mode 100644 index 0000000000..9d8dba3442 --- /dev/null +++ b/roles/setup-devstack-log-dir/README.rst @@ -0,0 +1,11 @@ +Set up the devstack log directory + +Create a log directory on the ephemeral disk partition to save space +on the root device. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/setup-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..d8e8cfe70a --- /dev/null +++ b/roles/setup-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,8 @@ +- name: Create logs directory + file: + path: '{{ devstack_base_dir }}/logs' + state: directory + mode: 0755 + owner: stack + group: stack + become: yes diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst new file mode 100644 index 0000000000..0aa048b7d2 --- /dev/null +++ b/roles/setup-devstack-source-dirs/README.rst @@ -0,0 +1,16 @@ +Set up the devstack source directories + +Ensure that the base directory exists, and then move the source repos +into it. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_sources_branch + :default: None + + The target branch to be setup (where available). 
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml new file mode 100644 index 0000000000..77a74d7b89 --- /dev/null +++ b/roles/setup-devstack-source-dirs/defaults/main.yaml @@ -0,0 +1,9 @@ +devstack_base_dir: /opt/stack +devstack_source_dirs: + - src/opendev.org/opendev + - src/opendev.org/openstack + - src/opendev.org/openstack-dev + - src/opendev.org/openstack-infra + - src/opendev.org/starlingx + - src/opendev.org/x + - src/opendev.org/zuul diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml new file mode 100644 index 0000000000..cb7c6e3af8 --- /dev/null +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -0,0 +1,72 @@ +- name: Find all OpenStack source repos used by this job + find: + paths: "{{ devstack_source_dirs }}" + file_type: directory + register: found_repos + +- name: Copy Zuul repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_repos.files }}' + become: yes + +# Github projects are github.com/username/repo (username might be a +# top-level project too), so we have to do a two-step swizzle to just +# get the full repo path (ansible's find module doesn't help with this +# :/) +- name: Find top level github projects + find: + paths: + - src/github.com + file_type: directory + register: found_github_projects + +- name: Find actual github repos + find: + paths: '{{ found_github_projects.files | map(attribute="path") | list }}' + file_type: directory + register: found_github_repos + when: found_github_projects.files + +- name: Copy github repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_github_repos.files }}' + become: yes + when: found_github_projects.files + +- name: Setup refspec for repos into devstack working directory + shell: + # Copied almost "as-is" from devstack-gate setup-workspace function + # but removing the dependency on functions.sh + # TODO this should be rewritten as a python module. + cmd: | + cd {{ devstack_base_dir }}/{{ item.path | basename }} + base_branch={{ devstack_sources_branch }} + if git branch -a | grep "$base_branch" > /dev/null ; then + git checkout $base_branch + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then + # Look for an eol tag for the stable branch. + eol_tag="${base_branch#*/}-eol" + if git tag -l |grep $eol_tag >/dev/null; then + git checkout $eol_tag + git reset --hard $eol_tag + if ! git clean -x -f -d -q ; then + sleep 1 + git clean -x -f -d -q + fi + fi + else + git checkout master + fi + args: + executable: /bin/bash + with_items: '{{ found_repos.files }}' + when: devstack_sources_branch is defined + +- name: Set ownership of repos + file: + path: '{{ devstack_base_dir }}' + state: directory + recurse: true + owner: stack + group: stack + become: yes diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst new file mode 100644 index 0000000000..80c4d39eff --- /dev/null +++ b/roles/setup-stack-user/README.rst @@ -0,0 +1,16 @@ +Set up the `stack` user + +Create the stack user, set up its home directory, and allow it to +sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_stack_home_dir + :default: {{ devstack_base_dir }} + + The home directory for the stack user. 
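A sketch of placing the stack home directory somewhere other than the default; the mount point is hypothetical::

    - hosts: all
      roles:
        - role: setup-stack-user
          devstack_base_dir: /opt/stack
          # e.g. an ephemeral-disk mount with more space
          devstack_stack_home_dir: /mnt/stack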
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml new file mode 100644 index 0000000000..6d0be666d4 --- /dev/null +++ b/roles/setup-stack-user/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_stack_home_dir: '{{ devstack_base_dir }}' diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh new file mode 100644 index 0000000000..4c6b46bdb1 --- /dev/null +++ b/roles/setup-stack-user/files/50_stack_sh @@ -0,0 +1 @@ +stack ALL=(root) NOPASSWD:ALL diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml new file mode 100644 index 0000000000..0fc7c2d78b --- /dev/null +++ b/roles/setup-stack-user/tasks/main.yaml @@ -0,0 +1,47 @@ +- name: Create stack group + group: + name: stack + become: yes + +# NOTE(andreaf) Creating a user home_dir via the user module +# is not safe, since it will fail if the containing +# folder does not exist. If the folder does exist and +# it's empty, the skeleton is set up and ownership is set. +- name: Create the stack user home folder + file: + path: '{{ devstack_stack_home_dir }}' + state: directory + become: yes + +- name: Create stack user + user: + name: stack + shell: /bin/bash + home: '{{ devstack_stack_home_dir }}' + group: stack + become: yes + +- name: Set stack user home directory permissions and ownership + file: + path: '{{ devstack_stack_home_dir }}' + mode: 0755 + owner: stack + group: stack + become: yes + +- name: Copy 50_stack_sh file to /etc/sudoers.d + copy: + src: 50_stack_sh + dest: /etc/sudoers.d + mode: 0440 + owner: root + group: root + become: yes + +- name: Create .cache folder within BASE + file: + path: '{{ devstack_stack_home_dir }}/.cache' + state: directory + owner: stack + group: stack + become: yes diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst new file mode 100644 index 0000000000..bb29c50a28 --- /dev/null +++ b/roles/setup-tempest-user/README.rst @@ -0,0 +1,10 @@ +Set up the `tempest` user + +Create the tempest user and allow it to sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh new file mode 100644 index 0000000000..f88ff9f4f2 --- /dev/null +++ b/roles/setup-tempest-user/files/51_tempest_sh @@ -0,0 +1,3 @@ +tempest ALL=(root) NOPASSWD:/sbin/ip +tempest ALL=(root) NOPASSWD:/sbin/iptables +tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml new file mode 100644 index 0000000000..892eaf655a --- /dev/null +++ b/roles/setup-tempest-user/tasks/main.yaml @@ -0,0 +1,20 @@ +- name: Create tempest group + group: + name: tempest + become: yes + +- name: Create tempest user + user: + name: tempest + shell: /bin/bash + group: tempest + become: yes + +- name: Copy 51_tempest_sh to /etc/sudoers.d + copy: + src: 51_tempest_sh + dest: /etc/sudoers.d + owner: root + group: root + mode: 0440 + become: yes diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst new file mode 100644 index 0000000000..11b029e182 --- /dev/null +++ b/roles/start-fresh-logging/README.rst @@ -0,0 +1,11 @@ +Restart logging on all hosts + +Restart syslog so that the system logs only include output from the +job. + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/start-fresh-logging/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml new file mode 100644 index 0000000000..6c7ba66de7 --- /dev/null +++ b/roles/start-fresh-logging/tasks/main.yaml @@ -0,0 +1,56 @@ +- name: Check for /bin/journalctl file + command: which journalctl + changed_when: False + failed_when: False + register: which_out + +- block: + - name: Get current date + command: date +"%Y-%m-%d %H:%M:%S" + register: date_out + + - name: Copy current date to log-start-timestamp.txt + copy: + dest: "{{ devstack_base_dir }}/log-start-timestamp.txt" + content: "{{ date_out.stdout }}" + when: which_out.rc == 0 + become: yes + +- block: + - name: Stop rsyslog + service: name=rsyslog state=stopped + + - name: Save syslog file prior to devstack run + command: mv /var/log/syslog /var/log/syslog-pre-devstack + + - name: Save kern.log file prior to devstack run + command: mv /var/log/kern.log /var/log/kern_log-pre-devstack + + - name: Recreate syslog file + file: name=/var/log/syslog state=touch + + - name: Recreate syslog file owner and group + command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Recreate syslog file permissions + command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Add read permissions to all on syslog file + file: name=/var/log/syslog mode=a+r + + - name: Recreate kern.log file + file: name=/var/log/kern.log state=touch + + - name: Recreate kern.log file owner and group + command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Recreate kern.log file permissions + command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Add read permissions to all on kern.log file + file: name=/var/log/kern.log mode=a+r + + - name: Start rsyslog + service: name=rsyslog state=started + when: which_out.rc == 1 + become: yes diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst new file mode 100644 index 0000000000..388625c893 --- /dev/null +++ b/roles/sync-devstack-data/README.rst @@ -0,0 +1,19 @@ +Sync devstack data for multinode configurations + +Sync any data files which include certificates to be used if TLS is enabled. 
+This role must be executed on the controller and it pushes data to all +subnodes. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_data_base_dir + :default: {{ devstack_base_dir }} + + The devstack base directory for data/. + Useful for example when multiple executions of devstack (e.g. grenade) + share the same data directory. diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml new file mode 100644 index 0000000000..6b5017b811 --- /dev/null +++ b/roles/sync-devstack-data/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_data_base_dir: "{{ devstack_base_dir }}" diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml new file mode 100644 index 0000000000..a1d37c3951 --- /dev/null +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -0,0 +1,59 @@ +- name: Ensure the data folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the CA folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data/CA" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Pull the CA certificate and folder + become: true + synchronize: + src: "{{ item }}" + dest: "{{ zuul.executor.work_root }}/{{ item | basename }}" + mode: pull + with_items: + - "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + - "{{ devstack_data_base_dir }}/data/CA" + when: inventory_hostname == 'controller' + +- name: Push the CA certificate + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/ca-bundle.pem" + dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Push the CA folder + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/CA/" + dest: "{{ devstack_data_base_dir }}/data/" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the data folder and subfolders have the correct permissions + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + recurse: yes + when: 'inventory_hostname in groups["subnode"]|default([])' diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst new file mode 100644 index 0000000000..d0a51e77c2 --- /dev/null +++ b/roles/write-devstack-local-conf/README.rst @@ -0,0 +1,99 @@ +Write the local.conf file for use by devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_local_conf_path + :default: {{ devstack_base_dir }}/devstack/local.conf + + The path of the local.conf file. + +.. zuul:rolevar:: devstack_localrc + :type: dict + + A dictionary of variables that should be written to the localrc + section of local.conf. The values (which are strings) may contain + bash shell variables, and will be ordered so that variables used by + later entries appear first. 
+ + As a special case, the variable ``LIBS_FROM_GIT`` will be + constructed automatically from the projects which appear in the + ``required-projects`` list defined by the job plus the project of + the change under test. To instruct devstack to install a library + from source rather than pypi, simply add that library to the job's + ``required-projects`` list. To override the + automatically-generated value, set ``LIBS_FROM_GIT`` in + ``devstack_localrc`` to the desired value. + +.. zuul:rolevar:: devstack_local_conf + :type: dict + + A complex argument consisting of nested dictionaries which combine + to form the meta-sections of the local.conf file. The top level is + a dictionary of phases, followed by dictionaries of filenames, then + sections, which finally contain key-value pairs for the INI file + entries in those sections. + + The keys in this dictionary are the devstack phases. + + .. zuul:rolevar:: [phase] + :type: dict + + The keys in this dictionary are the filenames for this phase. + + .. zuul:rolevar:: [filename] + :type: dict + + The keys in this dictionary are the INI sections in this file. + + .. zuul:rolevar:: [section] + :type: dict + + This is a dictionary of key-value pairs which comprise + this section of the INI file. + +.. zuul:rolevar:: devstack_base_services + :type: list + :default: {{ base_services | default(omit) }} + + A list of base services which are enabled. Services can be added or removed + from this list via the ``devstack_services`` variable. This is ignored if + ``base`` is set to ``False`` in ``devstack_services``. + +.. zuul:rolevar:: devstack_services + :type: dict + + A dictionary mapping service names to boolean values. If the + boolean value is ``false``, a ``disable_service`` line will be + emitted for the service name. If it is ``true``, then + ``enable_service`` will be emitted. All other values are ignored. + + The special key ``base`` can be used to enable or disable the base set of + services enabled by default. If ``base`` is found, it will be processed before + all other keys. If its value is ``False`` a ``disable_all_services`` will be + emitted; if its value is ``True`` services from ``devstack_base_services`` + will be emitted via ``ENABLED_SERVICES``. + +.. zuul:rolevar:: devstack_plugins + :type: dict + + A dictionary mapping a plugin name to a git repo location. If the + location is a non-empty string, then an ``enable_plugin`` line will + be emitted for the plugin name. + + If a plugin declares a dependency on another plugin (via + ``plugin_requires`` in the plugin's settings file), this role will + automatically emit ``enable_plugin`` lines in the correct order. + +.. zuul:rolevar:: tempest_plugins + :type: list + + A list of tempest plugins which are installed alongside tempest. + + The list of values will be combined with the base devstack directory + and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable + already exists, its value is *not* changed. 
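For a concrete picture of how these variables combine, here is a minimal, hypothetical sketch (not part of this change) that feeds role-variable-shaped inputs to the ``LocalConf`` class from the library added below; all input values are invented for illustration, and it assumes ``devstack_local_conf.py`` is on the import path:

    # Hypothetical inputs mirroring devstack_localrc, devstack_services,
    # devstack_local_conf and devstack_plugins.
    from devstack_local_conf import LocalConf

    lc = LocalConf(
        localrc={'ADMIN_PASSWORD': 'secret',
                 'SERVICE_PASSWORD': '$ADMIN_PASSWORD'},
        localconf={'post-config': {'$NOVA_CONF': {'DEFAULT': {'debug': True}}}},
        base_services=['mysql', 'rabbit'],
        services={'cinder': True, 'horizon': False},
        plugins={'foo': 'https://opendev.org/openstack/foo-plugin'},
        base_dir='/opt/stack',
        projects=None,
        project=None,
        tempest_plugins=None)
    lc.write('local.conf')

    # The localrc section comes out dependency-ordered: ADMIN_PASSWORD is
    # emitted before SERVICE_PASSWORD (which references it), the service
    # booleans become enable_service/disable_service lines, the
    # enable_plugin line lands last, and the nested dict becomes a
    # [[post-config|$NOVA_CONF]] meta-section containing debug = True.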
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml new file mode 100644 index 0000000000..7bc1dec9b8 --- /dev/null +++ b/roles/write-devstack-local-conf/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf" +devstack_base_services: "{{ enabled_services | default(omit) }}" diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py new file mode 100644 index 0000000000..2f97d0e355 --- /dev/null +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -0,0 +1,351 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + + +class DependencyGraph(object): + # This is based on the JobGraph from Zuul. + + def __init__(self): + self._names = set() + self._dependencies = {} # dependent_name -> set(parent_names) + + def add(self, name, dependencies): + # Append the dependency information + self._dependencies.setdefault(name, set()) + try: + for dependency in dependencies: + # Make sure a circular dependency is never created + ancestors = self._getParentNamesRecursively( + dependency, soft=True) + ancestors.add(dependency) + if name in ancestors: + raise Exception("Dependency cycle detected in {}". 
+ format(name)) + self._dependencies[name].add(dependency) + except Exception: + del self._dependencies[name] + raise + + def getDependenciesRecursively(self, parent): + dependencies = [] + + current_dependencies = self._dependencies[parent] + for current in current_dependencies: + if current not in dependencies: + dependencies.append(current) + for dep in self.getDependenciesRecursively(current): + if dep not in dependencies: + dependencies.append(dep) + return dependencies + + def _getParentNamesRecursively(self, dependent, soft=False): + all_parent_items = set() + items_to_iterate = set([dependent]) + while len(items_to_iterate) > 0: + current_item = items_to_iterate.pop() + current_parent_items = self._dependencies.get(current_item) + if current_parent_items is None: + if soft: + current_parent_items = set() + else: + raise Exception("Dependent item {} not found: ".format( + dependent)) + new_parent_items = current_parent_items - all_parent_items + items_to_iterate |= new_parent_items + all_parent_items |= new_parent_items + return all_parent_items + + +class VarGraph(DependencyGraph): + def __init__(self, vars): + super(VarGraph, self).__init__() + self.vars = {} + self._varnames = set() + for k, v in vars.items(): + self._varnames.add(k) + for k, v in vars.items(): + self._addVar(k, str(v)) + + bash_var_re = re.compile(r'\$\{?(\w+)') + def getDependencies(self, value): + return self.bash_var_re.findall(value) + + def _addVar(self, key, value): + if key in self.vars: + raise Exception("Variable {} already added".format(key)) + self.vars[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(value): + if dependency == key: + # A variable is allowed to reference itself; no + # dependency link needed in that case. + continue + if dependency not in self._varnames: + # It's not necessary to create a link for an + # external variable. + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.vars[key] + raise + + def getVars(self): + ret = [] + keys = sorted(self.vars.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for var in dependencies + [key]: + if var not in seen: + ret.append((var, self.vars[var])) + seen.add(var) + return ret + + +class PluginGraph(DependencyGraph): + def __init__(self, base_dir, plugins): + super(PluginGraph, self).__init__() + # The dependency trees expressed by all the plugins we found + # (which may be more than those the job is using). 
+ self._plugin_dependencies = {} + self.loadPluginNames(base_dir) + + self.plugins = {} + self._pluginnames = set() + for k, v in plugins.items(): + self._pluginnames.add(k) + for k, v in plugins.items(): + self._addPlugin(k, str(v)) + + def loadPluginNames(self, base_dir): + if base_dir is None: + return + git_roots = [] + for root, dirs, files in os.walk(base_dir): + if '.git' not in dirs: + continue + # Don't go deeper than git roots + dirs[:] = [] + git_roots.append(root) + for root in git_roots: + devstack = os.path.join(root, 'devstack') + if not (os.path.exists(devstack) and os.path.isdir(devstack)): + continue + settings = os.path.join(devstack, 'settings') + if not (os.path.exists(settings) and os.path.isfile(settings)): + continue + self.loadDevstackPluginInfo(settings) + + define_re = re.compile(r'^define_plugin\s+(\S+).*') + require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*') + def loadDevstackPluginInfo(self, fn): + name = None + reqs = set() + with open(fn) as f: + for line in f: + m = self.define_re.match(line) + if m: + name = m.group(1) + m = self.require_re.match(line) + if m: + if name == m.group(1): + reqs.add(m.group(2)) + if name and reqs: + self._plugin_dependencies[name] = reqs + + def getDependencies(self, value): + return self._plugin_dependencies.get(value, []) + + def _addPlugin(self, key, value): + if key in self.plugins: + raise Exception("Plugin {} already added".format(key)) + self.plugins[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(key): + if dependency == key: + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.plugins[key] + raise + + def getPlugins(self): + ret = [] + keys = sorted(self.plugins.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for plugin in dependencies + [key]: + if plugin not in seen: + ret.append((plugin, self.plugins[plugin])) + seen.add(plugin) + return ret + + +class LocalConf(object): + + def __init__(self, localrc, localconf, base_services, services, plugins, + base_dir, projects, project, tempest_plugins): + self.localrc = [] + self.warnings = [] + self.meta_sections = {} + self.plugin_deps = {} + self.base_dir = base_dir + self.projects = projects + self.project = project + self.tempest_plugins = tempest_plugins + if services or base_services: + self.handle_services(base_services, services or {}) + self.handle_localrc(localrc) + # Plugins must be the last items in localrc, otherwise + # the configuration lines which follow them in the file are + # not applied to the plugins (for example, the value of DEST.) 
+ if plugins: + self.handle_plugins(plugins) + if localconf: + self.handle_localconf(localconf) + + def handle_plugins(self, plugins): + pg = PluginGraph(self.base_dir, plugins) + for k, v in pg.getPlugins(): + if v: + self.localrc.append('enable_plugin {} {}'.format(k, v)) + + def handle_services(self, base_services, services): + enable_base_services = services.pop('base', True) + if enable_base_services and base_services: + self.localrc.append('ENABLED_SERVICES={}'.format( + ",".join(base_services))) + else: + self.localrc.append('disable_all_services') + for k, v in services.items(): + if v is False: + self.localrc.append('disable_service {}'.format(k)) + elif v is True: + self.localrc.append('enable_service {}'.format(k)) + + def handle_localrc(self, localrc): + lfg = False + tp = False + if localrc: + vg = VarGraph(localrc) + for k, v in vg.getVars(): + # Avoid double quoting + if len(v) and v[0] == '"': + self.localrc.append('{}={}'.format(k, v)) + else: + self.localrc.append('{}="{}"'.format(k, v)) + if k == 'LIBS_FROM_GIT': + lfg = True + elif k == 'TEMPEST_PLUGINS': + tp = v + + if not lfg and (self.projects or self.project): + required_projects = [] + if self.projects: + for project_name, project_info in self.projects.items(): + if project_info.get('required'): + required_projects.append(project_info['short_name']) + if self.project: + if self.project['short_name'] not in required_projects: + required_projects.append(self.project['short_name']) + if required_projects: + self.localrc.append('LIBS_FROM_GIT={}'.format( + ','.join(required_projects))) + + if self.tempest_plugins: + if not tp: + tp_dirs = [] + for tempest_plugin in self.tempest_plugins: + tp_dirs.append(os.path.join(self.base_dir, tempest_plugin)) + self.localrc.append('TEMPEST_PLUGINS="{}"'.format( + ' '.join(tp_dirs))) + else: + self.warnings.append('TEMPEST_PLUGINS already defined ({}), ' + 'requested value {} ignored'.format( + tp, self.tempest_plugins)) + + + def handle_localconf(self, localconf): + for phase, phase_data in localconf.items(): + for fn, fn_data in phase_data.items(): + ms_name = '[[{}|{}]]'.format(phase, fn) + ms_data = [] + for section, section_data in fn_data.items(): + ms_data.append('[{}]'.format(section)) + for k, v in section_data.items(): + ms_data.append('{} = {}'.format(k, v)) + ms_data.append('') + self.meta_sections[ms_name] = ms_data + + def write(self, path): + with open(path, 'w') as f: + f.write('[[local|localrc]]\n') + f.write('\n'.join(self.localrc)) + f.write('\n\n') + for section, lines in self.meta_sections.items(): + f.write('{}\n'.format(section)) + f.write('\n'.join(lines)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + plugins=dict(type='dict'), + base_services=dict(type='list'), + services=dict(type='dict'), + localrc=dict(type='dict'), + local_conf=dict(type='dict'), + base_dir=dict(type='path'), + path=dict(type='str'), + projects=dict(type='dict'), + project=dict(type='dict'), + tempest_plugins=dict(type='list'), + ) + ) + + p = module.params + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + lc.write(p['path']) + + module.exit_json(warnings=lc.warnings) + + +try: + from ansible.module_utils.basic import * # noqa + from ansible.module_utils.basic import AnsibleModule +except ImportError: + pass + +if __name__ == '__main__': + main() diff --git 
a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py new file mode 100644 index 0000000000..7c526b34c8 --- /dev/null +++ b/roles/write-devstack-local-conf/library/test.py @@ -0,0 +1,291 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +from devstack_local_conf import LocalConf +from collections import OrderedDict + +class TestDevstackLocalConf(unittest.TestCase): + + @staticmethod + def _init_localconf(p): + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + return lc + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_plugins(self): + "Test that plugins without dependencies work" + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ('baz', 'https://git.openstack.org/openstack/baz-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['bar', 'baz', 'foo'], plugins) + + + def test_plugin_deps(self): + "Test that plugins with dependencies work" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo-plugin\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar-plugin\n') + f.write('plugin_requires bar-plugin foo-plugin\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. 
+ plugins = OrderedDict([ + ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['foo-plugin', 'bar-plugin'], plugins) + + def test_libs_from_git(self): + "Test that LIBS_FROM_GIT is auto-generated" + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + project = { + 'short_name': 'glance', + } + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects, + project=project) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('nova,oslo.messaging,glance', lfg) + + def test_overridelibs_from_git(self): + "Test that LIBS_FROM_GIT can be overridden" + localrc = {'LIBS_FROM_GIT': 'oslo.db'} + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('"oslo.db"', lfg) + + def test_avoid_double_quote(self): + "Test that there are no duplicated quotes" + localrc = {'TESTVAR': '"quoted value"'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects={}) + lc = self._init_localconf(p) + lc.write(p['path']) + + testvar = None + with open(p['path']) as f: + for line in f: + if line.startswith('TESTVAR'): + testvar = line.strip().split('=')[1] + self.assertEqual('"quoted value"', testvar) + + def test_plugin_circular_deps(self): + "Test that plugins with circular dependencies fail" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo\n') + f.write('plugin_requires foo bar\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar\n') + f.write('plugin_requires bar foo\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + 
{'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + with self.assertRaises(Exception): + lc = self._init_localconf(p) + lc.write(p['path']) + + def _find_tempest_plugins_value(self, file_path): + tp = None + with open(file_path) as f: + for line in f: + if line.startswith('TEMPEST_PLUGINS'): + found = line.strip().split('=')[1] + self.assertIsNone(tp, + "TEMPEST_PLUGINS ({}) found again ({})".format( + tp, found)) + tp = found + return tp + + def test_tempest_plugins(self): + "Test that TEMPEST_PLUGINS is correctly populated." + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp) + self.assertEqual(len(lc.warnings), 0) + + def test_tempest_plugins_not_overridden(self): + """Test that the existing value of TEMPEST_PLUGINS is not overridden + by the user-provided value, but a warning is emitted.""" + localrc = {'TEMPEST_PLUGINS': 'someplugin'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"someplugin"', tp) + self.assertEqual(len(lc.warnings), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml new file mode 100644 index 0000000000..bfd086034b --- /dev/null +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -0,0 +1,14 @@ +- name: Write a job-specific local_conf file + become: true + become_user: stack + devstack_local_conf: + path: "{{ devstack_local_conf_path }}" + plugins: "{{ devstack_plugins|default(omit) }}" + base_services: "{{ devstack_base_services|default(omit) }}" + services: "{{ devstack_services|default(omit) }}" + localrc: "{{ devstack_localrc|default(omit) }}" + local_conf: "{{ devstack_local_conf|default(omit) }}" + base_dir: "{{ devstack_base_dir|default(omit) }}" + projects: "{{ zuul.projects }}" + project: "{{ zuul.project }}" + tempest_plugins: "{{ tempest_plugins|default(omit) }}" diff --git a/run_tests.sh b/run_tests.sh index b1aef4f81f..a9a3d0bb48 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -11,54 +11,22 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# -# -# this runs a series of unit tests for devstack to ensure it's functioning + +# This runs a series of unit tests for DevStack to ensure it's functioning PASSES="" FAILURES="" -# Check the return code and add the test to PASSES or FAILURES as appropriate -# pass_fail -function pass_fail { - local result=$1 - local expected=$2 - local test_name=$3 - - if [[ $result -ne $expected ]]; then - FAILURES="$FAILURES $test_name" +for testfile in tests/test_*.sh; do + $testfile + if [[ $? -eq 0 ]]; then + PASSES="$PASSES $testfile" else - PASSES="$PASSES $test_name" + FAILURES="$FAILURES $testfile" fi -} - -if [[ -n $@ ]]; then - FILES=$@ -else - LIBS=`find lib -type f | grep -v \.md` - SCRIPTS=`find . -type f -name \*\.sh` - EXTRA="functions functions-common stackrc openrc exerciserc eucarc" - FILES="$SCRIPTS $LIBS $EXTRA" -fi - -echo "Running bash8..." - -./tools/bash8.py -v $FILES -pass_fail $? 0 bash8 - - -# Test that no one is trying to land crazy refs as branches - -echo "Ensuring we don't have crazy refs" - -REFS=`grep BRANCH stackrc | grep -v -- '-master'` -rc=$? -pass_fail $rc 1 crazy-refs -if [[ $rc -eq 0 ]]; then - echo "Branch defaults must be master. Found:" - echo $REFS -fi +done +# Summary display now that all is said and done echo "=====================================================================" for script in $PASSES; do echo PASS $script diff --git a/samples/local.conf b/samples/local.conf index c8126c22af..55b729809d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -1,9 +1,8 @@ # Sample ``local.conf`` for user-configurable variables in ``stack.sh`` -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. -# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``. +# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. # Also, most of the settings in ``stack.sh`` are written to only be set if no # value has already been set; this lets ``local.conf`` effectively override the @@ -11,7 +10,7 @@ # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/local.conf.html +# in https://docs.openstack.org/devstack/latest/configuration.html#local-conf # These should be considered as samples and are unsupported DevStack code. # The ``localrc`` section replaces the old ``localrc`` configuration file. @@ -25,20 +24,21 @@ # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter -# values for them by ``stack.sh`` and they will be added to ``local.conf``. -ADMIN_PASSWORD=nomoresecrete -MYSQL_PASSWORD=stackdb +# values for them by ``stack.sh`` and they will be added to ``local.conf``. +ADMIN_PASSWORD=nomoresecret +DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD -# ``HOST_IP`` should be set manually for best results if the NIC configuration -# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the -# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate -# on later runs due to the IP moving from an Ethernet interface to a bridge on -# the host. Setting it here also makes it available for ``openrc`` to include -# when setting ``OS_AUTH_URL``. 
-# ``HOST_IP`` is not set by default. +# ``HOST_IP`` and ``HOST_IPV6`` should be set manually for best results if +# the NIC configuration of the host is unusual, i.e. ``eth1`` has the default +# route but ``eth0`` is the public interface. They are auto-detected in +# ``stack.sh`` but are often indeterminate on later runs due to the IP moving +# from an Ethernet interface to a bridge on the host. Setting them here also +# makes them available for ``openrc`` to include when setting ``OS_AUTH_URL``. +# Neither is set by default. #HOST_IP=w.x.y.z +#HOST_IPV6=2001:db8::7 # Logging @@ -49,7 +49,7 @@ SERVICE_PASSWORD=$ADMIN_PASSWORD # path of the destination log file. A timestamp will be appended to the given name. LOGFILE=$DEST/logs/stack.sh.log -# Old log files are automatically removed after 7 days to keep things neat. Change +# Old log files are automatically removed after 2 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 @@ -61,7 +61,8 @@ LOGDAYS=2 # Using milestone-proposed branches # --------------------------------- -# Uncomment these to grab the milestone-proposed branches from the repos: +# Uncomment these to grab the milestone-proposed branches from the +# repos: #CINDER_BRANCH=milestone-proposed #GLANCE_BRANCH=milestone-proposed #HORIZON_BRANCH=milestone-proposed @@ -72,14 +73,20 @@ LOGDAYS=2 #NEUTRON_BRANCH=milestone-proposed #SWIFT_BRANCH=milestone-proposed +# Using git versions of clients +# ----------------------------- +# By default clients are installed from pip. See LIBS_FROM_GIT in +# stackrc for details on getting clients from specific branches or +# revisions. e.g. +# LIBS_FROM_GIT="python-ironicclient" +# IRONICCLIENT_BRANCH=refs/changes/44/2.../1 # Swift # ----- -# Swift is now used as the back-end for the S3-like object store. If Nova's -# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT -# run if Swift is enabled. Setting the hash value is required and you will -# be prompted for it if Swift is enabled so just set it to something already: +# Swift is now used as the back-end for the S3-like object store. Setting the +# hash value is required and you will be prompted for it if Swift is enabled +# so just set it to something already: SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 # For development purposes the default of 3 replicas is usually not required. diff --git a/samples/local.sh b/samples/local.sh index 664cb663fe..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -3,15 +3,14 @@ # Sample ``local.sh`` for user-configurable tasks to run automatically # at the successful conclusion of ``stack.sh``. -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. # This is a collection of some of the things we have found to be useful to run # after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. # These should be considered as samples and are unsupported DevStack code. 
-# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -32,16 +31,23 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` + openstack keypair create --public-key $i `hostname` break fi done + # Update default security group + # ----------------------------- + + # Add tcp/22 and icmp to default security group + default=$(openstack security group list -f value -c ID) + openstack security group rule create $default --protocol tcp --dst-port 22 + openstack security group rule create $default --protocol icmp # Create A Flavor # --------------- @@ -50,20 +56,12 @@ if is_service_enabled nova; then source $TOP_DIR/openrc admin admin # Name of new flavor - # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + # set in ``local.conf`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` MI_NAME=m1.micro # Create micro flavor if not present - if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 + if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then + openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1 fi - - # Other Uses - # ---------- - - # Add tcp/22 and icmp to default security group - nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 - nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 - fi diff --git a/stack.sh b/stack.sh index 8fb57c4d13..965f58007d 100755 --- a/stack.sh +++ b/stack.sh @@ -1,9 +1,8 @@ #!/usr/bin/env bash # ``stack.sh`` is an opinionated OpenStack developer installation. It -# installs and configures various combinations of **Ceilometer**, **Cinder**, -# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# and **Swift** +# installs and configures various combinations of **Cinder**, **Glance**, +# **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** # This script's options can be changed by setting appropriate environment # variables. You can configure things like which git repositories to use, @@ -13,326 +12,592 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work -# on other platforms but support for those platforms is left to those who added -# them to DevStack.) It should work in a VM or physical server. Additionally -# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration -# files in this repo. +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** +# (7 or newer) machine. (It may work on other platforms but support for those +# platforms is left to those who added them to DevStack.) It should work in +# a VM or physical server. Additionally, we maintain a list of ``deb`` and +# ``rpm`` dependencies and other configuration files in this repo. # Learn more and get the most recent version at http://devstack.org +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + # Make sure custom grep options don't get in the way unset GREP_OPTIONS -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. 
+# NOTE(sdague): why do we explicitly set locale when running stack.sh? +# +# Devstack is written in bash, and many functions used throughout +# devstack process text coming off a command (like the ip command) +# and do transforms using grep, sed, cut, awk on the strings that are +# returned. Many of these programs are internationalized, which is +# great for end users, but means that the strings that devstack +# functions depend upon might not be there in other locales. We thus +# need to pin the world to an English basis during the runs. +# +# Previously we used the C locale for this, every system has it, and +# it gives us a stable sort order. It does however mean that we +# effectively drop unicode support.... boo! :( +# +# With python3 being more unicode aware by default, that's not the +# right option. While there is a C.utf8 locale, some distros are +# shipping it as C.UTF8 for extra confusingness. And its support +# isn't super clear across distros. This is made more challenging when +# trying to support both out of the box distros, and the gate which +# uses diskimage builder to build disk images in a different way than +# the distros do. +# +# So... en_US.utf8 it is. That's existed for a very long time. It is a +# compromise position, but it is the least worse idea at the time of +# this comment. +# +# We also have to unset other variables that might impact LC_ALL +# taking effect. unset LANG unset LANGUAGE -LC_ALL=C +LC_ALL=en_US.utf8 export LC_ALL +# Clear all OpenStack related envvars +unset `env | grep -E '^OS_' | cut -d = -f 1` + # Make sure umask is sane umask 022 -# Keep track of the devstack directory +# Not all distros have sbin in PATH for regular users. +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin + +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Check for uninitialized variables, a big cause of bugs +NOUNSET=${NOUNSET:-} +if [[ -n "$NOUNSET" ]]; then + set -o nounset +fi + +# Set start of devstack timestamp +DEVSTACK_START_TIME=$(date +%s) + +# Configuration +# ============= + +# Sanity Checks +# ------------- + +# Clean up last environment var cache +if [[ -r $TOP_DIR/.stackenv ]]; then + rm $TOP_DIR/.stackenv +fi + +# ``stack.sh`` keeps the list of ``deb`` and ``rpm`` dependencies, config +# templates and other useful files in the ``files`` subdirectory +FILES=$TOP_DIR/files +if [ ! -d $FILES ]; then + set +o xtrace + echo "missing devstack/files" + exit 1 +fi + +# ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/inc`` directory is present +if [ ! -d $TOP_DIR/inc ]; then + set +o xtrace + echo "missing devstack/inc" + exit 1 +fi + +# ``stack.sh`` keeps project libraries here +# Make sure ``$TOP_DIR/lib`` directory is present +if [ ! -d $TOP_DIR/lib ]; then + set +o xtrace + echo "missing devstack/lib" + exit 1 +fi + +# Check if run in POSIX shell +if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + set +o xtrace + echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer." + exit 1 +fi + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from the **root** user. +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. 
+ +if [[ $EUID -eq 0 ]]; then + set +o xtrace + echo "DevStack should be run as a user with sudo permissions, " + echo "not root." + echo "A \"stack\" user configured correctly can be created with:" + echo " $TOP_DIR/tools/create-stack-user.sh" + exit 1 +fi + +# OpenStack is designed to run at a system level, with system level +# installation of python packages. It does not support running under a +# virtual env, and will fail in really odd ways if you do this. Make +# this explicit as it has come up on the mailing list. +if [[ -n "$VIRTUAL_ENV" ]]; then + set +o xtrace + echo "You appear to be running under a python virtualenv." + echo "DevStack does not support this, as we may break the" + echo "virtualenv you are currently in by modifying " + echo "external system-level components the virtualenv relies on." + echo "We recommend you use a separate virtual machine if " + echo "you are worried about DevStack taking over your system." + exit 1 +fi + +# Provide a safety switch for devstack. If you run devstack on a lot +# of different environments, you sometimes run it on the wrong box. +# This provides a way to prevent that. +if [[ -e $HOME/.no-devstack ]]; then + set +o xtrace + echo "You've marked this host as a no-devstack host, to save yourself from" + echo "running devstack accidentally. If this is in error, please remove the" + echo "~/.no-devstack file" + exit 1 +fi + +# Prepare the environment +# ----------------------- + +# Initialize variables: +LAST_SPINNER_PID="" + # Import common functions source $TOP_DIR/functions -# Import config functions -source $TOP_DIR/lib/config +# Import 'public' stack.sh functions +source $TOP_DIR/lib/stack # Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` # and ``DISTRO`` GetDistro # Global Settings -# =============== +# --------------- # Check for a ``localrc`` section embedded in ``local.conf`` and extract if # ``localrc`` does not already exist # Phase: local rm -f $TOP_DIR/.localrc.auto -if [[ -r $TOP_DIR/local.conf ]]; then - LRC=$(get_meta_section_files $TOP_DIR/local.conf local) - for lfile in $LRC; do - if [[ "$lfile" == "localrc" ]]; then - if [[ -r $TOP_DIR/localrc ]]; then - warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" - else - echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto - get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto - fi - fi - done -fi +extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto # ``stack.sh`` is customizable by setting environment variables. Override a -# default setting via export:: +# default setting via export: # # export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# or by setting the variable on the command line:: +# or by setting the variable on the command line: # # DATABASE_PASSWORD=simple ./stack.sh # -# Persistent variables can be placed in a ``localrc`` file:: +# Persistent variables can be placed in a ``local.conf`` file: # +# [[local|localrc]] # DATABASE_PASSWORD=anothersecret # DATABASE_USER=hellaroot # # We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. ``localrc`` is not distributed with DevStack and will never +# in most cases. ``local.conf`` is not distributed with DevStack and will never # be overwritten by a DevStack update. 
# # DevStack distributes ``stackrc`` which contains locations for the OpenStack # repositories, branches to configure, and other configuration defaults. -# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. +# ``stackrc`` sources the ``localrc`` section of ``local.conf`` to allow you to +# safely override those settings. if [[ ! -r $TOP_DIR/stackrc ]]; then - log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" + die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" fi source $TOP_DIR/stackrc - -# Local Settings -# -------------- - -# Make sure the proxy config is visible to sub-processes -export_proxy_variables - -# Destination path for installation ``DEST`` -DEST=${DEST:-/opt/stack} - - -# Sanity Check -# ------------ - -# Clean up last environment var cache -if [[ -r $TOP_DIR/.stackenv ]]; then - rm $TOP_DIR/.stackenv -fi - -# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config -# templates and other useful files in the ``files`` subdirectory -FILES=$TOP_DIR/files -if [ ! -d $FILES ]; then - log_error $LINENO "missing devstack/files" -fi - -# ``stack.sh`` keeps function libraries here -# Make sure ``$TOP_DIR/lib`` directory is present -if [ ! -d $TOP_DIR/lib ]; then - log_error $LINENO "missing devstack/lib" -fi - -# Import common services (database, message queue) configuration -source $TOP_DIR/lib/database -source $TOP_DIR/lib/rpc_backend - -# Remove services which were negated in ENABLED_SERVICES -# using the "-" prefix (e.g., "-rabbit") instead of -# calling disable_service(). -disable_negated_services +# write /etc/devstack-version +write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" + +if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" fi fi -# Look for obsolete stuff -if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then - echo "FATAL: 'swift' is not supported as a service name" - echo "FATAL: Use the actual swift service names to enable tham as required:" - echo "FATAL: s-proxy s-object s-container s-account" - exit 1 -fi - -# Make sure we only have one rpc backend enabled, -# and the specified rpc backend is available on your platform. -check_rpc_backend - -# Check to see if we are already running DevStack -# Note that this may fail if USE_SCREEN=False -if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then - echo "You are already running a stack.sh session." - echo "To rejoin this session type 'screen -x stack'." - echo "To destroy this session, type './unstack.sh'." - exit 1 -fi +# Local Settings +# -------------- -# Set up logging level -VERBOSE=$(trueorfalse True $VERBOSE) +# Make sure the proxy config is visible to sub-processes +export_proxy_variables -# root Access -# ----------- +# Remove services which were negated in ``ENABLED_SERVICES`` +# using the "-" prefix (e.g., "-rabbit") instead of +# calling disable_service(). +disable_negated_services -# OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). 
-# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of -# action to create a suitable user account. -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." - echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" - echo "$TOP_DIR/tools/create-stack-user.sh" - exit 1 -fi - -# We're not **root**, make sure ``sudo`` is available -is_package_installed sudo || install_package sudo +# We're not **root**, so make sure ``sudo`` is available +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers -# Set up devstack sudoers +# Conditionally set up detailed logging for sudo +if [[ -n "$LOG_SUDO" ]]; then + TEMPFILE=`mktemp` + echo "Defaults log_output" > $TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/00_logging +fi + +# Set up DevStack sudoers TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH +# Some binaries might be under ``/sbin`` or ``/usr/sbin``, so make sure sudo will +# see them by forcing ``PATH`` echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh -# Additional repos -# ---------------- +# Configure Distro Repositories +# ----------------------------- + +# For Debian/Ubuntu make apt attempt to retry network ops on its own +if is_ubuntu; then + echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null +fi # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. -# The Debian Wheezy official repositories do not contain all required packages, -# add gplhost repository. -if [[ "$os_VENDOR" =~ (Debian) ]]; then - echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list - echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list - apt_get update - apt_get install --force-yes gplhost-archive-keyring -fi - -if [[ is_fedora && $DISTRO =~ (rhel) ]]; then - # Installing Open vSwitch on RHEL requires enabling the RDO repo. - RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-icehouse/rdo-release-icehouse.rpm"} - RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-icehouse"} - if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then - echo "RDO repo not detected; installing" - yum_install $RHEL6_RDO_REPO_RPM || \ - die $LINENO "Error installing RDO repo, cannot continue" - fi - # RHEL requires EPEL for many Open Stack dependencies - if [[ $DISTRO =~ (rhel7) ]]; then - EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-0.1.noarch.rpm"} - else - EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - fi - if ! 
sudo yum repolist enabled epel | grep -q 'epel'; then - echo "EPEL not detected; installing" - yum_install ${EPEL_RPM} || \ - die $LINENO "Error installing EPEL repo, cannot continue" - fi - - # ... and also optional to be enabled - is_package_installed yum-utils || install_package yum-utils - sudo yum-config-manager --enable rhel-6-server-optional-rpms -fi -# Filesystem setup -# ---------------- +function _install_epel { + # epel-release is in extras repo which is enabled by default + install_package epel-release + + # RDO repos are not tested with epel and may have incompatibilities so + # let's limit the packages fetched from epel to the ones not in RDO repos. + sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel +} + +function _install_rdo { + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} + rdo_release=${TARGET_BRANCH#*/} + if [[ "$TARGET_BRANCH" == "master" ]]; then + # adding delorean-deps repo to provide current master rpms + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + else + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi + fi + fi + sudo dnf -y update +} + +# Configure Target Directories +# ---------------------------- + +# Destination path for installation ``DEST`` +DEST=${DEST:-/opt/stack} # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) +# If the directory exists, do not modify the permissions. +if [[ ! -d $DEST ]]; then + sudo mkdir -p $DEST + safe_chown -R $STACK_USER $DEST + safe_chmod 0755 $DEST +fi -sudo mkdir -p $DEST -safe_chown -R $STACK_USER $DEST -safe_chmod 0755 $DEST -# a basic test for $DEST path permissions (fatal on error unless skipped) -check_path_perm_sanity ${DEST} +# Destination path for devstack logs +if [[ -n ${LOGDIR:-} ]]; then + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR +fi + +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +if [[ ! -d $DATA_DIR ]]; then + sudo mkdir -p $DATA_DIR + safe_chown -R $STACK_USER $DATA_DIR + safe_chmod 0755 $DATA_DIR +fi + +# Create and/or clean the async state directory +async_init + +# Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR +# If you have all the repos installed above already set up (e.g. a CI +# situation where they are on your image) you may choose to skip this +# to speed things up +SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) + +if [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository + sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms + # rabbitmq and other packages are provided by RDO repositories. 
+ _install_rdo + + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # which triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl + fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problems in openEuler that we should fix first. Some required + # packages/actions run before the fixup script, so we can't fix them there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automatically. + # 4. Ensure wget is installed before use + install_package hostname openstack-release-wallaby wget + uninstall_package python3-pip + + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo +fi + +# Ensure python is installed +# -------------------------- +install_python + + +# Configure Logging +# ----------------- + +# Set up logging level +VERBOSE=$(trueorfalse True VERBOSE) +VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE_NO_TIMESTAMP) + +# Draw a spinner so the user knows something is happening +function spinner { + local delay=0.75 + local spinstr='/-\|' + printf "..." >&3 + while [ true ]; do + local temp=${spinstr#?} + printf "[%c]" "$spinstr" >&3 + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b" >&3 + done +} + +function kill_spinner { + if [ ! -z "$LAST_SPINNER_PID" ]; then + kill >/dev/null 2>&1 $LAST_SPINNER_PID + printf "\b\b\bdone\n" >&3 + fi +} +# Echo text to the log file, summary log file and stdout +# echo_summary "something to say" +function echo_summary { + if [[ -t 3 && "$VERBOSE" != "True" ]]; then + kill_spinner + echo -n -e $@ >&6 + spinner & + LAST_SPINNER_PID=$! + else + echo -e $@ >&6 + fi +} + +# Echo text only to stdout, no log files +# echo_nolog "something not for the logs" +function echo_nolog { + echo $@ >&3 +} + +# Set up logging for ``stack.sh`` +# Set ``LOGFILE`` to turn on logging +# Append '.xxxxxxxx' to the given name to maintain history +# where 'xxxxxxxx' is a representation of the date the file was created +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} +LOGDAYS=${LOGDAYS:-7} +CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") + +if [[ -n "$LOGFILE" ]]; then + # Clean up old log files. Append '.*' to the user-specified + # ``LOGFILE`` to match the date in the search template. 
+ LOGFILE_DIR="${LOGFILE%/*}" # dirname + LOGFILE_NAME="${LOGFILE##*/}" # basename + mkdir -p $LOGFILE_DIR + find $LOGFILE_DIR -maxdepth 1 -name $LOGFILE_NAME.\* -mtime +$LOGDAYS -exec rm {} \; + LOGFILE=$LOGFILE.${CURRENT_LOG_TIME} + SUMFILE=$LOGFILE.summary.${CURRENT_LOG_TIME} -# Whether to enable the debug log level in OpenStack services -ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` + # Redirect output according to config -# Set fixed and floating range here so we can make sure not to use addresses -# from either range when attempting to guess the IP to use for the host. -# Note that setting FIXED_RANGE may be necessary when running DevStack -# in an OpenStack cloud that uses either of these address ranges internally. -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} + # Set fd 3 to a copy of stdout. So we can set fd 1 without losing + # stdout later. + exec 3>&1 + if [[ "$VERBOSE" == "True" ]]; then + _of_args="-v" + if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then + _of_args="$_of_args --no-timestamp" + fi + # Set fd 1 and 2 to write the log file + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 + # Set fd 6 to summary log file + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) + else + # Set fd 1 and 2 to primary logfile + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 + # Set fd 6 to summary logfile and stdout + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) + fi -HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") -if [ "$HOST_IP" == "" ]; then - die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." + echo_summary "stack.sh log $LOGFILE" + # Specified logfile name always links to the most recent log + ln -sf $LOGFILE $LOGFILE_DIR/$LOGFILE_NAME + ln -sf $SUMFILE $LOGFILE_DIR/$LOGFILE_NAME.summary +else + # Set up output redirection without log files + # Set fd 3 to a copy of stdout. So we can set fd 1 without losing + # stdout later. + exec 3>&1 + if [[ "$VERBOSE" != "True" ]]; then + # Throw away stdout and stderr + exec 1>/dev/null 2>&1 + fi + # Always send summary fd to original stdout + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 ) fi -# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. -SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) +check_path_perm_sanity ${DEST} -# Allow the use of an alternate protocol (such as https) for service endpoints -SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} +# Configure Error Traps +# --------------------- -# Configure services to use syslog instead of writing to individual log files -SYSLOG=`trueorfalse False $SYSLOG` -SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} -SYSLOG_PORT=${SYSLOG_PORT:-516} +# Kill background processes on exit +trap exit_trap EXIT +function exit_trap { + local r=$? 
+ jobs=$(jobs -p) + # Only do the kill when we're logging through a process substitution, + # which currently is only to verbose logfile + if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then + echo "exit_trap: cleaning up child processes" + kill 2>&1 $jobs + fi + + #Remove timing data file + if [ -f "$OSCWRAP_TIMER_FILE" ] ; then + rm "$OSCWRAP_TIMER_FILE" + fi -# for DSTAT logging -DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} + # Kill the last spinner process + kill_spinner -# Use color for logging output (only available if syslog is not used) -LOG_COLOR=`trueorfalse True $LOG_COLOR` + if [[ $r -ne 0 ]]; then + echo "Error on exit" + # If we error before we've installed os-testr, this will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + fi + if [[ -z $LOGDIR ]]; then + ${PYTHON} $TOP_DIR/tools/worlddump.py + else + ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR + fi + else + # If we error before we've installed os-testr, this will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + fi + fi -# Service startup timeout -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + exit $r +} + +# Exit on any errors so that errors don't compound +trap err_trap ERR +function err_trap { + local r=$? + set +o xtrace + if [[ -n "$LOGFILE" ]]; then + echo "${0##*/} failed: full log in $LOGFILE" + else + echo "${0##*/} failed" + fi + exit $r +} + +# Begin trapping error exit codes +set -o errexit + +# Print the kernel version +uname -a # Reset the bundle of CA certificates SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" rm -f $SSL_BUNDLE_FILE +# Import common services (database, message queue) configuration +source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend + +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. 
+tune_host # Configure Projects # ================== -# Import apache functions +# Clone all external plugins +fetch_plugins + +# Plugin Phase 0: override_defaults - allow plugins to override +# defaults before other services are run +run_phase override_defaults + +# Import Apache functions source $TOP_DIR/lib/apache # Import TLS functions @@ -340,32 +605,29 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo -source $TOP_DIR/lib/stackforge +source $TOP_DIR/lib/libraries +source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop +source $TOP_DIR/lib/tcpdump +source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif # Extras Source # -------------- # Phase: source -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i source - done -fi +run_phase source -# Set the destination directories for other OpenStack projects -OPENSTACKCLIENT_DIR=$DEST/python-openstackclient # Interactive Configuration # ------------------------- @@ -374,7 +636,8 @@ OPENSTACKCLIENT_DIR=$DEST/python-openstackclient # Generic helper to configure passwords function read_password { - XTRACE=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace var=$1; msg=$2 pw=${!var} @@ -382,7 +645,7 @@ function read_password { if [[ -f $RC_DIR/localrc ]]; then localrc=$TOP_DIR/localrc else - localrc=$TOP_DIR/.localrc.auto + localrc=$TOP_DIR/.localrc.password fi # If the password is not defined yet, proceed to prompt user for a password. @@ -392,13 +655,15 @@ function read_password { touch $localrc fi - # Presumably if we got this far it can only be that our localrc is missing - # the required password. Prompt user for a password and write to localrc. + # Presumably if we got this far it can only be that our + # localrc is missing the required password. Prompt user for a + # password and write to localrc. + echo '' echo '################################################################################' echo $msg echo '################################################################################' - echo "This value will be written to your localrc file so you don't have to enter it " + echo "This value will be written to ${localrc} file so you don't have to enter it " echo "again. Use only alphanumeric characters." echo "If you leave this blank, a random default value will be used." pw=" " @@ -410,18 +675,21 @@ function read_password { echo "Invalid chars in password. Try again:" done if [ ! $pw ]; then - pw=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 20) + pw=$(generate_hex_string 10) fi eval "$var=$pw" echo "$var=$pw" >> $localrc fi - $XTRACE + + # restore previous xtrace value + $xtrace } # Database Configuration +# ---------------------- -# To select between database backends, add the following to ``localrc``: +# To select between database backends, add the following to ``local.conf``: # # disable_service mysql # enable_service postgresql @@ -429,221 +697,69 @@ function read_password { # The available database backends are listed in ``DATABASE_BACKENDS`` after # ``lib/database`` is sourced. ``mysql`` is the default. 
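To make the backend selection above concrete, a minimal ``local.conf`` sketch that swaps MySQL for PostgreSQL might look like this (the password value is illustrative; if it is omitted, read_password prompts for one interactively):

    [[local|localrc]]
    # Replace the default MySQL backend with PostgreSQL
    disable_service mysql
    enable_service postgresql
    # Optional: pre-seed the password that read_password would otherwise prompt for
    DATABASE_PASSWORD=supersecret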
-initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
+if initialize_database_backends; then
+    echo "Using $DATABASE_TYPE database backend"
+    # Last chance for the database password. This must be handled here
+    # because read_password is not a library function.
+    read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
+
+    define_database_baseurl
+else
+    echo "No database enabled"
+fi

 # Queue Configuration
+# -------------------

 # Rabbit connection info
+# In multi-node DevStack, the second node needs ``RABBIT_USERID``, but rabbit
+# isn't enabled.
 if is_service_enabled rabbit; then
-    RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi

 # Keystone
+# --------

-if is_service_enabled key; then
-    # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
-    # just a string and is not a 'real' Keystone token.
-    read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+if is_service_enabled keystone; then
     # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
     read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
     # Horizon currently truncates usernames and passwords at 20 characters
     read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."

     # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
-    # service in ``localrc`` (e.g. ``enable_service ldap``).
+    # service in ``local.conf`` (e.g. ``enable_service ldap``).
     # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
-    # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the
+    # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``local.conf``. To enable the
     # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
-    # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
-    # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
-
-    # only request ldap password if the service is enabled
-    if is_service_enabled ldap; then
-        read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
-    fi
-fi
-
-
-# Swift
-
-if is_service_enabled s-proxy; then
-    # We only ask for Swift Hash if we have enabled swift service.
-    # ``SWIFT_HASH`` is a random unique string for a swift cluster that
-    # can never change.
-    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
-fi
-
-
-# Configure logging
-# -----------------
-
-# Draw a spinner so the user knows something is happening
-function spinner {
-    local delay=0.75
-    local spinstr='/-\|'
-    printf "..." >&3
-    while [ true ]; do
-        local temp=${spinstr#?}
-        printf "[%c]" "$spinstr" >&3
-        local spinstr=$temp${spinstr%"$temp"}
-        sleep $delay
-        printf "\b\b\b" >&3
-    done
-}
-
-function kill_spinner {
-    if [ ! -z "$LAST_SPINNER_PID" ]; then
-        kill >/dev/null 2>&1 $LAST_SPINNER_PID
-        printf "\b\b\bdone\n" >&3
-    fi
-}
-
-# Echo text to the log file, summary log file and stdout
-# echo_summary "something to say"
-function echo_summary {
-    if [[ -t 3 && "$VERBOSE" != "True" ]]; then
-        kill_spinner
-        echo -n -e $@ >&6
-        spinner &
-        LAST_SPINNER_PID=$!
-    else
-        echo -e $@ >&6
-    fi
-}
-
-# Echo text only to stdout, no log files
-# echo_nolog "something not for the logs"
-function echo_nolog {
-    echo $@ >&3
-}
-
-# Set up logging for ``stack.sh``
-# Set ``LOGFILE`` to turn on logging
-# Append '.xxxxxxxx' to the given name to maintain history
-# where 'xxxxxxxx' is a representation of the date the file was created
-TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
-if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
-    LOGDAYS=${LOGDAYS:-7}
-    CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
-fi
-
-if [[ -n "$LOGFILE" ]]; then
-    # First clean up old log files. Use the user-specified ``LOGFILE``
-    # as the template to search for, appending '.*' to match the date
-    # we added on earlier runs.
-    LOGDIR=$(dirname "$LOGFILE")
-    LOGFILENAME=$(basename "$LOGFILE")
-    mkdir -p $LOGDIR
-    find $LOGDIR -maxdepth 1 -name $LOGFILENAME.\* -mtime +$LOGDAYS -exec rm {} \;
-    LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
-    SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
-
-    # Redirect output according to config
-
-    # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
-    # stdout later.
-    exec 3>&1
-    if [[ "$VERBOSE" == "True" ]]; then
-        # Set fd 1 and 2 to write the log file
-        exec 1> >( awk -v logfile=${LOGFILE} '
-            /((set \+o$)|xtrace)/ { next }
-            {
-                cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \""
-                cmd | getline now
-                close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"")
-                sub(/^/, now)
-                print > logfile
-                fflush(logfile)
-                print
-                fflush("")
-            }' ) 2>&1
-        # Set fd 6 to summary log file
-        exec 6> >( tee "${SUMFILE}" )
-    else
-        # Set fd 1 and 2 to primary logfile
-        exec 1> "${LOGFILE}" 2>&1
-        # Set fd 6 to summary logfile and stdout
-        exec 6> >( tee "${SUMFILE}" >&3 )
-    fi
-
-    echo_summary "stack.sh log $LOGFILE"
-    # Specified logfile name always links to the most recent log
-    ln -sf $LOGFILE $LOGDIR/$LOGFILENAME
-    ln -sf $SUMFILE $LOGDIR/$LOGFILENAME.summary
-else
-    # Set up output redirection without log files
-    # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
-    # stdout later.
-    exec 3>&1
-    if [[ "$VERBOSE" != "True" ]]; then
-        # Throw away stdout and stderr
-        exec 1>/dev/null 2>&1
-    fi
-    # Always send summary fd to original stdout
-    exec 6>&3
-fi
-
-# Set up logging of screen windows
-# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``, we will log to the the file
-# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
-# ``screen-$SERVICE_NAME.log`` to the latest log file.
-# Logs are kept for as long specified in ``LOGDAYS``.
-if [[ -n "$SCREEN_LOGDIR" ]]; then
-
-    # We make sure the directory is created.
-    if [[ -d "$SCREEN_LOGDIR" ]]; then
-        # We cleanup the old logs
-        find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
-    else
-        mkdir -p $SCREEN_LOGDIR
-    fi
-fi
-
-
-# Set Up Script Execution
-# -----------------------
-
-# Kill background processes on exit
-trap exit_trap EXIT
-function exit_trap {
-    local r=$?
-    jobs=$(jobs -p)
-    # Only do the kill when we're logging through a process substitution,
-    # which currently is only to verbose logfile
-    if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then
-        echo "exit_trap: cleaning up child processes"
-        kill 2>&1 $jobs
-    fi
-
-    # Kill the last spinner process
-    kill_spinner
-
-    exit $r
-}
-
-# Exit on any errors so that errors don't compound
-trap err_trap ERR
-function err_trap {
-    local r=$?
-    set +o xtrace
-    if [[ -n "$LOGFILE" ]]; then
-        echo "${0##*/} failed: full log in $LOGFILE"
-    else
-        echo "${0##*/} failed"
+    # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
+    # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``local.conf``.
+
+    # Only request LDAP password if the service is enabled
+    if is_service_enabled ldap; then
+        read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
     fi
-    exit $r
-}
+fi

-set -o errexit

+# Swift
+# -----

-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following along as the install occurs.
-set -o xtrace
+if is_service_enabled s-proxy; then
+    # We only ask for Swift Hash if we have enabled swift service.
+    # ``SWIFT_HASH`` is a random unique string for a swift cluster that
+    # can never change.
+    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
+
+    if [[ -z "$SWIFT_TEMPURL_KEY" ]] && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+        read_password SWIFT_TEMPURL_KEY "ENTER A KEY FOR SWIFT TEMPURLS."
+    fi
+fi
+
+# Save configuration values
+save_stackenv $LINENO

 # Install Packages
@@ -651,59 +767,137 @@ set -o xtrace

 # OpenStack uses a fair number of other projects.

+# Bring down global requirements before any use of pip_install. This is
+# necessary to ensure that the constraints file is in place before we
+# attempt to apply any constraints to pip installs.
+# We always need the master branch in addition to any stable branch, so
+# override GIT_DEPTH here.
+GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+
 # Install package requirements
 # Source it so the entire environment is available
 echo_summary "Installing package prerequisites"
 source $TOP_DIR/tools/install_prereqs.sh

-# Configure an appropriate python environment
+# Configure an appropriate Python environment.
+#
+# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip
+# is very active and changes are not generally reflected in the LTS
+# distros. This often involves important things like dependency or
+# conflict resolution, and has often been required because the
+# complicated constraints etc. used by openstack have tickled bugs in
+# distro versions of pip. We want to find these problems as they
+# happen, rather than years later when we try to update our LTS
+# distro. Whilst it is clear that global installations of upstream
+# pip are less and less common, with virtualenvs being the general
+# approach now, there are a lot of devstack plugins that assume a
+# global install environment.
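As a hedged illustration of the knobs this step exposes (the mirror URL below is hypothetical), an offline or mirrored environment could steer the pip bootstrap from ``local.conf``:

    [[local|localrc]]
    # Skip tools/install_pip.sh entirely; requires a previous online run
    OFFLINE=True
    # ...or, instead, point the pip bootstrap at a local PyPI mirror
    # PYPI_ALTERNATIVE_URL=https://pypi.mirror.example.org/simple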
if [[ "$OFFLINE" != "True" ]]; then - $TOP_DIR/tools/install_pip.sh + PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh +fi + +# Do the ugly hacks for broken packages and distros +source $TOP_DIR/tools/fixup_stuff.sh +fixup_all + +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + + setup_devstack_virtualenv +fi + +# Install subunit for the subunit output stream +pip_install -U os-testr + +# the default rate limit of 1000 messages / 30 seconds is not +# sufficient given how verbose our logging is. +iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" +sudo systemctl restart systemd-journald + +# Virtual Environment +# ------------------- + +# Install required infra support libraries +install_infra + +# Install bindep +$VIRTUALENV_CMD $DEST/bindep-venv +# TODO(ianw) : optionally install from zuul checkout? +$DEST/bindep-venv/bin/pip install bindep +export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep + +# Install packages as defined in plugin bindep.txt files +pkgs="$( _get_plugin_bindep_packages )" +if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} fi -# Do the ugly hacks for borken packages and distros -$TOP_DIR/tools/fixup_stuff.sh +# Extras Pre-install +# ------------------ +# Phase: pre-install +run_phase stack pre-install + +# NOTE(danms): Set global limits before installing anything +set_systemd_override DefaultLimitNOFILE ${ULIMIT_NOFILE} install_rpc_backend +restart_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database fi +if [ -n "$DATABASE_TYPE" ]; then + install_database_python +fi if is_service_enabled neutron; then install_neutron_agent_packages fi -TRACK_DEPENDS=${TRACK_DEPENDS:-False} +if is_service_enabled etcd3; then + install_etcd3 +fi -# Install python packages into a virtualenv so that we can track them -if [[ $TRACK_DEPENDS = True ]]; then - echo_summary "Installing Python packages into a virtualenv $DEST/.venv" - pip_install -U virtualenv +# Setup TLS certs +# --------------- - rm -rf $DEST/.venv - virtualenv --system-site-packages $DEST/.venv - source $DEST/.venv/bin/activate - $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip +# Do this early, before any webservers are set up to ensure +# we don't run into problems with missing certs when apache +# is restarted. 
+if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert fi +# Dstat +# ----- + +# Install dstat services prerequisites +install_dstat + + # Check Out and Install Source # ---------------------------- echo_summary "Installing OpenStack project source" -# Install required infra support libraries -install_infra - -# Install oslo libraries that have graduated -install_oslo +# Install additional libraries +install_libs -# Install stackforge libraries for testing -if is_service_enabled stackforge_libs; then - install_stackforge -fi +# Install uwsgi +install_apache_uwsgi -# Install clients libraries +# Install client libraries +install_keystoneauth install_keystoneclient install_glanceclient install_cinderclient @@ -714,103 +908,104 @@ fi if is_service_enabled neutron nova horizon; then install_neutronclient fi -if is_service_enabled heat horizon; then - install_heatclient -fi -git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH -setup_develop $OPENSTACKCLIENT_DIR +# Install middleware +install_keystonemiddleware -if is_service_enabled key; then - install_keystone - configure_keystone +if is_service_enabled keystone; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then + stack_install_service keystone + configure_keystone + fi fi -if is_service_enabled s-proxy; then - install_swift +if is_service_enabled swift; then + if is_service_enabled ceilometer; then + install_ceilometermiddleware + fi + stack_install_service swift configure_swift - # swift3 middleware to provide S3 emulation to Swift - if is_service_enabled swift3; then - # replace the nova-objectstore port by the swift port + # s3api middleware to provide S3 emulation to Swift + if is_service_enabled s3api; then + # Replace the nova-objectstore port by the swift port S3_SERVICE_PORT=8080 - git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH - setup_develop $SWIFT3_DIR fi fi if is_service_enabled g-api n-api; then - # image catalog service - install_glance + # Image catalog service + stack_install_service glance configure_glance fi if is_service_enabled cinder; then - install_cinder + # Block volume service + stack_install_service cinder configure_cinder fi if is_service_enabled neutron; then - install_neutron - install_neutron_third_party + # Network service + stack_install_service neutron fi if is_service_enabled nova; then - # compute service - install_nova - cleanup_nova + # Compute service + stack_install_service nova configure_nova fi -if is_service_enabled horizon; then - # dashboard - install_horizon - configure_horizon +if is_service_enabled placement; then + # placement api + stack_install_service placement + configure_placement fi -if is_service_enabled ceilometer; then - install_ceilometerclient - install_ceilometer - echo_summary "Configuring Ceilometer" - configure_ceilometer - configure_ceilometerclient +# create a placement-client fake service to know we need to configure +# placement connectivity. We configure the placement service for nova +# if placement-api or placement-client is active, and n-cpu on the +# same box. 
+if is_service_enabled placement placement-client; then + if is_service_enabled n-cpu || is_service_enabled n-sch; then + configure_placement_nova_compute + fi fi -if is_service_enabled heat; then - install_heat - cleanup_heat - configure_heat +if is_service_enabled horizon; then + # dashboard + stack_install_service horizon fi if is_service_enabled tls-proxy; then - configure_CA - init_CA - init_cert - # Add name to /etc/hosts - # don't be naive and add to existing line! + fix_system_ca_bundle_path fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick +fi # Extras Install # -------------- # Phase: install -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i stack install - done -fi +run_phase stack install -if [[ $TRACK_DEPENDS = True ]]; then - $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip - if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then - echo "Detect some changes for installed packages of pip, in depend tracking mode" - cat $DEST/requires.diff +# Install the OpenStack client, needed for most setup commands +if use_library_from_git "python-openstackclient"; then + git_clone_by_name "python-openstackclient" + setup_dev_lib "python-openstackclient" +else + pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server fi - echo "Ran stack.sh in depend tracking mode, bailing out now" - exit 0 fi +# Installs alias for osc so that we can collect timing for all +# osc commands. Alias dies with stack.sh. +install_oscwrap # Syslog # ------ @@ -818,17 +1013,15 @@ fi if [[ $SYSLOG != "False" ]]; then if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then # Configure the master host to receive - cat </tmp/90-stack-m.conf + cat </dev/null \$ModLoad imrelp \$InputRELPServerRun $SYSLOG_PORT EOF - sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d else # Set rsyslog to send to remote host - cat </tmp/90-stack-s.conf + cat </dev/null *.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT EOF - sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi RSYSLOGCONF="/etc/rsyslog.conf" @@ -851,13 +1044,8 @@ EOF fi -# Finalize queue installation -# ---------------------------- -restart_rpc_backend - - -# Export Certicate Authority Bundle -# --------------------------------- +# Export Certificate Authority Bundle +# ----------------------------------- # If certificates were used and written to the SSL bundle file then these # should be exported so clients can validate their connections. 
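A hypothetical client-side sketch of consuming that exported bundle (the path assumes the default ``DATA_DIR`` of /opt/stack/data):

    # Point OpenStack clients at the CA bundle written by stack.sh
    export OS_CACERT=/opt/stack/data/ca-bundle.pem
    # Client calls now validate the TLS service endpoints
    openstack --os-cloud devstack-admin catalog list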
@@ -874,115 +1062,96 @@ if is_service_enabled $DATABASE_BACKENDS; then
     configure_database
 fi

+# Save configuration values
+save_stackenv $LINENO

-# Configure screen
-# ----------------

-USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-if [[ "$USE_SCREEN" == "True" ]]; then
-    # Create a new named screen to run processes in
-    screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-    sleep 1
+# Start Services
+# ==============

-    # Set a reasonable status bar
-    if [ -z "$SCREEN_HARDSTATUS" ]; then
-        SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
-    fi
-    screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
-    screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
-fi
+# Dstat
+# -----
+
+# A better kind of sysstat, with the top process per time slice
+start_dstat

-# Clear screen rc file
-SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
-    rm -f $SCREENRC
+if is_service_enabled atop; then
+    configure_atop
+    install_atop
+    start_atop
 fi

-# Initialize the directory for service status check
-init_service_check
+# Run a background tcpdump for debugging
+# Note: must set TCPDUMP_ARGS with the enabled service
+if is_service_enabled tcpdump; then
+    start_tcpdump
+fi

-# Dstat
-# -------
+# Etcd
+# -----

-# A better kind of sysstat, with the top process per time slice
-DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
-if [[ -n ${SCREEN_LOGDIR} ]]; then
-    screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
-else
-    screen_it dstat "dstat $DSTAT_OPTS"
+# etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines
+if is_service_enabled etcd3; then
+    start_etcd3
 fi

-# Start Services
-# ==============

 # Keystone
 # --------

-if is_service_enabled key; then
-    echo_summary "Starting Keystone"
-    init_keystone
-    start_keystone
+if is_service_enabled tls-proxy; then
+    start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
+fi

-    # Set up a temporary admin URI for Keystone
-    SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+# Write a clouds.yaml file and use the devstack-admin cloud
+write_clouds_yaml
+export OS_CLOUD=${OS_CLOUD:-devstack-admin}

-    if is_service_enabled tls-proxy; then
-        export OS_CACERT=$INT_CA_DIR/ca-chain.pem
-        # Until the client support is fixed, just use the internal endpoint
-        SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
-    fi
+if is_service_enabled keystone; then
+    echo_summary "Starting Keystone"

-    # Setup OpenStackclient token-flow auth
-    export OS_TOKEN=$SERVICE_TOKEN
-    export OS_URL=$SERVICE_ENDPOINT
+    if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
+        init_keystone
+        start_keystone
+        bootstrap_keystone
+    fi

     create_keystone_accounts
-    create_nova_accounts
-    create_glance_accounts
-    create_cinder_accounts
-    create_neutron_accounts
-
-    if is_service_enabled ceilometer; then
-        create_ceilometer_accounts
+    if is_service_enabled nova; then
+        async_runfunc create_nova_accounts
     fi
-
-    if is_service_enabled swift; then
-        create_swift_accounts
+    if is_service_enabled glance; then
+        async_runfunc create_glance_accounts
     fi
-
-    if is_service_enabled heat; then
-        create_heat_accounts
+    if is_service_enabled cinder; then
+        async_runfunc create_cinder_accounts
+    fi
+    if is_service_enabled neutron; then
+        async_runfunc create_neutron_accounts
+    fi
+    if is_service_enabled swift; then
+        async_runfunc create_swift_accounts
     fi

-    # Begone token-flow auth
-    unset OS_TOKEN OS_URL
-
-    # Set up password-flow auth creds now that keystone is bootstrapped
-    export OS_AUTH_URL=$SERVICE_ENDPOINT
-    export OS_TENANT_NAME=admin
-    export OS_USERNAME=admin
-    export OS_PASSWORD=$ADMIN_PASSWORD
 fi

-
 # Horizon
 # -------

-# Set up the django horizon application to serve via apache/wsgi
-
 if is_service_enabled horizon; then
-    echo_summary "Configuring and starting Horizon"
-    init_horizon
-    start_horizon
+    echo_summary "Configuring Horizon"
+    async_runfunc configure_horizon
 fi

+async_wait create_nova_accounts create_glance_accounts create_cinder_accounts
+async_wait create_neutron_accounts create_swift_accounts configure_horizon

 # Glance
 # ------

-if is_service_enabled g-reg; then
+# NOTE(yoctozepto): limited to node hosting the database which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
     echo_summary "Configuring Glance"
-    init_glance
+    async_runfunc init_glance
 fi

@@ -993,26 +1162,20 @@ if is_service_enabled neutron; then
     echo_summary "Configuring Neutron"

     configure_neutron
-    # Run init_neutron only on the node hosting the neutron API server
-    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then
-        init_neutron
-    fi
-fi

-# Some Neutron plugins require network controllers which are not
-# a part of the OpenStack project. Configure and start them.
-if is_service_enabled neutron; then
-    configure_neutron_third_party
-    init_neutron_third_party
-    start_neutron_third_party
+    # Run init_neutron only on the node hosting the Neutron API server
+    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
+        async_runfunc init_neutron
+    fi
 fi

 # Nova
 # ----

-if is_service_enabled n-net q-dhcp; then
-    # Delete traces of nova networks from prior runs
+if is_service_enabled q-dhcp; then
+    # TODO(frickler): These are remnants from n-net, check which parts are really
+    # still needed for Neutron.
     # Do not kill any dnsmasq instance spawned by NetworkManager
     netman_pid=$(pidof NetworkManager || true)
     if [ -z "$netman_pid" ]; then
@@ -1023,23 +1186,22 @@ if is_service_enabled n-net q-dhcp; then

     clean_iptables

-    if is_service_enabled n-net; then
-        rm -rf ${NOVA_STATE_PATH}/networks
-        sudo mkdir -p ${NOVA_STATE_PATH}/networks
-        safe_chown -R ${USER} ${NOVA_STATE_PATH}/networks
-    fi
-
     # Force IP forwarding on, just in case
     sudo sysctl -w net.ipv4.ip_forward=1
 fi

+# os-vif
+# ------
+if is_service_enabled nova neutron; then
+    configure_os_vif
+fi

 # Storage Service
 # ---------------

-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     echo_summary "Configuring Swift"
-    init_swift
+    async_runfunc init_swift
 fi

@@ -1048,9 +1210,23 @@ fi

 if is_service_enabled cinder; then
     echo_summary "Configuring Cinder"
-    init_cinder
+    async_runfunc init_cinder
 fi

+# Placement Service
+# ---------------
+
+if is_service_enabled placement; then
+    echo_summary "Configuring placement"
+    async_runfunc init_placement
+fi
+
+# Wait for neutron and placement before starting nova
+async_wait init_neutron
+async_wait init_placement
+async_wait init_glance
+async_wait init_swift
+async_wait init_cinder

 # Compute Service
 # ---------------

@@ -1059,21 +1235,7 @@ if is_service_enabled nova; then
     echo_summary "Configuring Nova"
     init_nova

-    # Additional Nova configuration that is dependent on other services
-    if is_service_enabled neutron; then
-        create_nova_conf_neutron
-    elif is_service_enabled n-net; then
-        create_nova_conf_nova_network
-    fi
-
-    init_nova_cells
-fi
-
-# Extra things to prepare nova for baremetal, before nova starts
-if is_service_enabled nova && is_baremetal; then
-    echo_summary "Preparing for nova baremetal"
-    prepare_baremetal_toolchain
-    configure_baremetal_nova_dirs
+    async_runfunc configure_neutron_nova
 fi

@@ -1081,17 +1243,13 @@ fi
 # ====================

 # Phase: post-config
-if [[ -d $TOP_DIR/extras.d ]]; then
-    for i in $TOP_DIR/extras.d/*.sh; do
-        [[ -r $i ]] && source $i stack post-config
-    done
-fi
+run_phase stack post-config

 # Local Configuration
 # ===================

-# Apply configuration from local.conf if it exists for layer 2 services
+# Apply configuration from ``local.conf`` if it exists for layer 2 services
 # Phase: post-config
 merge_config_group $TOP_DIR/local.conf post-config

@@ -1102,78 +1260,28 @@ merge_config_group $TOP_DIR/local.conf post-config
 # Only run the services specified in ``ENABLED_SERVICES``

 # Launch Swift Services
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     echo_summary "Starting Swift"
     start_swift
 fi

-# Launch the Glance services
-if is_service_enabled glance; then
-    echo_summary "Starting Glance"
-    start_glance
-fi
-
-# Install Images
-# ==============
-
-# Upload an image to glance.
-#
-# The default image is cirros, a small testing image which lets you login as **root**
-# cirros has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
-#
-# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
-#  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz

-if is_service_enabled g-reg; then
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
-    if is_baremetal; then
-        echo_summary "Creating and uploading baremetal images"
-
-        # build and upload separate deploy kernel & ramdisk
-        upload_baremetal_deploy $TOKEN
-
-        # upload images, separating out the kernel & ramdisk for PXE boot
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_baremetal_image $image_url $TOKEN
-        done
-    else
-        echo_summary "Uploading images"
-
-        # Option to upload legacy ami-tty, which works with xenserver
-        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-        fi
-
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_image $image_url $TOKEN
-        done
-    fi
+# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack
+# deployments. This ensures the keys match across nova and cinder across all
+# hosts.
+FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec}
+if is_service_enabled cinder; then
+    iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
 fi

-# Create an access key and secret key for nova ec2 register image
-if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
-    eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret)
-    iniset $NOVA_CONF DEFAULT s3_access_key "$access"
-    iniset $NOVA_CONF DEFAULT s3_secret_key "$secret"
-    iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
-fi
+async_wait configure_neutron_nova

-# Create a randomized default value for the keymgr's fixed_key
+# NOTE(clarkb): This must come after async_wait configure_neutron_nova because
+# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If
+# we don't wait then these two ini updates race each other and can result
+# in unexpected configs.
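To make that ordering constraint concrete, here is a schematic sketch of the async helper pairing used throughout this diff; the function name below is illustrative, while the real call pair is ``async_runfunc configure_neutron_nova`` / ``async_wait configure_neutron_nova``:

    # Hypothetical helper standing in for configure_neutron_nova
    function tweak_nova_conf {
        iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
    }
    async_runfunc tweak_nova_conf    # runs in the background
    # ... unrelated setup work happens here ...
    async_wait tweak_nova_conf       # block until the background edit lands
    # Only after the wait is it safe for other code to edit $NOVA_CONF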
 if is_service_enabled nova; then
-    FIXED_KEY=""
-    for i in $(seq 1 64); do
-        FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
-    done;
-    iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
-fi
-
-if is_service_enabled zeromq; then
-    echo_summary "Starting zermomq receiver"
-    screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
+    iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
+    iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY"
 fi

 # Launch the nova-api and wait for it to answer before continuing
@@ -1182,56 +1290,91 @@ if is_service_enabled n-api; then
     start_nova_api
 fi

-if is_service_enabled q-svc; then
+if is_service_enabled ovn-controller ovn-controller-vtep; then
+    echo_summary "Starting OVN services"
+    start_ovn_services
+fi
+
+if is_service_enabled q-svc neutron-api; then
     echo_summary "Starting Neutron"
+    configure_neutron_after_post_config
     start_neutron_service_and_check
-    check_neutron_third_party_integration
-elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
-    NM_CONF=${NOVA_CONF}
-    if is_service_enabled n-cell; then
-        NM_CONF=${NOVA_CELLS_CONF}
-    fi
-
-    # Create a small network
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
-
-    # Create some floating ips
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
+fi

-    # Create a second pool
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
+# Start placement before any of the services that are likely to want
+# to use it to manage resource providers.
+if is_service_enabled placement; then
+    echo_summary "Starting Placement"
+    start_placement
 fi

 if is_service_enabled neutron; then
-    start_neutron_agents
+    start_neutron
 fi

 # Once neutron agents are started setup initial network elements
-if is_service_enabled q-svc; then
+if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
     echo_summary "Creating initial neutron network elements"
-    create_neutron_initial_network
-    setup_neutron_debug
+    # Here's where plugins can wire up their own networks instead
+    # of the code in lib/neutron_plugins/services/l3
+    if type -p neutron_plugin_create_initial_networks > /dev/null; then
+        neutron_plugin_create_initial_networks
+    else
+        create_neutron_initial_network
+    fi
+fi
+
 if is_service_enabled nova; then
     echo_summary "Starting Nova"
     start_nova
+    async_runfunc create_flavors
 fi

 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"
     start_cinder
+    create_volume_types
+fi
+
+# This sleep is required for the cinder volume service to become active and
+# publish capabilities to the cinder scheduler before creating the image-volume
+if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+    sleep 30
+fi
+
+# Launch the Glance services
+# NOTE (abhishekk): We need to start the glance api service only after the
+# cinder service has started, as on startup glance-api queries cinder to
+# validate the volume_type configured for the cinder store of glance.
+if is_service_enabled glance; then
+    echo_summary "Starting Glance"
+    start_glance
 fi

-if is_service_enabled ceilometer; then
-    echo_summary "Starting Ceilometer"
-    init_ceilometer
-    start_ceilometer
+
+# Install Images
+# ==============
+
+# Upload an image to Glance.
+#
+# The default image is CirrOS, a small testing image which lets you log in as **root**.
+# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
+
+# NOTE(yoctozepto): limited to node hosting the database which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
+    echo_summary "Uploading images"
+
+    for image_url in ${IMAGE_URLS//,/ }; do
+        upload_image $image_url
+    done
 fi

-# Configure and launch heat engine, api and metadata
-if is_service_enabled heat; then
-    # Initialize heat
-    echo_summary "Configuring Heat"
-    init_heat
-    echo_summary "Starting Heat"
-    start_heat
+async_wait create_flavors
+
+if is_service_enabled horizon; then
+    echo_summary "Starting Horizon"
+    init_horizon
+    start_horizon
 fi

@@ -1242,8 +1385,8 @@ fi
 # This step also creates certificates for tenants and users,
 # which is helpful in image bundle steps.

-if is_service_enabled nova && is_service_enabled key; then
-    USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+if is_service_enabled nova && is_service_enabled keystone; then
+    USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD"

     if [ -f $SSL_BUNDLE_FILE ]; then
         USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
@@ -1253,69 +1396,73 @@ if is_service_enabled nova && is_service_enabled keystone; then
 fi

-# If we are running nova with baremetal driver, there are a few
-# last-mile configuration bits to attend to, which must happen
-# after n-api and n-sch have started.
-# Also, creating the baremetal flavor must happen after images
-# are loaded into glance, though just knowing the IDs is sufficient here
-if is_service_enabled nova && is_baremetal; then
-    # create special flavor for baremetal if we know what images to associate
-    [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
-        create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
-
-    # otherwise user can manually add it later by calling nova-baremetal-manage
-    [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
-
-    if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "False" ]]; then
-        # NOTE: we do this here to ensure that our copy of dnsmasq is running
-        sudo pkill dnsmasq || true
-        sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
-            --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
-            --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
-            ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
-    fi
-    # ensure callback daemon is running
-    sudo pkill nova-baremetal-deploy-helper || true
-    screen_it baremetal "cd ; nova-baremetal-deploy-helper"
-fi
-
 # Save some values we generated for later use
-CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
-echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
-for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
-    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
-    echo $i=${!i} >>$TOP_DIR/.stackenv
-done
+save_stackenv

-# Local Configuration
-# ===================
+# Wrapup configuration
+# ====================
+
+# local.conf extra
+# ----------------

-# Apply configuration from local.conf if it exists for layer 2 services
+# Apply configuration from ``local.conf`` if it exists for layer 2 services
 # Phase: extra
 merge_config_group $TOP_DIR/local.conf extra

 # Run extras
-# ==========
+# ----------

 # Phase: extra
-if [[ -d $TOP_DIR/extras.d ]]; then
-    for i in $TOP_DIR/extras.d/*.sh; do
-        [[ -r $i ]] && source $i stack extra
-    done
-fi
+run_phase stack extra

-# Local Configuration
-# ===================

-# Apply configuration from local.conf if it exists for layer 2 services
+# local.conf post-extra
+# ---------------------
+
+# Apply late configuration from ``local.conf`` if it exists for layer 2 services
 # Phase: post-extra
 merge_config_group $TOP_DIR/local.conf post-extra

+# Sanity checks
+# =============
+
+# Check that computes are all ready
+#
+# TODO(sdague): there should be some generic phase here.
+if is_service_enabled n-cpu; then
+    is_nova_ready
+fi
+
+# Check the status of running services
+service_check
+
+# Configure nova cellsv2
+# ----------------------
+
+# Do this late because it requires compute hosts to have started
+if is_service_enabled n-api; then
+    if is_service_enabled n-cpu; then
+        $TOP_DIR/tools/discover_hosts.sh
+    else
+        # Some CI systems like Hyper-V build the control plane on
+        # Linux, and join in non-Linux computes after setup. This
+        # allows them to delay the processing until after their whole
+        # environment is up.
+        echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment."
+    fi
+    # Run the nova-status upgrade check command which can also be used
+    # to verify the base install. Note that this is good enough in a
+    # single node deployment, but in a multi-node setup it won't verify
+    # any subnodes - that would have to be driven from whatever tooling
+    # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job.
+    $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check
+fi

 # Run local script
-# ================
+# ----------------

 # Run ``local.sh`` if it exists to perform user-managed tasks
@@ -1323,9 +1470,34 @@ if [[ -x $TOP_DIR/local.sh ]]; then
     $TOP_DIR/local.sh
 fi

-# Check the status of running services
-service_check
+# Bash completion
+# ===============
+
+# Prepare bash completion for OSC
+# Note we use "command" to avoid the timing wrapper
+# which isn't relevant here and floods logs
+command openstack complete \
+    | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null

+# If cinder is configured, set global_filter for PV devices
+if is_service_enabled cinder; then
+    if is_ubuntu; then
+        echo_summary "Configuring lvm.conf global device filter"
+        set_lvm_filter
+    else
+        echo_summary "Skip setting lvm filters for non-Ubuntu systems"
+    fi
+fi
+
+# Run test-config
+# ---------------
+
+# Phase: test-config
+run_phase stack test-config
+
+# Apply late configuration from ``local.conf`` if it exists for layer 2 services
+# Phase: test-config
+merge_config_group $TOP_DIR/local.conf test-config

 # Fin
 # ===
@@ -1341,142 +1513,71 @@ else
     exec 1>&3
 fi

+# Make sure we didn't leak any background tasks
+async_cleanup
+
+# Dump out the time totals
+time_totals
+async_print_timing
+
+if is_service_enabled mysql; then
+    if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+        echo ""
+        echo ""
+        echo "Post-stack database query stats:"
+        mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+            'SELECT * FROM queries' -t 2>/dev/null
+        mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+            'DELETE FROM queries' 2>/dev/null
+    fi
+fi
+
 # Using the cloud
-# ---------------
+# ===============

 echo ""
 echo ""
 echo ""
+echo "This is your host IP address: $HOST_IP"
+if [ "$HOST_IPV6" != "" ]; then
+    echo "This is your host IPv6 address: $HOST_IPV6"
+fi

 # If you installed Horizon on this server you should be able
 # to access the site using your browser.
 if is_service_enabled horizon; then
-    echo "Horizon is now available at http://$SERVICE_HOST/"
+    echo "Horizon is now available at http://$SERVICE_HOST$HORIZON_APACHE_ROOT"
 fi

 # If Keystone is present you can point ``nova`` cli to this server
-if is_service_enabled key; then
-    echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
-    echo "Examples on using novaclient command line is in exercise.sh"
+if is_service_enabled keystone; then
+    echo "Keystone is serving at $KEYSTONE_SERVICE_URI/"
     echo "The default users are: admin and demo"
     echo "The password: $ADMIN_PASSWORD"
 fi

-# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address
-echo "This is your host ip: $HOST_IP"
-
 # Warn that a deprecated feature was used
 if [[ -n "$DEPRECATED_TEXT" ]]; then
-    echo_summary "WARNING: $DEPRECATED_TEXT"
-fi
-
-# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut
-# Specific warning for deprecated configs
-if [[ -n "$EXTRA_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_OPTS is used"
-    echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "EXTRA_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[DEFAULT]
-"
-    for I in "${EXTRA_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut
-if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used"
-    echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[baremetal]
-"
-    for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut
-if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used"
-    echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
-    echo "
-[[post-config|/\$Q_PLUGIN_CONF_FILE]]
-[DEFAULT]
-"
-    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
+    echo
+    echo -e "WARNING: $DEPRECATED_TEXT"
+    echo
 fi

-# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut
-if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used"
-    echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
-    echo "
-[[post-config|/\$Q_PLUGIN_CONF_FILE]]
-[DEFAULT]
-"
-    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
-    echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
-    echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|/\$Q_DHCP_CONF_FILE]]
-[DEFAULT]
-"
-    for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
+echo
+echo "Services are running under systemd unit files."
+echo "For more information see: "
+echo "https://docs.openstack.org/devstack/latest/systemd.html"
+echo

-# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NEUTRON_CONF]]
-[DEFAULT]
-"
-    for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
+# Useful info on current state
+cat /etc/devstack-version
+echo

 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo_summary "stack.sh completed in $SECONDS seconds."
+
 # Restore/close logging file descriptors
 exec 1>&3
 exec 2>&3
diff --git a/stackrc b/stackrc
index 6dea983ace..93f8b1cd6d 100644
--- a/stackrc
+++ b/stackrc
@@ -1,8 +1,30 @@
+#!/bin/bash
+#
 # stackrc
 #
+
+# ensure we don't re-source this in the same environment
+[[ -z "$_DEVSTACK_STACKRC" ]] || return 0
+declare -r -g _DEVSTACK_STACKRC=1
+
 # Find the other rc files
 RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)

+# Source required DevStack functions and globals
+source $RC_DIR/functions
+
+# Set the target branch. This is used so that stable branching
+# does not need to update each repo below.
+TARGET_BRANCH=master
+
+# Cycle-trailing projects need to branch later than the others.
+TRAILING_TARGET_BRANCH=master
+
+# And some repos do not create stable branches, so this is used
+# to make it explicit and avoid accidentally setting to a stable
+# branch.
+BRANCHLESS_TARGET_BRANCH=master
+
 # Destination path for installation
 DEST=/opt/stack

@@ -12,6 +34,9 @@ DATA_DIR=${DEST}/data
 # Destination for status files
 SERVICE_DIR=${DEST}/status

+# Path for subunit output file
+SUBUNIT_OUTPUT=${DEST}/devstack.subunit
+
 # Determine stack user
 if [[ $EUID -eq 0 ]]; then
     STACK_USER=stack
@@ -19,52 +44,52 @@ else
     STACK_USER=$(whoami)
 fi

+# Specify the region name
+REGION_NAME=${REGION_NAME:-RegionOne}
+
+# Specify the name of the region where the identity service endpoint is registered.
+# When deploying multiple DevStack instances in different regions with shared +# Keystone, set KEYSTONE_REGION_NAME to the region where Keystone is running +# for DevStack instances which do not host Keystone. +KEYSTONE_REGION_NAME=${KEYSTONE_REGION_NAME:-$REGION_NAME} + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``local.conf``. -# For example, to enable Swift add this to ``local.conf``: +# For example, to enable Swift as part of DevStack add the following +# settings in ``local.conf``: +# [[local|localrc]] # enable_service s-proxy s-object s-container s-account -# In order to enable Neutron (a single node setup) add the following -# settings in `` localrc``: -# disable_service n-net -# enable_service q-svc -# enable_service q-agt -# enable_service q-dhcp -# enable_service q-l3 -# enable_service q-meta -# # Optional, to enable tempest configuration as part of devstack -# enable_service tempest - -# core compute (glance / keystone / nova (+ nova-network)) -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth -# cinder -ENABLED_SERVICES+=,c-sch,c-api,c-vol -# heat -ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw -# dashboard -ENABLED_SERVICES+=,horizon -# additional services -ENABLED_SERVICES+=,rabbit,tempest,mysql - - -# Tell Tempest which services are available. The default is set here as -# Tempest falls late in the configuration sequence. This differs from -# ``ENABLED_SERVICES`` in that the project names are used here rather than -# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" -TEMPEST_SERVICES="" - -# Set the default Nova APIs to enable -NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# This allows us to pass ``ENABLED_SERVICES`` +if ! isset ENABLED_SERVICES ; then + # Keystone - nothing works without keystone + ENABLED_SERVICES=key + # Nova - services to support libvirt based openstack clouds + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta + # Placement service needed for Nova + ENABLED_SERVICES+=,placement-api,placement-client + # Glance services needed for Nova + ENABLED_SERVICES+=,g-api + # Cinder + ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server + # Neutron + ENABLED_SERVICES+=,q-svc,q-ovn-agent + # Dashboard + ENABLED_SERVICES+=,horizon + # Additional services + ENABLED_SERVICES+=,rabbit,tempest,mysql,etcd3,dstat +fi -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=2.0 +# Global toggle for enabling services under mod_wsgi. If this is set to +# ``True`` all services that use HTTPD + mod_wsgi as the preferred method of +# deployment, will be deployed under Apache. If this is set to ``False`` all +# services will rely on the local toggle variable. +ENABLE_HTTPD_MOD_WSGI_SERVICES=True -# Whether to use 'dev mode' for screen windows. Dev mode works by -# stuffing text into the screen windows so that a developer can use -# ctrl-c, up-arrow, enter to restart the service. Starting services -# this way is slightly unreliable, and a bit slower, so this can -# be disabled for automated testing by setting this value to False. 
-USE_SCREEN=True +# Set the default Nova APIs to enable +NOVA_ENABLED_APIS=osapi_compute,metadata # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then @@ -75,9 +100,112 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# CELLSV2_SETUP - how we should configure services with cells v2 +# +# - superconductor - this is one conductor for the api services, and +# one per cell managing the compute services. This is preferred +# - singleconductor - this is one conductor for the whole deployment, +# this is not recommended, and will be removed in the future. +CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} + +# Set the root URL for Horizon +HORIZON_APACHE_ROOT="/dashboard" + +# Whether to use user specific units for running services or global ones. +USER_UNITS=$(trueorfalse False USER_UNITS) +if [[ "$USER_UNITS" == "True" ]]; then + SYSTEMD_DIR="$HOME/.local/share/systemd/user" + SYSTEMCTL="systemctl --user" +else + SYSTEMD_DIR="/etc/systemd/system" + SYSTEMCTL="sudo systemctl" +fi + +# Passwords generated by interactive devstack runs +if [[ -r $RC_DIR/.localrc.password ]]; then + source $RC_DIR/.localrc.password +fi + +# Adding the specific version of Python 3 to this variable will install +# the app using that version of the interpreter instead of just 3. +_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}} + +# Create a virtualenv with this +# Use the built-in venv to avoid more dependencies +export VIRTUALENV_CMD="python3 -m venv" + +# Default for log coloring is based on interactive-or-not. +# Baseline assumption is that non-interactive invocations are for CI, +# where logs are to be presented as browsable text files; hence color +# codes should be omitted. +# Simply override LOG_COLOR if your environment is different. +if [ -t 1 ]; then + _LOG_COLOR_DEFAULT=True +else + _LOG_COLOR_DEFAULT=False +fi + +# Use color for logging output (only available if syslog is not used) +LOG_COLOR=$(trueorfalse $_LOG_COLOR_DEFAULT LOG_COLOR) + +# Make tracing more educational +if [[ "$LOG_COLOR" == "True" ]]; then + # tput requires TERM or -T. If neither is present, use vt100, a + # no-frills least common denominator supported everywhere. + TPUT_T= + if ! [ $TERM ]; then + TPUT_T='-T vt100' + fi + export PS4='+\[$(tput '$TPUT_T' setaf 242)\]$(short_source)\[$(tput '$TPUT_T' sgr0)\] ' +else + export PS4='+ $(short_source): ' +fi + +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +fi + +# Enable use of Python virtual environments. 
Individual project use of +# venvs are controlled by the PROJECT_VENV array; every project with +# an entry in the array will be installed into the named venv. +# By default this will put each project into its own venv. +USE_VENV=$(trueorfalse False USE_VENV) + +# Add packages that need to be installed into a venv but are not in any +# requirements files here, in a comma-separated list. +# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} + # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) -DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) +DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) + +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is @@ -90,206 +218,431 @@ DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} +# How should we be handling WSGI deployments. By default we're going +# to allow for 2 modes, which is "uwsgi" which runs with an apache +# proxy uwsgi in front of it, or "mod_wsgi", which runs in +# apache. mod_wsgi is deprecated, don't use it. +WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi + # Repositories # ------------ # Base GIT Repo URL -# Another option is http://review.openstack.org/p -GIT_BASE=${GIT_BASE:-git://git.openstack.org} +GIT_BASE=${GIT_BASE:-https://opendev.org} -# metering service -CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} -CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master} +# The location of REQUIREMENTS once cloned +REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} -# ceilometer client library -CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} -CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master} +# Which libraries should we install from git instead of using released +# versions on pypi? +# +# By default DevStack is now installing libraries from pypi instead of +# from git repositories by default. This works great if you are +# developing server components, but if you want to develop libraries +# and see them live in DevStack you need to tell DevStack it should +# install them from git. +# +# ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config +# +# Will install those 2 libraries from git, the rest from pypi. +# +# Setting the variable to 'ALL' will activate the download for all +# libraries. 
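A minimal sketch of that workflow, assuming you are hacking on ``oslo.config`` and ``python-openstackclient`` (any names that appear as ``GITREPO`` keys further down work the same way):

    [[local|localrc]]
    # install these two libraries from their git checkouts instead of pypi
    LIBS_FROM_GIT=oslo.config,python-openstackclient
    # or, to install every known library from git:
    #LIBS_FROM_GIT=ALL

The names must match the ``GITREPO``/``GITBRANCH`` array keys defined below, which is also where the per-library ``*_REPO`` and ``*_BRANCH`` overrides are honoured.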
-# volume service -CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} -CINDER_BRANCH=${CINDER_BRANCH:-master} +DEVSTACK_SERIES="2026.1" -# volume client -CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} -CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master} +############## +# +# OpenStack Server Components +# +############## + +# block storage service +CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} +CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH} # image catalog service GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} -GLANCE_BRANCH=${GLANCE_BRANCH:-master} +GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH} -# python glance client library -GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} -GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master} +# django powered web control panel for openstack +HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} +HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH} + +# unified auth system (manages accounts/tokens) +KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH} -# heat service -HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} -HEAT_BRANCH=${HEAT_BRANCH:-master} +# neutron service +NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} +NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH} -# python heat client library -HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} -HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} +# compute service +NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} +NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} -# django powered web control panel for openstack -HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} -HORIZON_BRANCH=${HORIZON_BRANCH:-master} +# object storage service +SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} +SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH} + +# placement service +PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git} +PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH} + +############## +# +# Testing Components +# +############## + +# consolidated openstack requirements +REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} + +# Tempest test suite +TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} + +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images -# baremetal provisionint service -IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} -IRONIC_BRANCH=${IRONIC_BRANCH:-master} +############## +# +# OpenStack Client Library Components +# Note default install is from pip, see LIBS_FROM_GIT +# +############## + +# volume client +GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} +GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH} + +# os-brick client for local volume attachement +GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} +GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH} + +# python barbican 
client library +GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git} +GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH} +GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient + +# python glance client library +GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} +GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH} # ironic client -IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} -IRONICCLIENT_BRANCH=${IRONICCLIENT_BRANCH:-master} +GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} +GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH} +# ironic plugin is out of tree, but nova uses it. set GITDIR here. +GITDIR["python-ironicclient"]=$DEST/python-ironicclient -# unified auth system (manages accounts/tokens) -KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} -KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} +# the base authentication plugins that clients use to authenticate +GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} +GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH} # python keystone client library to nova that horizon uses -KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} -KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master} +GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} +GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH} -# compute service -NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} -NOVA_BRANCH=${NOVA_BRANCH:-master} +# neutron client +GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} +GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH} # python client library to nova that horizon (and others) use -NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} -NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} +GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} +GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH} + +# python swift client library +GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} +GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH} # consolidated openstack python client -OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} -OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} +GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} +GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH} +# this doesn't exist in a lib file, so set it here +GITDIR["python-openstackclient"]=$DEST/python-openstackclient + +# placement-api CLI +GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git} +GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH} + + +################### +# +# Oslo Libraries +# Note default install is from pip, see LIBS_FROM_GIT +# +################### + +# castellan key manager interface +GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git} +GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH} # cliff command line 
framework -CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} -CLIFF_BRANCH=${CLIFF_BRANCH:-master} +GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} +GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH} + +# async framework/helpers +GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git} +GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} + +# debtcollector deprecation framework/helpers +GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} +GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} + +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + +# helpful state machines +GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} +GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} + +# oslo.cache +GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git} +GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH} + +# oslo.concurrency +GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} +GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH} # oslo.config -OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} -OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} +GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} +GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH} + +# oslo.context +GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git} +GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH} + +# oslo.db +GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git} +GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} + +# oslo.i18n +GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} +GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} + +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} + +# oslo.log +GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} +GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} # oslo.messaging -OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} -OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} +GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} +GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH} -# oslo.rootwrap -OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} -OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# oslo.middleware +GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git} +GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH} -# oslo.vmware -OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} -OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master} +# oslo.policy +GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} +GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH} -# pycadf auditing library -PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} -PYCADF_BRANCH=${PYCADF_BRANCH:-master} +# oslo.privsep +GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git} +GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH} -# stevedore plugin manager -STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} 
-STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master} +# oslo.reports +GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} +GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH} -# taskflow plugin manager -TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} -TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master} +# oslo.rootwrap +GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} +GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH} -# pbr drives the setuptools configs -PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} -PBR_BRANCH=${PBR_BRANCH:-master} +# oslo.serialization +GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git} +GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH} -# neutron service -NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} -NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} +# oslo.service +GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git} +GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH} -# neutron client -NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} -NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master} +# oslo.utils +GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} +GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH} -# consolidated openstack requirements -REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} -REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} +# oslo.versionedobjects +GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git} +GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH} -# storage service -SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} -SWIFT_BRANCH=${SWIFT_BRANCH:-master} -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git} -SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} +# oslo.vmware +GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} +GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH} -# python swift client library -SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} -SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master} +# osprofiler +GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git} +GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH} -# Tempest test suite -TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} -TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} +# pycadf auditing library +GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} +GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH} +# stevedore plugin manager +GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} +GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH} -# diskimage-builder -DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} -DIB_BRANCH=${DIB_BRANCH:-master} +# taskflow plugin manager +GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} +GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH} -# a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-master} +# tooz plugin manager +GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git} 
+GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} -# ryu service -RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git} -RYU_BRANCH=${RYU_BRANCH:-master} +# pbr drives the setuptools configs +GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git} +GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH} -# a websockets/html5 or flash powered SPICE console for vm instances -SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} -SPICE_BRANCH=${SPICE_BRANCH:-master} -# trove service -TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} -TROVE_BRANCH=${TROVE_BRANCH:-master} +################## +# +# Libraries managed by OpenStack programs (non oslo) +# +################## + +# cursive library +GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git} +GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH} + +# glance store library +GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} +GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH} + +# keystone middleware +GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} +GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH} + +# ceilometer middleware +GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} +GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} +GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware + +# openstacksdk OpenStack Python SDK +GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git} +GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} + +# os-brick library to manage local volume attaches +GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} +GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH} + +# os-client-config to manage clouds.yaml and friends +GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git} +GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH} +GITDIR["os-client-config"]=$DEST/os-client-config + +# os-vif library to communicate between Neutron to Nova +GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git} +GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH} + +# osc-lib OpenStackClient common lib +GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} +GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH} + +# ironic common lib +GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} +GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH} +# this doesn't exist in a lib file, so set it here +GITDIR["ironic-lib"]=$DEST/ironic-lib + +# diskimage-builder tool +GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} +GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +GITDIR["diskimage-builder"]=$DEST/diskimage-builder + +# neutron-lib library containing neutron stable non-REST interfaces +GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git} +GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} +GITDIR["neutron-lib"]=$DEST/neutron-lib + +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} 
+GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} + +# os-traits library for resource provider traits in the placement service +GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} +GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} + +# ovsdbapp used by neutron +GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} +GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} +GITDIR["ovsdbapp"]=$DEST/ovsdbapp + +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken + + +################# +# +# 3rd Party Components (non pip installable) +# +# NOTE(sdague): these should be converted to release version installs or removed +# +################# -# trove client library test -TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} -TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} +# ironic python agent +IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} +IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} -# stackforge libraries that are used by OpenStack core services -# wsme -WSME_REPO=${WSME_REPO:-${GIT_BASE}/stackforge/wsme.git} -WSME_BRANCH=${WSME_BRANCH:-master} +# a websockets/html5 or flash powered VNC console for vm instances +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} -# pecan -PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git} -PECAN_BRANCH=${PECAN_BRANCH:-master} +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} +SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +# Global flag used to configure Tempest and potentially other services if +# volume multiattach is supported. In Queens, only the libvirt compute driver +# and lvm volume driver support multiattach, and qemu must be less than 2.10 +# or libvirt must be greater than or equal to 3.10. +ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core -# is installed, the default will be XenAPI +# also install an **LXC** or **OpenVZ** based system. 
DEFAULT_VIRT_DRIVER=libvirt -is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - if [[ "$os_VENDOR" =~ (Debian) ]]; then + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi ;; + lxd) + LXD_GROUP=${LXD_GROUP:-"lxd"} + ;; + docker|zun) + DOCKER_GROUP=${DOCKER_GROUP:-"docker"} + ;; fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; - xenserver) - # Xen config common to nova and neutron - XENAPI_USER=${XENAPI_USER:-"root"} - # This user will be used for dom0 - domU communication - # should be able to log in to dom0 without a password - # will be used to install the plugins - DOMZERO_USER=${DOMZERO_USER:-"domzero"} - ;; *) ;; esac - # Images # ------ @@ -299,73 +652,109 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of -# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and -# ``IMAGE_URLS`` to be set directly in ``localrc``. 
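For reference, a hypothetical override that skips the cirros defaults selected here and registers a custom image instead (the URL is purely illustrative):

    [[local|localrc]]
    # do not fetch the stock cirros images
    DOWNLOAD_DEFAULT_IMAGES=False
    # comma-separated list; each entry is uploaded to glance at stack time
    IMAGE_URLS="https://example.com/images/my-test-image.qcow2"

Note that the duplicate check further down will abort ``stack.sh`` if the same URL appears twice in ``IMAGE_URLS``.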
-case "$VIRT_DRIVER" in - openvz) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} - IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; - libvirt) - case "$LIBVIRT_TYPE" in - lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-rootfs} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz"};; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};; - esac - ;; - vsphere) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} - IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} - IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; - *) # Default to Cirros with kernel, ramdisk and disk image - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};; -esac - -# Use 64bit fedora image if heat is enabled -if [[ "$ENABLED_SERVICES" =~ 'h-api' ]]; then +# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and +# ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``. +DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES) +if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then + if [[ -n "$IMAGE_URLS" ]]; then + IMAGE_URLS+="," + fi case "$VIRT_DRIVER" in - libvirt|baremetal|ironic) - HEAT_CFN_IMAGE_URL=${HEAT_CFN_IMAGE_URL:-"https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/x86_64/Fedora-x86_64-20-20131211.1-sda.qcow2"} - IMAGE_URLS+=",$HEAT_CFN_IMAGE_URL" - ;; - *) + libvirt) + case "$LIBVIRT_TYPE" in + lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + *) # otherwise, use the qcow image + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + esac ;; + vsphere) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} + IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; + fake) + # Use the same as the default for libvirt + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + 
IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac + DOWNLOAD_DEFAULT_IMAGES=False fi -# Staging Area for New Images, have them here for at least 24hrs for nodepool -# to cache them otherwise the failure rates in the gate are too high -PRECACHE_IMAGES=$(trueorfalse False $PRECACHE_IMAGES) -if [[ "$PRECACHE_IMAGES" == "True" ]]; then - # staging in update for nodepool - IMAGE_URLS+=",https://dl.fedoraproject.org/pub/fedora/linux/updates/20/Images/x86_64/Fedora-x86_64-20-20140407-sda.qcow2" -fi - -# 10Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} -# Name of the LVM volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} +# This is a comma separated list of extra URLS to be listed for +# download by the tools/image_list.sh script. CI environments can +# pre-download these URLS and place them in $FILES. Later scripts can +# then use "get_extra_file " which will print out the path to the +# file; it will either be downloaded on demand or acquired from the +# cache if there. +EXTRA_CACHE_URLS="" + +# etcd3 defaults +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} +# Make sure etcd3 downloads the correct architecture +if is_arch "x86_64"; then + ETCD_ARCH="amd64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} +elif is_arch "aarch64"; then + ETCD_ARCH="arm64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} +elif is_arch "ppc64le"; then + ETCD_ARCH="ppc64le" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} +elif is_arch "s390x"; then + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} +else + exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" +fi +ETCD_PORT=${ETCD_PORT:-2379} +ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380} +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download} +ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH +ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz +ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE +# etcd is always required, so place it into list of pre-cached downloads +EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION" + +# Cache settings +CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"} +MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"} + +# Detect duplicate values in IMAGE_URLS +for image_url in ${IMAGE_URLS//,/ }; do + if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then + die $LINENO "$image_url is duplicate, please remove it from IMAGE_URLS." 
+ fi +done + +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} + +# Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} @@ -376,17 +765,174 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} -# Compatibility until it's eradicated from CI -USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""} + +# Allow the use of an alternate protocol (such as https) for service endpoints +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} + +# Sets the maximum number of workers for most services to reduce +# the memory used where there are a large number of CPUs present +# (the default number of workers for many services is the number of CPUs) +# Also sets the minimum number of workers to 2. +API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} + +# Service startup timeout +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + +# Timeout for compute node registration in Nova +NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} + +# Service graceful shutdown timeout +SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} + +# Service graceful shutdown timeout +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} + +# Common Configuration +# -------------------- + +# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without +# Internet access. ``stack.sh`` must have been previously run with Internet +# access to install prerequisites and fetch repositories. +OFFLINE=$(trueorfalse False OFFLINE) + +# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if +# the destination git repository does not exist during the ``git_clone`` +# operation. +ERROR_ON_CLONE=$(trueorfalse False ERROR_ON_CLONE) + +# Whether to enable the debug log level in OpenStack services +ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) + +# Set fixed and floating range here so we can make sure not to use addresses +# from either range when attempting to guess the IP to use for the host. +# Note that setting ``FIXED_RANGE`` may be necessary when running DevStack +# in an OpenStack cloud that uses either of these address ranges internally. +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} +IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22} +FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE} +HOST_IP_IFACE=${HOST_IP_IFACE:-} +HOST_IP=${HOST_IP:-} +HOST_IPV6=${HOST_IPV6:-} + +HOST_IP=$(get_default_host_ip "$FIXED_RANGE" "$FLOATING_RANGE" "$HOST_IP_IFACE" "$HOST_IP" "inet") +if [ "$HOST_IP" == "" ]; then + die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." +fi + +HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6") + +# Whether or not the port_security extension should be enabled for Neutron. +NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY) + +# SERVICE IP version +# This is the IP version that services should be listening on, as well +# as using to register their endpoints with keystone. +SERVICE_IP_VERSION=${SERVICE_IP_VERSION:-4} + +# Validate SERVICE_IP_VERSION +# It would be nice to support "4+6" here as well, but that will require +# multiple calls into keystone to register endpoints, so for now let's +# just support one or the other. 
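As the assignments just below show, an IPv6 service host is wrapped in brackets; that matters because ``SERVICE_HOST`` is interpolated verbatim into endpoint URLs. A quick illustration (addresses are examples, not DevStack defaults):

    # what stackrc derives for SERVICE_IP_VERSION=6
    SERVICE_HOST="[2001:db8::10]"
    KEYSTONE_URL="http://${SERVICE_HOST}:5000/v3"
    echo "$KEYSTONE_URL"    # http://[2001:db8::10]:5000/v3 - already valid URL syntax

With ``SERVICE_IP_VERSION=4`` the same interpolation works with a bare dotted-quad address, so consumers of ``SERVICE_HOST`` never need to care which address family is in use.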
+if [[ $SERVICE_IP_VERSION != "4" ]] && [[ $SERVICE_IP_VERSION != "6" ]]; then + die $LINENO "SERVICE_IP_VERSION must be either 4 or 6" +fi + +if [[ "$SERVICE_IP_VERSION" == 4 ]]; then + DEF_SERVICE_HOST=$HOST_IP + DEF_SERVICE_LOCAL_HOST=127.0.0.1 + DEF_SERVICE_LISTEN_ADDRESS=0.0.0.0 +fi + +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + if [ "$HOST_IPV6" == "" ]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_SERVICE_HOST=[$HOST_IPV6] + DEF_SERVICE_LOCAL_HOST=::1 + DEF_SERVICE_LISTEN_ADDRESS="[::]" +fi + +# This is either 0.0.0.0 for IPv4 or [::] for IPv6 +SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}} -# Set default screen name -SCREEN_NAME=${SCREEN_NAME:-stack} +# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for +# service endpoints. Default is dependent on SERVICE_IP_VERSION above. +SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} +# This is either 127.0.0.1 for IPv4 or ::1 for IPv6 +SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} + +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} + +# Configure services to use syslog instead of writing to individual log files +SYSLOG=$(trueorfalse False SYSLOG) +SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} +SYSLOG_PORT=${SYSLOG_PORT:-516} + +# Set global ``GIT_DEPTH=`` to limit the history depth of the git clone +# Set to 0 to disable shallow cloning +GIT_DEPTH=${GIT_DEPTH:-0} + +# We may not need to recreate database in case 2 Keystone services +# sharing the same database. It would be useful for multinode Grenade tests. 
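The assignment just below, like most of the toggles in this file, goes through DevStack's ``trueorfalse`` helper. Roughly, its behaviour is as follows (a simplified sketch for orientation only; the real implementation lives in ``functions-common``):

    # trueorfalse <default> <varname>: normalise a boolean-ish variable
    function trueorfalse_sketch {
        local default=$1
        local testval=${!2:-}
        case "$testval" in
            1|[tT]rue|[yY]es|[oO]n)   echo "True" ;;
            0|[fF]alse|[nN]o|[oO]ff)  echo "False" ;;
            *)                        echo "$default" ;;
        esac
    }
    RECREATE_KEYSTONE_DB=$(trueorfalse_sketch True RECREATE_KEYSTONE_DB)

The important property is that the default is applied when the variable is unset or unrecognised, so every ``$(trueorfalse X VAR)`` line in this file acts as both a default and a normaliser.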
+RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) + +# Following entries need to be last items in file + +# New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR +# LOGFILE LOGDIR output +# not set not set (new) set LOGDIR from default +# set not set stack.sh log to LOGFILE, (new) set LOGDIR from LOGFILE +# not set set screen logs to LOGDIR +# set set stack.sh log to LOGFILE, screen logs to LOGDIR + +# Set up new logging defaults +if [[ -z "${LOGDIR:-}" ]]; then + default_logdir=$DEST/logs + if [[ -z "${LOGFILE:-}" ]]; then + # Nothing is set, we need a default + LOGDIR="$default_logdir" + else + # Set default LOGDIR + LOGDIR="${LOGFILE%/*}" + logfile="${LOGFILE##*/}" + if [[ -z "$LOGDIR" || "$LOGDIR" == "$logfile" ]]; then + # LOGFILE had no path, set a default + LOGDIR="$default_logdir" + fi + fi + unset default_logdir logfile +fi -# Do not install packages tagged with 'testonly' by default -INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} +# ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs -# Undo requirements changes by global requirements -UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True} +# System-wide ulimit file descriptors override +ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048} # Local variables: # mode: shell-script diff --git a/tests/fake-service.sh b/tests/fake-service.sh new file mode 100755 index 0000000000..d4b9b56bb3 --- /dev/null +++ b/tests/fake-service.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# fake-service.sh - a fake service for start/stop testing +# $1 - sleep time + +SLEEP_TIME=${1:-3} + +LOG=/tmp/fake-service.log +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} + +# duplicate output +exec 1> >(tee -a ${LOG}) + +echo "" +echo "Starting fake-service for ${SLEEP_TIME}" +while true; do + echo "$(date +${TIMESTAMP_FORMAT}) [$$]" + sleep ${SLEEP_TIME} +done + diff --git a/tests/functions.sh b/tests/functions.sh deleted file mode 100755 index 874d02230d..0000000000 --- a/tests/functions.sh +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env bash - -# Tests for DevStack functions - -TOP=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP/functions - -# Import configuration -source $TOP/openrc - - -echo "Testing die_if_not_set()" - -bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" -if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' true] Failed" -else - echo 'OK' -fi - -bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" -if [[ $? = 0 ]]; then - echo "die_if_not_set [X='' true] Failed" -fi - -bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" -if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' false] Failed" -else - echo 'OK' -fi - -bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" -if [[ $? 
= 0 ]]; then - echo "die_if_not_set [X='' false] Failed" -fi - - -# Enabling/disabling services - -echo "Testing enable_service()" - -function test_enable_service { - local start="$1" - local add="$2" - local finish="$3" - - ENABLED_SERVICES="$start" - enable_service $add - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" - else - echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" - fi -} - -test_enable_service '' a 'a' -test_enable_service 'a' b 'a,b' -test_enable_service 'a,b' c 'a,b,c' -test_enable_service 'a,b' c 'a,b,c' -test_enable_service 'a,b,' c 'a,b,c' -test_enable_service 'a,b' c,d 'a,b,c,d' -test_enable_service 'a,b' "c d" 'a,b,c,d' -test_enable_service 'a,b,c' c 'a,b,c' - -test_enable_service 'a,b,-c' c 'a,b' -test_enable_service 'a,b,c' -c 'a,b' - -function test_disable_service { - local start="$1" - local del="$2" - local finish="$3" - - ENABLED_SERVICES="$start" - disable_service "$del" - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start - $del -> $ENABLED_SERVICES" - else - echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" - fi -} - -echo "Testing disable_service()" -test_disable_service 'a,b,c' a 'b,c' -test_disable_service 'a,b,c' b 'a,c' -test_disable_service 'a,b,c' c 'a,b' - -test_disable_service 'a,b,c' a 'b,c' -test_disable_service 'b,c' b 'c' -test_disable_service 'c' c '' -test_disable_service '' d '' - -test_disable_service 'a,b,c,' c 'a,b' -test_disable_service 'a,b' c 'a,b' - - -echo "Testing disable_all_services()" -ENABLED_SERVICES=a,b,c -disable_all_services - -if [[ -z "$ENABLED_SERVICES" ]]; then - echo "OK" -else - echo "disabling all services FAILED: $ENABLED_SERVICES" -fi - -echo "Testing disable_negated_services()" - - -function test_disable_negated_services { - local start="$1" - local finish="$2" - - ENABLED_SERVICES="$start" - disable_negated_services - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" - else - echo "changing $start to $finish failed: $ENABLED_SERVICES" - fi -} - -test_disable_negated_services '-a' '' -test_disable_negated_services '-a,a' '' -test_disable_negated_services '-a,-a' '' -test_disable_negated_services 'a,-a' '' -test_disable_negated_services 'b,a,-a' 'b' -test_disable_negated_services 'a,b,-a' 'b' -test_disable_negated_services 'a,-a,b' 'b' - - -echo "Testing is_package_installed()" - -if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion -fi - -if [[ "$os_PACKAGE" = "deb" ]]; then - is_package_installed dpkg - VAL=$? -elif [[ "$os_PACKAGE" = "rpm" ]]; then - is_package_installed rpm - VAL=$? -else - VAL=1 -fi -if [[ "$VAL" -eq 0 ]]; then - echo "OK" -else - echo "is_package_installed() on existing package failed" -fi - -if [[ "$os_PACKAGE" = "deb" ]]; then - is_package_installed dpkg bash - VAL=$? -elif [[ "$os_PACKAGE" = "rpm" ]]; then - is_package_installed rpm bash - VAL=$? -else - VAL=1 -fi -if [[ "$VAL" -eq 0 ]]; then - echo "OK" -else - echo "is_package_installed() on more than one existing package failed" -fi - -is_package_installed zzzZZZzzz -VAL=$? -if [[ "$VAL" -ne 0 ]]; then - echo "OK" -else - echo "is_package_installed() on non-existing package failed" -fi - -# test against removed package...was a bug on Ubuntu -if is_ubuntu; then - PKG=cowsay - if ! 
(dpkg -s $PKG >/dev/null 2>&1); then - # it was never installed...set up the condition - sudo apt-get install -y cowsay >/dev/null 2>&1 - fi - if (dpkg -s $PKG >/dev/null 2>&1); then - # remove it to create the 'un' status - sudo dpkg -P $PKG >/dev/null 2>&1 - fi - - # now test the installed check on a deleted package - is_package_installed $PKG - VAL=$? - if [[ "$VAL" -ne 0 ]]; then - echo "OK" - else - echo "is_package_installed() on deleted package failed" - fi -fi diff --git a/tests/test_config.sh b/tests/test_config.sh deleted file mode 100755 index 5700f8df29..0000000000 --- a/tests/test_config.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/env bash - -# Tests for DevStack meta-config functions - -TOP=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP/functions - -# Import config functions -source $TOP/lib/config - -# check_result() tests and reports the result values -# check_result "actual" "expected" -function check_result { - local actual=$1 - local expected=$2 - if [[ "$actual" == "$expected" ]]; then - echo "OK" - else - echo -e "failed: $actual != $expected\n" - fi -} - -TEST_1C_ADD="[eee] -type=new -multi = foo2" - -function create_test1c { - cat >test1c.conf <test2a.conf <test.conf < /dev/null 2>&1 +if [[ $? = 0 ]]; then + failed "die_if_not_set [X='' true] Failed" +fi + +bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set $LINENO X 'not OK'" +if [[ $? != 0 ]]; then + failed "die_if_not_set [X='Y' false] Failed" +else + passed 'OK' +fi + +bash -c "source $TOP/functions; X=`false`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 +if [[ $? = 0 ]]; then + failed "die_if_not_set [X='' false] Failed" +fi + + +# Enabling/disabling services + +echo "Testing enable_service()" + +function test_enable_service { + local start="$1" + local add="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + enable_service $add + if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start + $add -> $ENABLED_SERVICES" + else + failed "changing $start to $finish with $add failed: $ENABLED_SERVICES" + fi +} + +test_enable_service '' a 'a' +test_enable_service 'a' b 'a,b' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b,' c 'a,b,c' +test_enable_service 'a,b' c,d 'a,b,c,d' +test_enable_service 'a,b' "c d" 'a,b,c,d' +test_enable_service 'a,b,c' c 'a,b,c' + +test_enable_service 'a,b,-c' c 'a,b' +test_enable_service 'a,b,c' -c 'a,b' + +function test_disable_service { + local start="$1" + local del="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + disable_service "$del" + if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start - $del -> $ENABLED_SERVICES" + else + failed "changing $start to $finish with $del failed: $ENABLED_SERVICES" + fi +} + +echo "Testing disable_service()" +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'a,b,c' b 'a,c' +test_disable_service 'a,b,c' c 'a,b' + +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'b,c' b 'c' +test_disable_service 'c' c '' +test_disable_service '' d '' + +test_disable_service 'a,b,c,' c 'a,b' +test_disable_service 'a,b' c 'a,b' + + +echo "Testing disable_all_services()" +ENABLED_SERVICES=a,b,c +disable_all_services + +if [[ -z "$ENABLED_SERVICES" ]]; then + passed "OK" +else + failed "disabling all services FAILED: $ENABLED_SERVICES" +fi + +echo "Testing disable_negated_services()" + + +function test_disable_negated_services { + local start="$1" + local finish="$2" + + ENABLED_SERVICES="$start" + disable_negated_services + 
if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start + $add -> $ENABLED_SERVICES" + else + failed "changing $start to $finish failed: $ENABLED_SERVICES" + fi +} + +test_disable_negated_services '-a' '' +test_disable_negated_services '-a,a' '' +test_disable_negated_services '-a,-a' '' +test_disable_negated_services 'a,-a' '' +test_disable_negated_services 'b,a,-a' 'b' +test_disable_negated_services 'a,b,-a' 'b' +test_disable_negated_services 'a,-a,b' 'b' +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'aa,-a' 'aa' +test_disable_negated_services 'a_a, -a_a' '' +test_disable_negated_services 'a-b, -a-b' '' +test_disable_negated_services 'a-b, b, -a-b' 'b' +test_disable_negated_services 'a,-a,av2,b' 'av2,b' +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'a,av2,-a,a' 'av2' +test_disable_negated_services 'a,-a,av2' 'av2' + +echo "Testing remove_disabled_services()" + +function test_remove_disabled_services { + local service_list="$1" + local remove_list="$2" + local expected="$3" + + results=$(remove_disabled_services "$service_list" "$remove_list") + if [ "$results" = "$expected" ]; then + passed "OK: '$service_list' - '$remove_list' -> '$results'" + else + failed "getting '$expected' from '$service_list' - '$remove_list' failed: '$results'" + fi +} + +test_remove_disabled_services 'a,b,c' 'a,c' 'b' +test_remove_disabled_services 'a,b,c' 'b' 'a,c' +test_remove_disabled_services 'a,b,c,d' 'a,c d' 'b' +test_remove_disabled_services 'a,b c,d' 'a d' 'b,c' +test_remove_disabled_services 'a,b,c' 'a,b,c' '' +test_remove_disabled_services 'a,b,c' 'd' 'a,b,c' +test_remove_disabled_services 'a,b,c' '' 'a,b,c' +test_remove_disabled_services '' 'a,b,c' '' +test_remove_disabled_services '' '' '' + +echo "Testing is_package_installed()" + +if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + passed "OK" +else + failed "is_package_installed() on existing package failed" +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg bash + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm bash + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + passed "OK" +else + failed "is_package_installed() on more than one existing package failed" +fi + +is_package_installed zzzZZZzzz +VAL=$? +if [[ "$VAL" -ne 0 ]]; then + passed "OK" +else + failed "is_package_installed() on non-existing package failed" +fi + +# test against removed package...was a bug on Ubuntu +if is_ubuntu; then + PKG=cowsay-off + if ! (dpkg -s $PKG >/dev/null 2>&1); then + # it was never installed...set up the condition + sudo apt-get install -y cowsay >/dev/null 2>&1 + fi + if (dpkg -s $PKG >/dev/null 2>&1); then + # remove it to create the 'un' status + sudo dpkg -P $PKG >/dev/null 2>&1 + fi + + # now test the installed check on a deleted package + is_package_installed $PKG + VAL=$? + if [[ "$VAL" -ne 0 ]]; then + passed "OK" + else + failed "is_package_installed() on deleted package failed" + fi +fi + +# test isset function +echo "Testing isset()" +you_should_not_have_this_variable=42 + +if isset "you_should_not_have_this_variable"; then + passed "OK" +else + failed "\"you_should_not_have_this_variable\" not declared. 
failed" +fi + +unset you_should_not_have_this_variable +if isset "you_should_not_have_this_variable"; then + failed "\"you_should_not_have_this_variable\" looks like declared variable." +else + passed "OK" +fi + +function test_export_proxy_variables { + echo "Testing export_proxy_variables()" + + local expected results + + http_proxy=http_proxy_test + https_proxy=https_proxy_test + no_proxy=no_proxy_test + + export_proxy_variables + expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy") + results=$(env | egrep '(http(s)?|no)_proxy=' | sort) + if [[ $expected = $results ]]; then + passed "OK: Proxy variables are exported when proxy variables are set" + else + failed "Expected: $expected, Failed: $results" + fi + + unset http_proxy https_proxy no_proxy + export_proxy_variables + results=$(env | egrep '(http(s)?|no)_proxy=') + if [[ "" = $results ]]; then + passed "OK: Proxy variables aren't exported when proxy variables aren't set" + else + failed "Expected: '', Failed: $results" + fi +} +test_export_proxy_variables + +report_results diff --git a/tests/test_ini.sh b/tests/test_ini.sh deleted file mode 100755 index 598cd578f6..0000000000 --- a/tests/test_ini.sh +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env bash - -# Tests for DevStack INI functions - -TOP=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP/functions - - -echo "Testing INI functions" - -cat >test.ini <${TEST_INI} < $thing" + fi + fi +} + +function test_all_libs_upto_date { + # this is all the magics + local found_libs=${!GITREPO[@]} + declare -A all_libs + for lib in $ALL_LIBS; do + all_libs[$lib]=1 + done + + for lib in $found_libs; do + if [[ -z ${all_libs[$lib]} ]]; then + echo "Library '$lib' not listed in unit tests, please add to ALL_LIBS" + exit 1 + fi + + done + echo "test_all_libs_upto_date PASSED" +} + +function test_libs_exist { + local lib="" + for lib in $ALL_LIBS; do + check_exists "${GITREPO[$lib]}" "GITREPO" "$lib" + check_exists "${GITBRANCH[$lib]}" "GITBRANCH" "$lib" + check_exists "${GITDIR[$lib]}" "GITDIR" "$lib" + done + + echo "test_libs_exist PASSED" +} + +function test_branch_master { + for lib in $ALL_LIBS; do + if [[ ${GITBRANCH[$lib]} != "master" ]]; then + echo "GITBRANCH for $lib not master (${GITBRANCH[$lib]})" + exit 1 + fi + done + + echo "test_branch_master PASSED" +} + +set -o errexit + +test_libs_exist +test_branch_master +test_all_libs_upto_date diff --git a/tests/test_localconf.sh b/tests/test_localconf.sh new file mode 100755 index 0000000000..d8075df442 --- /dev/null +++ b/tests/test_localconf.sh @@ -0,0 +1,475 @@ +#!/usr/bin/env bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. 
&& pwd) + +# Import config functions +source $TOP/inc/ini-config + +source $TOP/tests/unittest.sh + +echo "Testing INI local.conf functions" + +# test that can determine if file has section in specified meta-section + +function test_localconf_has_section { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_section $file_localconf post-config $file_conf1 conf1_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t2 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t3 + assert_equal $? 0 + localconf_has_section $file_localconf post-extra $file_conf2 conf2_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t4 + assert_equal $? 1 + localconf_has_section $file_localconf post-install $file_conf1 conf1_t1 + assert_equal $? 1 + localconf_has_section $file_localconf local localrc conf1_t2 + assert_equal $? 1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that can determine if file has option in specified meta-section and section +function test_localconf_has_option { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1 = conf1_t1_val1 +conf1_t1_opt2 = conf1_t1_val2 +conf1_t1_opt3 = conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR1 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR2 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR3 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 + assert_equal $? 0 + localconf_has_option $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1_opt4 + assert_equal $? 1 + localconf_has_option $file_localconf post-install $file_conf1 conf1_t1_opt1 + assert_equal $? 1 + localconf_has_option $file_localconf local localrc conf1_t2 conf1_t2_opt1 + assert_equal $? 
1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that update option in specified meta-section and section +function test_localconf_update_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2 +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2_update +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1_update +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2_update +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3_update + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3_update +EOF + + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 conf1_t1_val1_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 conf1_t2_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 conf1_t3_val3_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt3 conf2_t1_val3_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t3_opt1 conf1_t3_val1_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4_update + localconf_update_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt1 conf2_t1_val1_update + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add option in specified meta-section and section +function test_localconf_add_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 
+conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt4 = conf1_t1_val4 +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt4 = conf1_t2_val4 +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt4 = conf1_t3_val4 +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR4 = localrc_val4 +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + + localconf_add_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt4 conf1_t1_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt4 conf1_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt4 conf1_t3_val4 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_option "$SUDO" $file_localconf local localrc.conf "" LOCALRC_VAR4 localrc_val4_update + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt4 conf2_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t2_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_add_section_and_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 
+[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t2] +conf2_t2_opt1 = conf2_t2_val1 +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_add_section_and_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_section_and_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_section_and_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_set { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2_update +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 + +[[post-install|/etc/neutron/plugin/ml2/ml2_conf.ini]] +[ml2] +ml2_opt1 = ml2_val1 +EOF + + if [[ -n "$SUDO" ]]; then + SUDO_ARG="-sudo" + else + SUDO_ARG="" + fi + localconf_set $SUDO_ARG $file_localconf post-install /etc/neutron/plugin/ml2/ml2_conf.ini ml2 ml2_opt1 ml2_val1 + localconf_set $SUDO_ARG $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_set $SUDO_ARG $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_set $SUDO_ARG $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + + +test_localconf_has_section +test_localconf_has_option +test_localconf_update_option +test_localconf_add_option 
+test_localconf_add_section_and_option +test_localconf_set diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh new file mode 100755 index 0000000000..30479f245a --- /dev/null +++ b/tests/test_meta_config.sh @@ -0,0 +1,436 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import config functions +source $TOP/inc/ini-config +source $TOP/inc/meta-config + +set -e + +# check_result() tests and reports the result values +# check_result "actual" "expected" +function check_result { + local actual=$1 + local expected=$2 + if [[ "$actual" == "$expected" ]]; then + echo "OK" + else + echo -e "failed: $actual != $expected\n" + exit 1 + fi +} + +# mock function-common:die so that it does not +# interrupt our test script +function die { + exit -1 +} + +function warn { + return 0 +} + +TEST_1C_ADD="[eee] +type=new +multi = foo2" + +function create_test1c { + cat >test1c.conf <test2a.conf <test-etc/test4.conf <test.conf < ${UNSORTED} + sort ${UNSORTED} > ${SORTED} + + if [ -n "$(diff -c ${UNSORTED} ${SORTED})" ]; then + failed "$p is unsorted" + # output this, it's helpful to see what exactly is unsorted + diff -c ${UNSORTED} ${SORTED} + else + passed "$p is sorted" + fi +done + +rm -rf ${TMPDIR} + +report_results diff --git a/tests/test_refs.sh b/tests/test_refs.sh new file mode 100755 index 0000000000..0f9aa4a5ca --- /dev/null +++ b/tests/test_refs.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +echo "Ensuring we don't have crazy refs" + +REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'` +rc=$? +if [[ $rc -eq 0 ]]; then + echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:" + echo $REFS + exit 1 +fi diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh new file mode 100755 index 0000000000..03996ceab4 --- /dev/null +++ b/tests/test_truefalse.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +# common mistake is to use $FOO instead of "FOO"; in that case we +# should die +bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null +assert_equal 1 $? 
"missing test-value" + +VAL=$(trueorfalse False MISSING_VARIABLE) +assert_equal "False" $VAL "blank test-value" + +function test_trueorfalse { + local one=1 + local captrue=True + local lowtrue=true + local uppertrue=TRUE + local capyes=Yes + local lowyes=yes + local upperyes=YES + + for default in True False; do + for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do + local msg="trueorfalse($default $name)" + assert_equal "True" $(trueorfalse $default $name) "$msg" + done + done + + local zero=0 + local capfalse=False + local lowfalse=false + local upperfalse=FALSE + local capno=No + local lowno=no + local upperno=NO + + for default in True False; do + for name in zero capfalse lowfalse upperfalse capno lowno upperno; do + local msg="trueorfalse($default $name)" + assert_equal "False" $(trueorfalse $default $name) "$msg" + done + done +} + +test_trueorfalse + +report_results diff --git a/tests/test_vercmp.sh b/tests/test_vercmp.sh new file mode 100755 index 0000000000..c88bf86d7e --- /dev/null +++ b/tests/test_vercmp.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# Tests for DevStack vercmp functionality + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +assert_true "numeric gt" vercmp 2.0 ">" 1.0 +assert_true "numeric gte" vercmp 2.0 ">=" 1.0 +assert_true "numeric gt" vercmp 1.0.1 ">" 1.0 +assert_true "numeric gte" vercmp 1.0.1 ">=" 1.0 +assert_true "alpha gt" vercmp 1.0.1b ">" 1.0.1a +assert_true "alpha gte" vercmp 1.0.1b ">=" 1.0.1a +assert_true "alpha gt" vercmp b ">" a +assert_true "alpha gte" vercmp b ">=" a +assert_true "alpha gt" vercmp 2.0-rc3 ">" 2.0-rc1 +assert_true "alpha gte" vercmp 2.0-rc3 ">=" 2.0-rc1 + +assert_false "numeric gt fail" vercmp 1.0 ">" 1.0 +assert_true "numeric gte" vercmp 1.0 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9a.9 ">" 1.0.1 +assert_false "numeric gte fail" vercmp 0.9a.9 ">=" 1.0.1 + +assert_false "numeric lt" vercmp 1.0 "<" 1.0 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0 +assert_true "numeric lt" vercmp 1.0 "<" 1.0.1 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0.1 +assert_true "alpha lt" vercmp 1.0.1a "<" 1.0.1b +assert_true "alpha lte" vercmp 1.0.1a "<=" 1.0.1b +assert_true "alpha lt" vercmp a "<" b +assert_true "alpha lte" vercmp a "<=" b +assert_true "alpha lt" vercmp 2.0-rc1 "<" 2.0-rc3 +assert_true "alpha lte" vercmp 2.0-rc1 "<=" 2.0-rc3 + +assert_true "eq" vercmp 1.0 "==" 1.0 +assert_true "eq" vercmp 1.0.1 "==" 1.0.1 +assert_false "eq fail" vercmp 1.0.1 "==" 1.0.2 +assert_false "eq fail" vercmp 2.0-rc1 "==" 2.0-rc2 + +report_results diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh new file mode 100755 index 0000000000..919652536d --- /dev/null +++ b/tests/test_worlddump.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Simple test of worlddump.py + +TOP=$(cd $(dirname "$0")/.. && pwd) + +source $TOP/tests/unittest.sh + +OUT_DIR=$(mktemp -d) + +${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR + +if [[ $? -ne 0 ]]; then + fail "worlddump failed" +else + + # worlddump creates just one output file + OUT_FILE=($OUT_DIR/*.txt) + + if [ ! 
-r $OUT_FILE ]; then + failed "worlddump output not seen" + else + passed "worlddump output $OUT_FILE" + + if [[ $(stat -c %s $OUT_DIR/*.txt) -gt 0 ]]; then + passed "worlddump output is not zero sized" + fi + + # put more extensive examination here, if required. + fi +fi + +rm -rf $OUT_DIR + +report_results diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh new file mode 100755 index 0000000000..71d8d51614 --- /dev/null +++ b/tests/test_write_devstack_local_conf_role.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh new file mode 100644 index 0000000000..fced2abe65 --- /dev/null +++ b/tests/unittest.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# we always start with no errors +ERROR=0 +PASS=0 +FAILED_FUNCS="" + +export PYTHON=$(which python3 2>/dev/null) + +# pass a test, printing out MSG +# usage: passed message +function passed { + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') + local msg="$1" + if [ -z "$msg" ]; then + msg="OK" + fi + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" +} + +# fail a test, printing out MSG +# usage: failed message +function failed { + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') + local msg="$1" + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) +} + +# assert string comparison of val1 equal val2, printing out msg +# usage: assert_equal val1 val2 msg +function assert_equal { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$3 + + if [ -z "$msg" ]; then + msg="OK" + fi + if [[ "$1" != "$2" ]]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $1 != $2 in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + +# assert variable is empty/blank, printing out msg +# usage: assert_empty VAR msg +function assert_empty { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$2 + + if [ -z "$msg" ]; then + msg="OK" + fi + if [[ ! -z ${!1} ]]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $1 not empty in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + +# assert the arguments evaluate to true +# assert_true "message" arg1 arg2 +function assert_true { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? 
-eq 0 ]; then + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + else + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + fi +} + +# assert the arguments evaluate to false +# assert_false "message" arg1 arg2 +function assert_false { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? -eq 0 ]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + + +# Print a summary of passing and failing tests and exit +# (with an error if we have failed tests) +# usage: report_results +function report_results { + echo "$PASS Tests PASSED" + if [[ $ERROR -gt 0 ]]; then + echo + echo "The following $ERROR tests FAILED" + echo -e "$FAILED_FUNCS" + echo "---" + exit 1 + fi + exit 0 +} diff --git a/tools/bash8.py b/tools/bash8.py deleted file mode 100755 index 3abf87b484..0000000000 --- a/tools/bash8.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# bash8 - a pep8 equivalent for bash scripts -# -# this program attempts to be an automated style checker for bash scripts -# to fill the same part of code review that pep8 does in most OpenStack -# projects. It starts from humble beginnings, and will evolve over time. -# -# Currently Supported checks -# -# Errors -# Basic white space errors, for consistent indenting -# - E001: check that lines do not end with trailing whitespace -# - E002: ensure that indents are only spaces, and not hard tabs -# - E003: ensure all indents are a multiple of 4 spaces -# - E004: file did not end with a newline -# -# Structure errors -# -# A set of rules that help keep things consistent in control blocks. 
-# These are ignored on long lines that have a continuation, because -# unrolling that is kind of "interesting" -# -# - E010: *do* not on the same line as *for* -# - E011: *then* not on the same line as *if* -# - E012: heredoc didn't end before EOF - -import argparse -import fileinput -import re -import sys - -ERRORS = 0 -IGNORE = None - - -def register_ignores(ignores): - global IGNORE - if ignores: - IGNORE = '^(' + '|'.join(ignores.split(',')) + ')' - - -def should_ignore(error): - return IGNORE and re.search(IGNORE, error) - - -def print_error(error, line, - filename=None, filelineno=None): - if not filename: - filename = fileinput.filename() - if not filelineno: - filelineno = fileinput.filelineno() - global ERRORS - ERRORS = ERRORS + 1 - print("%s: '%s'" % (error, line.rstrip('\n'))) - print(" - %s: L%s" % (filename, filelineno)) - - -def not_continuation(line): - return not re.search('\\\\$', line) - - -def check_for_do(line): - if not_continuation(line): - match = re.match('^\s*(for|while|until)\s', line) - if match: - operator = match.group(1).strip() - if not re.search(';\s*do(\b|$)', line): - print_error('E010: Do not on same line as %s' % operator, - line) - - -def check_if_then(line): - if not_continuation(line): - if re.search('^\s*if \[', line): - if not re.search(';\s*then(\b|$)', line): - print_error('E011: Then non on same line as if', line) - - -def check_no_trailing_whitespace(line): - if re.search('[ \t]+$', line): - print_error('E001: Trailing Whitespace', line) - - -def check_indents(line): - m = re.search('^(?P[ \t]+)', line) - if m: - if re.search('\t', m.group('indent')): - print_error('E002: Tab indents', line) - if (len(m.group('indent')) % 4) != 0: - print_error('E003: Indent not multiple of 4', line) - -def check_function_decl(line): - failed = False - if line.startswith("function"): - if not re.search('^function [\w-]* \{$', line): - failed = True - else: - # catch the case without "function", e.g. - # things like '^foo() {' - if re.search('^\s*?\(\)\s*?\{', line): - failed = True - - if failed: - print_error('E020: Function declaration not in format ' - ' "^function name {$"', line) - - -def starts_multiline(line): - m = re.search("[^<]<<\s*(?P\w+)", line) - if m: - return m.group('token') - else: - return False - - -def end_of_multiline(line, token): - if token: - return re.search("^%s\s*$" % token, line) is not None - return False - - -def check_files(files, verbose): - in_multiline = False - multiline_start = 0 - multiline_line = "" - logical_line = "" - token = False - prev_file = None - prev_line = "" - prev_lineno = 0 - - for line in fileinput.input(files): - if fileinput.isfirstline(): - # if in_multiline when the new file starts then we didn't - # find the end of a heredoc in the last file. 
- if in_multiline: - print_error('E012: heredoc did not end before EOF', - multiline_line, - filename=prev_file, filelineno=multiline_start) - in_multiline = False - - # last line of a previous file should always end with a - # newline - if prev_file and not prev_line.endswith('\n'): - print_error('E004: file did not end with a newline', - prev_line, - filename=prev_file, filelineno=prev_lineno) - - prev_file = fileinput.filename() - - if verbose: - print "Running bash8 on %s" % fileinput.filename() - - # NOTE(sdague): multiline processing of heredocs is interesting - if not in_multiline: - logical_line = line - token = starts_multiline(line) - if token: - in_multiline = True - multiline_start = fileinput.filelineno() - multiline_line = line - continue - else: - logical_line = logical_line + line - if not end_of_multiline(line, token): - continue - else: - in_multiline = False - - check_no_trailing_whitespace(logical_line) - check_indents(logical_line) - check_for_do(logical_line) - check_if_then(logical_line) - check_function_decl(logical_line) - - prev_line = logical_line - prev_lineno = fileinput.filelineno() - -def get_options(): - parser = argparse.ArgumentParser( - description='A bash script style checker') - parser.add_argument('files', metavar='file', nargs='+', - help='files to scan for errors') - parser.add_argument('-i', '--ignore', help='Rules to ignore') - parser.add_argument('-v', '--verbose', action='store_true', default=False) - return parser.parse_args() - - -def main(): - opts = get_options() - register_ignores(opts.ignore) - check_files(opts.files, opts.verbose) - - if ERRORS > 0: - print("%d bash8 error(s) found" % ERRORS) - return 1 - else: - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tools/build_bm.sh b/tools/build_bm.sh deleted file mode 100755 index ab0ba0ef8a..0000000000 --- a/tools/build_bm.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -# **build_bm.sh** - -# Build an OpenStack install on a bare metal machine. -set +x - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Source params -source ./stackrc - -# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova" -STACKSH_PARAMS=${STACKSH_PARAMS:-} - -# Option to use the version of devstack on which we are currently working -USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1} - -# Configure the runner -RUN_SH=`mktemp` -cat > $RUN_SH < docs/$f.html -done -for f in $(find functions lib samples -type f -name \*); do - echo $f - FILES+="$f " - mkdir -p docs/`dirname $f`; - $SHOCCO $f > docs/$f.html -done -echo "$FILES" >docs-files - -# Switch to the gh_pages repo -cd docs - -# Collect the new generated pages -find . -name \*.html -print0 | xargs -0 git add - -# Push our changes back up to the docs branch -if ! 
git diff-index HEAD --quiet; then - git commit -a -m "Update script docs" - if [[ -n $PUSH ]]; then - git push - fi -fi - -# Clean up or report the temp workspace -if [[ -n REPO && -n $PUSH_REPO ]]; then - rm -rf $TMP_ROOT -else - if [[ -z "$TMP_ROOT" ]]; then - TMP_ROOT="$(pwd)" - fi - echo "Built docs in $TMP_ROOT" -fi diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh deleted file mode 100755 index 50d91d063c..0000000000 --- a/tools/build_pxe_env.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash -e - -# **build_pxe_env.sh** - -# Create a PXE boot environment -# -# build_pxe_env.sh destdir -# -# Requires Ubuntu Oneiric -# -# Only needs to run as root if the destdir permissions require it - -dpkg -l syslinux || apt-get install -y syslinux - -DEST_DIR=${1:-/tmp}/tftpboot -PXEDIR=${PXEDIR:-/opt/ramstack/pxe} -PROGDIR=`dirname $0` - -# Clean up any resources that may be in use -function cleanup { - set +o errexit - - # Mop up temporary files - if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then - umount $MNTDIR - rmdir $MNTDIR - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -mkdir -p $DEST_DIR/pxelinux.cfg -cd $DEST_DIR -for i in memdisk menu.c32 pxelinux.0; do - cp -pu /usr/lib/syslinux/$i $DEST_DIR -done - -CFG=$DEST_DIR/pxelinux.cfg/default -cat >$CFG <$PXEDIR/stack-initrd.gz -fi -cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu - -if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then - MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` - mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR - - if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then - echo "No kernel found" - umount $MNTDIR - rmdir $MNTDIR - exit 1 - else - cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR - fi - umount $MNTDIR - rmdir $MNTDIR -fi - -# Get generic kernel version -KNAME=`basename $PXEDIR/vmlinuz-*-generic` -KVER=${KNAME#vmlinuz-} -cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu -cat >>$CFG <>$CFG <>$CFG <> $MNTDIR/etc/sudoers - - umount $MNTDIR - rmdir $MNTDIR - qemu-nbd -d $NBD - NBD="" - mv $DEV_FILE_TMP $DEV_FILE -fi -rm -f $DEV_FILE_TMP - - -# Clone git repositories onto the system -# ====================================== - -IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` - -if [ ! -r $IMG_FILE ]; then - NBD=`map_nbd $DEV_FILE` - - # Pre-create the image file - # FIXME(dt): This should really get the partition size to - # pre-create the image file - dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024)) - # Create filesystem image for RAM disk - dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M - - qemu-nbd -d $NBD - NBD="" - mv $IMG_FILE_TMP $IMG_FILE -fi -rm -f $IMG_FILE_TMP - -MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` -mount -t ext4 -o loop $IMG_FILE $MNTDIR -cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf - -# We need to install a non-virtual kernel and modules to boot from -if [ ! 
-r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then - chroot $MNTDIR apt-get install -y linux-generic -fi - -git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH -git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH -git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH - -# Use this version of devstack -rm -rf $MNTDIR/$DEST/devstack -cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R $STACK_USER $DEST/devstack - -# Configure host network for DHCP -mkdir -p $MNTDIR/etc/network -cat > $MNTDIR/etc/network/interfaces <$MNTDIR/etc/hostname -echo "127.0.0.1 localhost ramstack" >$MNTDIR/etc/hosts - -# Configure the runner -RUN_SH=$MNTDIR/$DEST/run.sh -cat > $RUN_SH < $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo "All done! Time to start clicking." >> $DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH -chroot $MNTDIR chown $STACK_USER $DEST/run.sh - -umount $MNTDIR -rmdir $MNTDIR diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh deleted file mode 100755 index 5f3acc5684..0000000000 --- a/tools/build_uec_ramdisk.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env bash - -# **build_uec_ramdisk.sh** - -# Build RAM disk images based on UEC image - -# Exit on error to stop unexpected errors -set -o errexit - -if [ ! "$#" -eq "1" ]; then - echo "$0 builds a gziped Ubuntu OpenStack install" - echo "usage: $0 dest" - exit 1 -fi - -# Make sure that we have the proper version of ubuntu (only works on oneiric) -if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric." - exit 1 -fi - -# Clean up resources that may be in use -function cleanup { - set +o errexit - - if [ -n "$MNT_DIR" ]; then - umount $MNT_DIR/dev - umount $MNT_DIR - fi - - if [ -n "$DEST_FILE_TMP" ]; then - rm $DEST_FILE_TMP - fi - - # Kill ourselves to signal parents - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Output dest image -DEST_FILE=$1 - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -cd $TOP_DIR - -# Source params -source ./stackrc - -DEST=${DEST:-/opt/stack} - -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - -# Configure how large the VM should be -GUEST_SIZE=${GUEST_SIZE:-2G} - -# Exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt_get install -y --force-yes $DEPS - -# Where to store files and instances -CACHEDIR=${CACHEDIR:-/opt/stack/cache} -WORK_DIR=${WORK_DIR:-/opt/ramstack} - -# Where to store images -image_dir=$WORK_DIR/images/$DIST_NAME -mkdir -p $image_dir - -# Get the base image if it does not yet exist -if [ ! 
-e $image_dir/disk ]; then - $TOOLS_DIR/get_uec_image.sh -r 2000M $DIST_NAME $image_dir/disk -fi - -# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` -ROOT_PASSWORD=${ADMIN_PASSWORD:-password} - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Pre-load the image with basic environment -if [ ! -e $image_dir/disk-primed ]; then - cp $image_dir/disk $image_dir/disk-primed - $TOOLS_DIR/warm_apts_for_uec.sh $image_dir/disk-primed - $TOOLS_DIR/copy_dev_environment_to_uec.sh $image_dir/disk-primed -fi - -# Back to devstack -cd $TOP_DIR - -DEST_FILE_TMP=`mktemp $DEST_FILE.XXXXXX` -MNT_DIR=`mktemp -d --tmpdir mntXXXXXXXX` -cp $image_dir/disk-primed $DEST_FILE_TMP -mount -t ext4 -o loop $DEST_FILE_TMP $MNT_DIR -mount -o bind /dev /$MNT_DIR/dev -cp -p /etc/resolv.conf $MNT_DIR/etc/resolv.conf -echo root:$ROOT_PASSWORD | chroot $MNT_DIR chpasswd -touch $MNT_DIR/$DEST/.ramdisk - -# We need to install a non-virtual kernel and modules to boot from -if [ ! -r "`ls $MNT_DIR/boot/vmlinuz-*-generic | head -1`" ]; then - chroot $MNT_DIR apt-get install -y linux-generic -fi - -git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH -git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH -git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH -git_clone $TEMPEST_REPO $DEST/tempest $TEMPEST_BRANCH - -# Use this version of devstack -rm -rf $MNT_DIR/$DEST/devstack -cp -pr $TOP_DIR $MNT_DIR/$DEST/devstack -chroot $MNT_DIR chown -R stack $DEST/devstack - -# Configure host network for DHCP -mkdir -p $MNT_DIR/etc/network -cat > $MNT_DIR/etc/network/interfaces <$MNT_DIR/etc/hostname -echo "127.0.0.1 localhost ramstack" >$MNT_DIR/etc/hosts - -# Configure the runner -RUN_SH=$MNT_DIR/$DEST/run.sh -cat > $RUN_SH < $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo "All done! Time to start clicking." >> $DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH -chroot $MNT_DIR chown stack $DEST/run.sh - -umount $MNT_DIR/dev -umount $MNT_DIR -rmdir $MNT_DIR -mv $DEST_FILE_TMP $DEST_FILE -rm -f $DEST_FILE_TMP - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh deleted file mode 100755 index c97e0a143d..0000000000 --- a/tools/build_usb_boot.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -e - -# **build_usb_boot.sh** - -# Create a syslinux boot environment -# -# build_usb_boot.sh destdev -# -# Assumes syslinux is installed -# Needs to run as root - -DEST_DIR=${1:-/tmp/syslinux-boot} -PXEDIR=${PXEDIR:-/opt/ramstack/pxe} - -# Clean up any resources that may be in use -function cleanup { - set +o errexit - - # Mop up temporary files - if [ -n "$DEST_DEV" ]; then - umount $DEST_DIR - rmdir $DEST_DIR - fi - if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then - umount $MNTDIR - rmdir $MNTDIR - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -if [ -b $DEST_DIR ]; then - # We have a block device, install syslinux and mount it - DEST_DEV=$DEST_DIR - DEST_DIR=`mktemp -d --tmpdir mntXXXXXX` - mount $DEST_DEV $DEST_DIR - - if [ ! 
-d $DEST_DIR/syslinux ]; then - mkdir -p $DEST_DIR/syslinux - fi - - # Install syslinux on the device - syslinux --install --directory syslinux $DEST_DEV -else - # We have a directory (for sanity checking output) - DEST_DEV="" - if [ ! -d $DEST_DIR/syslinux ]; then - mkdir -p $DEST_DIR/syslinux - fi -fi - -# Get some more stuff from syslinux -for i in memdisk menu.c32; do - cp -pu /usr/lib/syslinux/$i $DEST_DIR/syslinux -done - -CFG=$DEST_DIR/syslinux/syslinux.cfg -cat >$CFG <$PXEDIR/stack-initrd.gz -fi -cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu - -if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then - MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` - mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR - - if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then - echo "No kernel found" - umount $MNTDIR - rmdir $MNTDIR - if [ -n "$DEST_DEV" ]; then - umount $DEST_DIR - rmdir $DEST_DIR - fi - exit 1 - else - cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR - fi - umount $MNTDIR - rmdir $MNTDIR -fi - -# Get generic kernel version -KNAME=`basename $PXEDIR/vmlinuz-*-generic` -KVER=${KNAME#vmlinuz-} -cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu -cat >>$CFG <>$CFG <>$CFG < $STAGING_DIR/etc/sudoers.d/50_stack_sh ) - -# Copy over your ssh keys and env if desired -cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh -cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys -cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig -cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc -cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc - -# Copy devstack -rm -rf $STAGING_DIR/$DEST/devstack -cp_it . $STAGING_DIR/$DEST/devstack - -# Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R $STACK_USER $DEST - -# Unmount -umount $STAGING_DIR diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 9c29ecd901..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -17,14 +17,14 @@ set -o errexit -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) # Import common functions source $TOP_DIR/functions # Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` # and ``DISTRO`` GetDistro @@ -32,7 +32,7 @@ GetDistro source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` -is_package_installed sudo || install_package sudo +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo [[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." @@ -44,6 +44,15 @@ fi if ! 
getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 5c1c329902..f4a4edcbe2 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -7,6 +7,25 @@ # Warning: This script just for development purposes set -o errexit + +# short_source prints out the current location of the caller in a way +# that strips redundant directories. This is useful for PS4 +# usage. Needed before we start tracing due to how we set +# PS4. Normally we'd pick this up from stackrc, but that's not sourced +# here. +function short_source { + saveIFS=$IFS + IFS=" " + called=($(caller 0)) + IFS=$saveIFS + file=${called[2]} + file=${file#$RC_DIR/} + printf "%-40s " "$file:${called[1]}:${called[0]}" +} +# PS4 is exported to child shells and uses the 'short_source' function, so +# export it so child shells have access to the 'short_source' function also. +export -f short_source + set -o xtrace ACCOUNT_DIR=./accrc @@ -16,54 +35,60 @@ cat < -This script creates certificates and sourcable rc files per tenant/user. +This script creates certificates and sourcable rc files per project/user. Target account directory hierarchy: target_dir-| |-cacert.pem - |-tenant1-name| - | |- user1 - | |- user1-cert.pem - | |- user1-pk.pem - | |- user2 - | .. - |-tenant2-name.. + |-project1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-project2-name.. .. Optional Arguments -P include password to the rc files; with -A it assume all users password is the same -A try with all user -u create files just for the specified user --C create user and tenant, the specifid tenant will be the user's tenant --r when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member) +-C create user and project, the specifid project will be the user's project +-r when combined with -C and the (-u) user exists it will be the user's project role in the (-C)project (default: Member) -p password for the user +--heat-url --os-username --os-password ---os-tenant-name ---os-tenant-id +--os-project-name +--os-project-id +--os-user-domain-id +--os-user-domain-name +--os-project-domain-id +--os-project-domain-name --os-auth-url --os-cacert --target-dir ---skip-tenant +--skip-project --debug Example: $0 -AP -$0 -P -C mytenant -u myuser -p mypass +$0 -P -C myproject -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then +if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-project-domain-id:,os-project-domain-name:,os-user-domain-id:,os-user-domain-name:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi eval set -- $options ADDPASS="" +HEAT_URL="" -# The services users usually in the service tenant. 
+# The services users usually in the service project. # rc files for service users, is out of scope. -# Supporting different tenant for services is out of scope. -SKIP_TENANT="service" +# Supporting different project for services is out of scope. +SKIP_PROJECT="service" MODE="" ROLE=Member USER_NAME="" @@ -73,18 +98,26 @@ while [ $# -gt 0 ]; do -h|--help) display_help; exit 0 ;; --os-username) export OS_USERNAME=$2; shift ;; --os-password) export OS_PASSWORD=$2; shift ;; - --os-tenant-name) export OS_TENANT_NAME=$2; shift ;; - --os-tenant-id) export OS_TENANT_ID=$2; shift ;; - --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; + --os-tenant-name) export OS_PROJECT_NAME=$2; shift ;; + --os-tenant-id) export OS_PROJECT_ID=$2; shift ;; + --os-project-name) export OS_PROJECT_NAME=$2; shift ;; + --os-project-id) export OS_PROJECT_ID=$2; shift ;; + --os-user-domain-id) export OS_USER_DOMAIN_ID=$2; shift ;; + --os-user-domain-name) export OS_USER_DOMAIN_NAME=$2; shift ;; + --os-project-domain-id) export OS_PROJECT_DOMAIN_ID=$2; shift ;; + --os-project-domain-name) export OS_PROJECT_DOMAIN_NAME=$2; shift ;; + --skip-tenant) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; + --skip-project) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; --os-auth-url) export OS_AUTH_URL=$2; shift ;; --os-cacert) export OS_CACERT=$2; shift ;; --target-dir) ACCOUNT_DIR=$2; shift ;; + --heat-url) HEAT_URL=$2; shift ;; --debug) set -o xtrace ;; -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; -p) USER_PASS=$2; shift ;; -A) MODE=all; ;; -P) ADDPASS="yes" ;; - -C) MODE=create; TENANT=$2; shift ;; + -C) MODE=create; PROJECT=$2; shift ;; -r) ROLE=$2; shift ;; (--) shift; break ;; (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; @@ -102,8 +135,16 @@ if [ -z "$OS_PASSWORD" ]; then fi fi -if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then - export OS_TENANT_NAME=admin +if [ -z "$OS_PROJECT_ID" -a "$OS_TENANT_ID" ]; then + export OS_PROJECT_ID=$OS_TENANT_ID +fi + +if [ -z "$OS_PROJECT_NAME" -a "$OS_TENANT_NAME" ]; then + export OS_PROJECT_NAME=$OS_TENANT_NAME +fi + +if [ -z "$OS_PROJECT_NAME" -a -z "$OS_PROJECT_ID" ]; then + export OS_PROJECT_NAME=admin fi if [ -z "$OS_USERNAME" ]; then @@ -111,7 +152,17 @@ if [ -z "$OS_USERNAME" ]; then fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v3/ +fi + +if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this file. + OS_USER_DOMAIN_ID=default +fi + +if [ -z "$OS_PROJECT_DOMAIN_ID" -a -z "$OS_PROJECT_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this file. + OS_PROJECT_DOMAIN_ID=default fi USER_PASS=${USER_PASS:-$OS_PASSWORD} @@ -124,91 +175,39 @@ if [ -z "$MODE" ]; then exit 3 fi -export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT - -EC2_URL=`openstack endpoint show ec2 | grep " ec2.publicURL " | cut -d " " -f4` -if [[ -z $EC2_URL ]]; then - EC2_URL=http://localhost:8773/service/Cloud -fi - -S3_URL=`openstack endpoint show s3 | grep " s3.publicURL " | cut -d " " -f4` -if [[ -z $S3_URL ]]; then - S3_URL=http://localhost:3333 -fi - -mkdir -p "$ACCOUNT_DIR" -ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` -EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem -if [ -e "$EUCALYPTUS_CERT" ]; then - mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" -fi -if ! 
nova x509-get-root-cert "$EUCALYPTUS_CERT"; then - echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 - if [ -e "$EUCALYPTUS_CERT.old" ]; then - mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" - fi -fi - - function add_entry { local user_id=$1 local user_name=$2 - local tenant_id=$3 - local tenant_name=$4 + local project_id=$3 + local project_name=$4 local user_passwd=$5 - # The admin user can see all user's secret AWS keys, it does not looks good - local line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "` - if [ -z "$line" ]; then - openstack ec2 credentials create --user $user_id --project $tenant_id 1>&2 - line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "` - fi - local ec2_access_key ec2_secret_key - read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'` - mkdir -p "$ACCOUNT_DIR/$tenant_name" - local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name" - # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN - # Generally the subject DN parts should be in reverse order like the Issuer - # The Serial does not seams correctly marked either - local ec2_cert="$rcfile-cert.pem" - local ec2_private_key="$rcfile-pk.pem" - # Try to preserve the original file on fail (best effort) - if [ -e "$ec2_private_key" ]; then - mv -f "$ec2_private_key" "$ec2_private_key.old" - fi - if [ -e "$ec2_cert" ]; then - mv -f "$ec2_cert" "$ec2_cert.old" - fi - # It will not create certs when the password is incorrect - if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then - if [ -e "$ec2_private_key.old" ]; then - mv -f "$ec2_private_key.old" "$ec2_private_key" - fi - if [ -e "$ec2_cert.old" ]; then - mv -f "$ec2_cert.old" "$ec2_cert" - fi - fi + mkdir -p "$ACCOUNT_DIR/$project_name" + local rcfile="$ACCOUNT_DIR/$project_name/$user_name" + cat >"$rcfile" <>"$rcfile" fi + if [ -n "$HEAT_URL" ]; then + echo "export HEAT_URL=\"$HEAT_URL/$project_id\"" >>"$rcfile" + echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile" + fi + for v in OS_USER_DOMAIN_ID OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_ID OS_PROJECT_DOMAIN_NAME; do + if [ ${!v} ]; then + echo "export $v=${!v}" >>"$rcfile" + else + echo "unset $v" >>"$rcfile" + fi + done } #admin users expected @@ -238,35 +237,35 @@ function get_user_id { } if [ $MODE != "create" ]; then -# looks like I can't ask for all tenant related to a specified user - openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_TENANT}" | while IFS=, read tenant_id tenant_name desc enabled; do - openstack user list --project $tenant_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do + # looks like I can't ask for all project related to a specified user + openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_PROJECT}" | while IFS=, read project_id project_name desc enabled; do + openstack user list --project $project_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then continue; fi # Checks for a specific password defined for an user. 
- # Example for an username johndoe: - # JOHNDOE_PASSWORD=1234 - eval SPECIFIC_UPASSWORD="\$${USER_NAME^^}_PASSWORD" + # Example for an username johndoe: JOHNDOE_PASSWORD=1234 + # This mechanism is used by lib/swift + eval SPECIFIC_UPASSWORD="\$${user_name}_password" if [ -n "$SPECIFIC_UPASSWORD" ]; then USER_PASS=$SPECIFIC_UPASSWORD fi - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" done done else - tenant_name=$TENANT - tenant_id=$(create_or_get_project "$TENANT") + project_name=$PROJECT + project_id=$(create_or_get_project "$PROJECT") user_name=$USER_NAME user_id=`get_user_id $user_name` if [ -z "$user_id" ]; then - eval $(openstack user create "$user_name" --project "$tenant_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id) + eval $(openstack user create "$user_name" --project "$project_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id) user_id=$id - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" else role_id=$(create_or_get_role "$ROLE") - openstack role add "$role_id" --user "$user_id" --project "$tenant_id" - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + openstack role add "$role_id" --user "$user_id" --project "$project_id" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" fi fi diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..86e5529c97 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,121 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. 
+ """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. + + Attepts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. + if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') + try: + with self.engine.begin() as conn: + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. + + This reads "hists" from from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. + """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + last = time.time() + while time.time() - last < 60: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..12300bf619 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +py_modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/debug_function.sh b/tools/debug_function.sh new file mode 100755 index 0000000000..68bd85dc61 --- /dev/null +++ b/tools/debug_function.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# This is a small helper to speed development and debug with devstack. +# It is intended to help you run a single function in a project module +# without having to re-stack. +# +# For example, to run the just start_glance function, do this: +# +# ./tools/debug_function.sh glance start_glance + +if [ ! 
-f "lib/$1" ]; then + echo "Usage: $0 [project] [function] [function...]" +fi + +source stackrc +source lib/$1 +shift +set -x +while [ "$1" ]; do + echo ==== Running $1 ==== + $1 + echo ==== Done with $1 ==== + shift +done diff --git a/tools/discover_hosts.sh b/tools/discover_hosts.sh new file mode 100755 index 0000000000..4ec6a40511 --- /dev/null +++ b/tools/discover_hosts.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# **discover_hosts.sh** + +# This is just a very simple script to run the +# "nova-manage cell_v2 discover_hosts" command +# which is needed to discover compute nodes and +# register them with a parent cell in Nova. +# This assumes that /etc/nova/nova.conf exists +# and has the following entries filled in: +# +# [api_database] +# connection = This is the URL to the nova_api database +# +# In other words this should be run on the primary +# (API) node in a multi-node setup. + +if [[ -x $(which nova-manage) ]]; then + nova-manage cell_v2 discover_hosts --verbose +fi diff --git a/tools/dstat.sh b/tools/dstat.sh new file mode 100755 index 0000000000..e6cbb0f21c --- /dev/null +++ b/tools/dstat.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# **tools/dstat.sh** - Execute instances of DStat to log system load info +# +# Multiple instances of DStat are executed in order to take advantage of +# incompatible features, particularly CSV output and the "top-cpu-adv" and +# "top-io-adv" flags. +# +# Assumes: +# - dstat command is installed + +# Retrieve log directory as argument from calling script. +LOGDIR=$1 + +DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem" +if dstat --version | grep -q 'pcp-dstat' ; then + # dstat is unmaintained, and moving to a plugin of performance + # co-pilot. Fedora 29 for example has rolled this out. It's + # mostly compatible, except for a few options which are not + # implemented (yet?) + DSTAT_TOP_OPTS="" +fi + +# Command line arguments for primary DStat process. +DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp" + +# Command-line arguments for secondary background DStat process. +DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log" + +# Execute and background the secondary dstat process and discard its output. +dstat $DSTAT_CSV_OPTS >& /dev/null & + +# Execute and background the primary dstat process, but keep its output in this +# TTY. +dstat $DSTAT_OPTS & + +# Catch any exit signals, making sure to also terminate any child processes. +trap "kill -- -$$" EXIT + +# Keep this script running as long as child dstat processes are alive. +wait diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index e6a6a79876..9e2818f2cc 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -5,151 +5,109 @@ # fixup_stuff.sh # # All distro and package specific hacks go in here -# -# - prettytable 0.7.2 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - httplib2 0.8 permissions are 600 in the package and -# pip 1.4 doesn't fix it (1.3 did) -# -# - RHEL6: -# -# - set selinux not enforcing -# - (re)start messagebus daemon -# - remove distro packages python-crypto and python-lxml -# - pre-install hgtools to work around a bug in RHEL6 distribute -# - install nose 1.1 from EPEL -set -o errexit -set -o xtrace -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone +# or in a sub-shell +if [[ -z "$TOP_DIR" ]]; then + set -o errexit + set -o xtrace -# Change dir to top of devstack -cd $TOP_DIR + # Keep track of the current directory + TOOLS_DIR=$(cd $(dirname "$0") && pwd) + TOP_DIR=$(cd $TOOLS_DIR/..; pwd) -# Import common functions -source $TOP_DIR/functions + # Change dir to top of DevStack + cd $TOP_DIR -FILES=$TOP_DIR/files + # Import common functions + source $TOP_DIR/functions + FILES=$TOP_DIR/files +fi # Python Packages # --------------- -# get_package_path python-package # in import notation -function get_package_path { - local package=$1 - echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") -} - - -# Pre-install affected packages so we can fix the permissions -# These can go away once we are confident that pip 1.4.1+ is available everywhere - -# Fix prettytable 0.7.2 permissions -# Don't specify --upgrade so we use the existing package if present -pip_install 'prettytable>0.7' -PACKAGE_DIR=$(get_package_path prettytable) -# Only fix version 0.7.2 -dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) -if [[ -d $dir ]]; then - sudo chmod +r $dir/* -fi - -# Fix httplib2 0.8 permissions -# Don't specify --upgrade so we use the existing package if present -pip_install httplib2 -PACKAGE_DIR=$(get_package_path httplib2) -# Only fix version 0.8 -dir=$(echo $PACKAGE_DIR-0.8*) -if [[ -d $dir ]]; then - sudo chmod +r $dir/* -fi - -# Ubuntu 12.04 -# ------------ - -# We can regularly get kernel crashes on the 12.04 default kernel, so attempt -# to install a new kernel -if [[ ${DISTRO} =~ (precise) ]]; then - # Finally, because we suspect the Precise kernel is problematic, install a new kernel - UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL) - if [[ $UPGRADE_KERNEL == "True" ]]; then - if [[ ! `uname -r` =~ (^3\.11) ]]; then - apt_get install linux-generic-lts-saucy - echo "Installing Saucy LTS kernel, please reboot before proceeding" - exit 1 - fi +function fixup_fedora { + if ! 
is_fedora; then + return fi -fi - - -# RHEL6 -# ----- - -if [[ $DISTRO =~ (rhel6) ]]; then - # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime sudo setenforce 0 fi - # If the ``dbus`` package was installed by DevStack dependencies the - # uuid may not be generated because the service was never started (PR#598200), - # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` - # does not exist. - sudo service messagebus restart - - # The following workarounds break xenserver - if [ "$VIRT_DRIVER" != 'xenserver' ]; then - # An old version of ``python-crypto`` (2.0.1) may be installed on a - # fresh system via Anaconda and the dependency chain - # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. - # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` - # file but leave most of the actual library files behind in - # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` - # will install over the packaged files resulting - # in a useless mess of old, rpm-packaged files and pip-installed files. - # Remove the package so that ``pip install python-crypto`` installs - # cleanly. - # Note: other RPM packages may require ``python-crypto`` as well. - # For example, RHEL6 does not install ``python-paramiko packages``. - uninstall_package python-crypto - - # A similar situation occurs with ``python-lxml``, which is required by - # ``ipa-client``, an auditing package we don't care about. The - # build-dependencies needed for ``pip install lxml`` (``gcc``, - # ``libxml2-dev`` and ``libxslt-dev``) are present in - # ``files/rpms/general``. - uninstall_package python-lxml + FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD) + if [[ $FORCE_FIREWALLD == "False" ]]; then + # On Fedora 20 firewalld interacts badly with libvirt and + # slows things down significantly (this issue was fixed in + # later fedoras). There was also an additional issue with + # firewalld hanging after install of libvirt with polkit [1]. + # firewalld also causes problems with neturon+ipv6 [2] + # + # Note we do the same as the RDO packages and stop & disable, + # rather than remove. This is because other packages might + # have the dependency [3][4]. + # + # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031 + # [2] https://bugs.launchpad.net/neutron/+bug/1455303 + # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp + # [4] https://docs.openstack.org/devstack/latest/guides/neutron.html + if is_package_installed firewalld; then + sudo systemctl disable firewalld + # The iptables service files are no longer included by default, + # at least on a baremetal Fedora 21 Server install. + install_package iptables-services + sudo systemctl enable iptables + sudo systemctl stop firewalld + sudo systemctl start iptables + fi fi - # ``setup.py`` contains a ``setup_requires`` package that is supposed - # to be transient. However, RHEL6 distribute has a bug where - # ``setup_requires`` registers entry points that are not cleaned - # out properly after the setup-phase resulting in installation failures - # (bz#924038). Pre-install the problem package so the ``setup_requires`` - # dependency is satisfied and it will not be installed transiently. - # Note we do this before the track-depends in ``stack.sh``. 
- pip_install hgtools - - - # RHEL6's version of ``python-nose`` is incompatible with Tempest. - # Install nose 1.1 (Tempest-compatible) from EPEL - install_package python-nose1.1 - # Add a symlink for the new nosetests to allow tox for Tempest to - # work unmolested. - sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests - - # workaround for https://code.google.com/p/unittest-ext/issues/detail?id=79 - install_package python-unittest2 patch - pip_install discover - (cd /usr/lib/python2.6/site-packages/; sudo patch <"$FILES/patches/unittest2-discover.patch" || echo 'Assume already applied') - # Make sure the discover.pyc is up to date - sudo rm /usr/lib/python2.6/site-packages/discover.pyc || true - sudo python -c 'import discover' -fi + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info + + # After updating setuptools based on the requirements, the files from the + # python3-setuptools RPM are deleted, it breaks some tools such as semanage + # (used in diskimage-builder) that use the -s flag of the python + # interpreter, enforcing the use of the packages from /usr/lib. + # Importing setuptools in a such environment fails. + # Enforce the package re-installation to fix those applications. + if is_package_installed python3-setuptools; then + sudo dnf reinstall -y python3-setuptools + fi +} + +function fixup_ubuntu { + if ! is_ubuntu; then + return + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info +} + +function fixup_all { + fixup_ubuntu + fixup_fedora +} diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py new file mode 100644 index 0000000000..bc28515a26 --- /dev/null +++ b/tools/generate-devstack-plugins-list.py @@ -0,0 +1,86 @@ +#! /usr/bin/env python3 + +# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This script is intended to be run as part of a periodic proposal bot +# job in OpenStack infrastructure. 
+# +# In order to function correctly, the environment in which the +# script runs must have +# * network access to the review.opendev.org Gerrit API +# working directory +# * network access to https://opendev.org/ + +import functools +import logging +import json +import requests + +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + +logging.basicConfig(level=logging.DEBUG) + +url = 'https://review.opendev.org/projects/' + +# This is what a project looks like +''' + "openstack-attic/akanda": { + "id": "openstack-attic%2Fakanda", + "state": "READ_ONLY" + }, +''' + +def is_in_wanted_namespace(proj): + # only interested in openstack or x namespace (e.g. not retired + # stackforge, etc). + # + # openstack/openstack "super-repo" of openstack projects as + # submodules, that can cause gitea to 500 timeout and thus stop + # this script. Skip it. + if proj.startswith('stackforge/') or \ + proj.startswith('stackforge-attic/') or \ + proj == "openstack/openstack": + return False + else: + return True + +# Check if this project has a plugin file +def has_devstack_plugin(session, proj): + # Don't link in the deb packaging repos + if "openstack/deb-" in proj: + return False + r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj) + return r.status_code == 200 + +logging.debug("Getting project list from %s" % url) +r = requests.get(url) +projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:]))) +logging.debug("Found %d projects" % len(projects)) + +s = requests.Session() +# sometimes gitea gives us a 500 error; retry sanely +# https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. +retries = Retry(total=3, backoff_factor=1, + status_forcelist=[ 500 ], + raise_on_status=False) +s.mount('https://', HTTPAdapter(max_retries=retries)) + +found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) + +for project in found_plugins: + print(project) diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh new file mode 100755 index 0000000000..3307943df9 --- /dev/null +++ b/tools/generate-devstack-plugins-list.sh @@ -0,0 +1,91 @@ +#!/bin/bash -ex + +# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This script is intended to be run as a periodic proposal bot job +# in OpenStack infrastructure, though you can run it as a one-off. 
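A note on the Python helper above: Gerrit's REST API prepends the anti-XSSI guard ``)]}'`` to every JSON response, which is why the script parses with ``json.loads(r.text[4:])`` rather than ``r.json()``. A standalone sketch of the same fetch, assuming only that Gerrit keeps that standard prefix:

    import json
    import requests

    r = requests.get('https://review.opendev.org/projects/')
    # Gerrit prefixes JSON bodies with the magic ")]}'" anti-XSSI line
    assert r.text.startswith(")]}'")
    projects = json.loads(r.text[4:])
    print('%d projects' % len(projects))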
+# +# In order to function correctly, the environment in which the +# script runs must have +# * a writable doc/source directory relative to the current +# working directory +# AND ( ( +# * git +# * all git repos meant to be searched for plugins cloned and +# at the desired level of up-to-datedness +# * the environment variable git_dir pointing to the location +# * of said git repositories +# ) OR ( +# * network access to the review.opendev.org Gerrit API +# working directory +# * network access to https://opendev.org +# )) +# +# If a file named data/devstack-plugins-registry.header or +# data/devstack-plugins-registry.footer is found relative to the +# current working directory, it will be prepended or appended to +# the generated reStructuredText plugins table respectively. + +# Print the title underline for a RST table. Argument is the length +# of the first column, second column is assumed to be "URL" +function title_underline { + local len=$1 + while [[ $len -gt 0 ]]; do + printf "=" + len=$(( len - 1)) + done + printf " ===\n" +} + +( +if [[ -r data/devstack-plugins-registry.header ]]; then + cat data/devstack-plugins-registry.header +fi + +sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py) + +# find the length of the name column & pad +name_col_len=$(echo "${sorted_plugins}" | wc -L) +name_col_len=$(( name_col_len + 2 )) + +# ====================== === +# Plugin Name URL +# ====================== === +# foobar `https://... `__ +# ... + +printf "\n\n" +title_underline ${name_col_len} +printf "%-${name_col_len}s %s\n" "Plugin Name" "URL" +title_underline ${name_col_len} + +for plugin in ${sorted_plugins}; do + giturl="https://opendev.org/${plugin}" + gitlink="https://opendev.org/${plugin}" + printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__" +done + +title_underline ${name_col_len} + +printf "\n\n" + +if [[ -r data/devstack-plugins-registry.footer ]]; then + cat data/devstack-plugins-registry.footer +fi +) > doc/source/plugin-registry.rst + +if [[ -n ${1} ]]; then + cp doc/source/plugin-registry.rst ${1}/doc/source/plugin-registry.rst +fi diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..b958af61b2 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,220 @@ +#!/usr/bin/python3 + +import argparse +import csv +import datetime +import glob +import itertools +import json +import logging +import os +import re +import socket +import subprocess +import sys + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) + +LOG = logging.getLogger('perf') + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = tryint(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + 
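# For reference, `systemctl show <unit> -pMemoryCurrent` emits plain
# "Property=value" lines, e.g. (illustrative values, not captured output):
#
#   MemoryCurrent=104857600
#
# get_service_stats() above splits each line on '=' and coerces values
# with tryint(); unset properties such as "MemoryCurrent=[not set]"
# simply remain strings.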
+def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + + with db: + with db.cursor() as cur: + cur.execute('SELECT db,op,count FROM queries') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. So assume + # 'python-urllib' is tempest so we can tell it apart. 
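        # For example (illustrative agent strings, not from a real log):
        #   "python-urllib3/1.26.5"    -> counted as "tempest"
        #   "python-novaclient/18.6.0" -> normalized to "novaclient"
        #   "curl/7.81.0"              -> skipped via ignore_agents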
+ if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + if not service.isalpha(): + ignored_services.add(service) + continue + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) + + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) + + # Flatten this for ES + return [{'service': service, 'log': os.path.basename(logfile), + **vals} + for service, vals in stats.items()] + + +def get_http_stats(logfiles): + return list(itertools.chain.from_iterable(get_http_stats_for_log(log) + for log in logfiles)) + + +def get_report_info(): + return { + 'timestamp': datetime.datetime.now().isoformat(), + 'hostname': socket.gethostname(), + 'version': 2, + } + + +if __name__ == '__main__': + process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd'] + parser = argparse.ArgumentParser() + parser.add_argument('--db-user', default='root', + help=('MySQL user for collecting stats ' + '(default: "root")')) + parser.add_argument('--db-pass', default=None, + help='MySQL password for db-user') + parser.add_argument('--db-host', default='localhost', + help='MySQL hostname') + parser.add_argument('--apache-log', action='append', default=[], + help='Collect API call stats from this apache log') + parser.add_argument('--process', action='append', + default=process_defaults, + help=('Include process stats for this cmdline regex ' + '(default is %s)' % ','.join(process_defaults))) + args = parser.parse_args() + + logging.basicConfig(level=logging.WARNING) + + data = { + 'services': get_services_stats(), + 'db': pymysql and args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': psutil and get_processes_stats(args.process) or [], + 'api': get_http_stats(args.apache_log), + 'report': get_report_info(), + } + + print(json.dumps(data, indent=2)) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh deleted file mode 100755 index 225742c041..0000000000 --- a/tools/get_uec_image.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -# **get_uec_image.sh** - -# Download and prepare Ubuntu UEC images - -CACHEDIR=${CACHEDIR:-/opt/stack/cache} -ROOTSIZE=${ROOTSIZE:-2000M} - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. 
$TOP_DIR/functions - -# Exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -function usage { - echo "Usage: $0 - Download and prepare Ubuntu UEC images" - echo "" - echo "$0 [-r rootsize] release imagefile [kernel]" - echo "" - echo "-r size - root fs size (min 2000MB)" - echo "release - Ubuntu release: lucid - quantal" - echo "imagefile - output image file" - echo "kernel - output kernel" - exit 1 -} - -# Clean up any resources that may be in use -function cleanup { - set +o errexit - - # Mop up temporary files - if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then - rm -f $IMG_FILE_TMP - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -while getopts hr: c; do - case $c in - h) usage - ;; - r) ROOTSIZE=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` - -if [[ ! "$#" -eq "2" && ! "$#" -eq "3" ]]; then - usage -fi - -# Default args -DIST_NAME=$1 -IMG_FILE=$2 -IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` -KERNEL=$3 - -case $DIST_NAME in - saucy) ;; - raring) ;; - quantal) ;; - precise) ;; - *) echo "Unknown release: $DIST_NAME" - usage - ;; -esac - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Check dependencies -if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then - # Missing KVM? - apt_get install qemu-kvm cloud-utils -fi - -# Find resize script -RESIZE=`which resize-part-image || which uec-resize-image` -if [ -z "$RESIZE" ]; then - echo "resize tool from cloud-utils not found" - exit 1 -fi - -# Get the UEC image -UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -if [ ! -d $CACHEDIR/$DIST_NAME ]; then - mkdir -p $CACHEDIR/$DIST_NAME -fi -if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then - (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz) - (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz) -fi - -$RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP -mv $IMG_FILE_TMP $IMG_FILE - -# Copy kernel to destination -if [ -n "$KERNEL" ]; then - cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL -fi - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/image_list.sh b/tools/image_list.sh index f9a4e2f518..81231be9f3 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -1,16 +1,31 @@ #!/bin/bash -# Keep track of the devstack directory +# Print out a list of image and other files to download for caching. +# This is mostly used by the OpenStack infrasturucture during daily +# image builds to save the large images to /opt/cache/files (see [1]) +# +# The two lists of URL's downloaded are the IMAGE_URLS and +# EXTRA_CACHE_URLS, which are setup in stackrc +# +# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos + +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) +# The following "source" implicitly calls get_default_host_ip() in +# stackrc and will die if the selected default IP happens to lie +# in the default ranges for FIXED_RANGE or FLOATING_RANGE. Since we +# do not really need HOST_IP to be properly set in the remainder of +# this script, just set it to some dummy value and make stackrc happy. +HOST_IP=SKIP source $TOP_DIR/functions # Possible virt drivers, if we have more, add them here. Always keep # dummy in the end position to trigger the fall through case. -DRIVERS="openvz ironic libvirt vsphere xenserver dummy" +DRIVERS="openvz ironic libvirt vsphere dummy" # Extra variables to trigger getting additional images. 
-ENABLED_SERVICES=h-api +export ENABLED_SERVICES="h-api,tr-api" HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda" PRECACHE_IMAGES=True @@ -25,12 +40,20 @@ for driver in $DRIVERS; do ALL_IMAGES+=$URLS done -# Make a nice list -echo $ALL_IMAGES | tr ',' '\n' | sort | uniq - # Sanity check - ensure we have a minimum number of images num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l) -if [[ "$num" -lt 5 ]]; then +if [[ "$num" -lt 4 ]]; then echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right." exit 1 fi + +# This is extra non-image files that we want pre-cached. This is kept +# in a separate list because devstack loops over the IMAGE_LIST to +# upload files glance and these aren't images. (This was a bit of an +# after-thought which is why the naming around this is very +# image-centric) +URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS) +ALL_IMAGES+=$URLS + +# Make a nice combined list +echo $ALL_IMAGES | tr ',' '\n' | sort | uniq diff --git a/tools/info.sh b/tools/info.sh index a8f9544073..282667f9d0 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -2,19 +2,19 @@ # **info.sh** -# Produce a report on the state of devstack installs +# Produce a report on the state of DevStack installs # # Output fields are separated with '|' chars # Output types are git,localrc,os,pip,pkg: # # git||[] -# localtc|= +# localrc|= # os|= # pip|| # pkg|| function usage { - echo "$0 - Report on the devstack configuration" + echo "$0 - Report on the DevStack configuration" echo "" echo "Usage: $0" exit 1 @@ -52,10 +52,6 @@ GetDistro echo "os|distro=$DISTRO" echo "os|vendor=$os_VENDOR" echo "os|release=$os_RELEASE" -if [ -n "$os_UPDATE" ]; then - echo "os|version=$os_UPDATE" -fi - # Repos # ----- diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh deleted file mode 100755 index 9a4f0369d5..0000000000 --- a/tools/install_openvpn.sh +++ /dev/null @@ -1,221 +0,0 @@ -#!/bin/bash - -# **install_openvpn.sh** - -# Install OpenVPN and generate required certificates -# -# install_openvpn.sh --client name -# install_openvpn.sh --server [name] -# -# name is used on the CN of the generated cert, and the filename of -# the configuration, certificate and key files. -# -# --server mode configures the host with a running OpenVPN server instance -# --client mode creates a tarball of a client configuration for this server - -# Get config file -if [ -e localrc ]; then - . localrc -fi -if [ -e vpnrc ]; then - . 
vpnrc -fi - -# Do some IP manipulation -function cidr2netmask { - set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 - if [[ $1 -gt 1 ]]; then - shift $1 - else - shift - fi - echo ${1-0}.${2-0}.${3-0}.${4-0} -} - -FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1` -FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2` -FIXED_MASK=`cidr2netmask $FIXED_CIDR` - -# VPN Config -VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 -VPN_PROTO=${VPN_PROTO:-tcp} -VPN_PORT=${VPN_PORT:-6081} -VPN_DEV=${VPN_DEV:-tap0} -VPN_BRIDGE=${VPN_BRIDGE:-br100} -VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE} -VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET} -VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK} -VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" - -VPN_DIR=/etc/openvpn -CA_DIR=$VPN_DIR/easy-rsa - -function usage { - echo "$0 - OpenVPN install and certificate generation" - echo "" - echo "$0 --client name" - echo "$0 --server [name]" - echo "" - echo " --server mode configures the host with a running OpenVPN server instance" - echo " --client mode creates a tarball of a client configuration for this server" - exit 1 -} - -if [ -z $1 ]; then - usage -fi - -# Install OpenVPN -VPN_EXEC=`which openvpn` -if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then - apt-get install -y openvpn bridge-utils -fi -if [ ! -d $CA_DIR ]; then - cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/.. && pwd) - -WEB_DIR=$TOP_DIR/../vpn -if [[ ! -d $WEB_DIR ]]; then - mkdir -p $WEB_DIR -fi -WEB_DIR=$(cd $TOP_DIR/../vpn && pwd) - -cd $CA_DIR -source ./vars - -# Override the defaults -export KEY_COUNTRY="US" -export KEY_PROVINCE="TX" -export KEY_CITY="SanAntonio" -export KEY_ORG="Cloudbuilders" -export KEY_EMAIL="rcb@lists.rackspace.com" - -if [ ! 
-r $CA_DIR/keys/dh1024.pem ]; then - # Initialize a new CA - $CA_DIR/clean-all - $CA_DIR/build-dh - $CA_DIR/pkitool --initca - openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key -fi - -function do_server { - NAME=$1 - # Generate server certificate - $CA_DIR/pkitool --server $NAME - - (cd $CA_DIR/keys; - cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR - ) - cat >$VPN_DIR/br-up <$VPN_DIR/br-down <$VPN_DIR/$NAME.conf <$TMP_DIR/$HOST.conf <$VPN_DIR/hostname - fi - do_server $NAME - ;; - --clean) $CA_DIR/clean-all - ;; - *) usage -esac diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 1eb9e7a3f5..027693fc0a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -2,56 +2,50 @@ # **install_pip.sh** -# install_pip.sh [--pip-version ] [--use-get-pip] [--force] -# # Update pip and friends to a known common version # Assumptions: -# - update pip to $INSTALL_PIP_VERSION +# - PYTHON3_VERSION refers to a version already installed set -o errexit -set -o xtrace # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` -# Change dir to top of devstack +# Change dir to top of DevStack cd $TOP_DIR # Import common functions -source $TOP_DIR/functions +source $TOP_DIR/stackrc + +# don't start tracing until after we've sourced the world +set -o xtrace FILES=$TOP_DIR/files -# Handle arguments - -USE_GET_PIP=${USE_GET_PIP:-0} -INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} -while [[ -n "$1" ]]; do - case $1 in - --force) - FORCE=1 - ;; - --pip-version) - INSTALL_PIP_VERSION="$2" - shift - ;; - --use-get-pip) - USE_GET_PIP=1; - ;; - esac - shift -done - -PIP_GET_PIP_URL=https://raw.github.com/pypa/pip/master/contrib/get-pip.py -PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSION.tar.gz +# The URL from where the get-pip.py file gets downloaded. If a local +# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that +# mirror in local.conf to avoid download timeouts. +# Example: +# PIP_GET_PIP_URL="http://local-server/get-pip.py" +# +# Note that if get-pip.py already exists in $FILES this script will +# not re-download or check for a new version. For example, this is +# done by openstack-infra diskimage-builder elements as part of image +# preparation [1]. This prevents any network access, which can be +# unreliable in CI situations. +# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip + +PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} GetDistro echo "Distro: $DISTRO" function get_versions { - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) + # FIXME(dhellmann): Deal with multiple python versions here? This + # is just used for reporting, so maybe not? + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" @@ -62,36 +56,94 @@ function get_versions { function install_get_pip { - if [[ ! -r $FILES/get-pip.py ]]; then - (cd $FILES; \ - curl -O $PIP_GET_PIP_URL; \ - ) + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + + # If get-pip.py isn't python, delete it. This was probably an + # outage on the server. + if [[ -r $_local_pip ]]; then + if ! 
head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip + fi + fi + + # The OpenStack gate and others put a cached version of get-pip.py + # for this to find, explicitly to avoid download issues. + # + # However, if DevStack *did* download the file, we want to check + # for updates; people can leave their stacks around for a long + # time and in the mean-time pip might get upgraded. + # + # Thus we use curl's "-z" feature to always check the modified + # since and only download if a new version is out -- but only if + # it seems we downloaded the file originally. + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then + # only test freshness if LOCAL_PIP is actually there, + # otherwise we generate a scary warning. + local timecond="" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" + fi + + curl -f --retry 6 --retry-delay 5 \ + $timecond -o $_local_pip $_pip_url || \ + die $LINENO "Download of get-pip.py failed" + touch $_local_pip.downloaded fi - sudo -E python $FILES/get-pip.py + sudo -H -E python${PYTHON3_VERSION} $_local_pip } -function install_pip_tarball { - if [[ ! -r $FILES/pip-$INSTALL_PIP_VERSION.tar.gz ]]; then - (cd $FILES; \ - curl -O $PIP_TAR_URL; \ - tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null) + +function configure_pypi_alternative_url { + PIP_ROOT_FOLDER="$HOME/.pip" + PIP_CONFIG_FILE="$PIP_ROOT_FOLDER/pip.conf" + if [[ ! -d $PIP_ROOT_FOLDER ]]; then + echo "Creating $PIP_ROOT_FOLDER" + mkdir $PIP_ROOT_FOLDER + fi + if [[ ! -f $PIP_CONFIG_FILE ]]; then + echo "Creating $PIP_CONFIG_FILE" + touch $PIP_CONFIG_FILE fi - (cd $FILES/pip-$INSTALL_PIP_VERSION; \ - sudo -E python setup.py install 1>/dev/null) + if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then + # It means that the index-url does not exist + iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE" + fi + } # Show starting versions get_versions -# Do pip - -# Eradicate any and all system packages -uninstall_package python-pip +if [[ -n $PYPI_ALTERNATIVE_URL ]]; then + configure_pypi_alternative_url +fi -if [[ "$USE_GET_PIP" == "1" ]]; then - install_get_pip +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. + # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. This is included via rpms/general + : # Simply fall through +elif is_ubuntu; then + # pip on Ubuntu 20.04 and higher is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else - install_pip_tarball + install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. 
+ pip_install_gr setuptools fi +set -x + + get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 9651083cb3..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -8,21 +8,27 @@ # # -f Force an install run now -if [[ -n "$1" && "$1" = "-f" ]]; then - FORCE_PREREQ=1 -fi +FORCE_PREREQ=0 + +while getopts ":f" opt; do + case $opt in + f) + FORCE_PREREQ=1 + ;; + esac +done -# If TOP_DIR is set we're being sourced rather than running stand-alone +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone # or in a sub-shell if [[ -z "$TOP_DIR" ]]; then - # Keep track of the devstack directory + # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) # Import common functions source $TOP_DIR/functions # Determine what system we are running on. This provides ``os_VENDOR``, - # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` + # ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` # and ``DISTRO`` GetDistro @@ -55,9 +61,11 @@ export_proxy_variables # ================ # Install package requirements -PACKAGES=$(get_packages general $ENABLED_SERVICES) +PACKAGES=$(get_packages general,$ENABLED_SERVICES) +PACKAGES="$PACKAGES $(get_plugin_packages)" + if is_ubuntu && echo $PACKAGES | grep -q dkms ; then - # ensure headers for the running kernel are installed for any DKMS builds + # Ensure headers for the running kernel are installed for any DKMS builds PACKAGES="$PACKAGES linux-headers-$(uname -r)" fi @@ -66,13 +74,13 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes deleted file mode 100755 index adeca5cd9e..0000000000 --- a/tools/ironic/scripts/cleanup-nodes +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# **cleanup-nodes** - -# Cleans up baremetal poseur nodes and volumes created during ironic setup -# Assumes calling user has proper libvirt group membership and access. 
- -set -exu - -LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -VM_COUNT=$1 -NETWORK_BRIDGE=$2 - -export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI - -for (( idx=0; idx<$VM_COUNT; idx++ )); do - NAME="baremetal${NETWORK_BRIDGE}_${idx}" - VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2" - virsh list | grep -q $NAME && virsh destroy $NAME - virsh list --inactive | grep -q $NAME && virsh undefine $NAME - - if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then - virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && - virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL - fi -done diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm deleted file mode 100755 index 4c42c491c5..0000000000 --- a/tools/ironic/scripts/configure-vm +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os.path - -import libvirt - -templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), - 'templates') - - -CONSOLE_LOG = """ - - - - - - - - - - - - - - - -""" - - -def main(): - parser = argparse.ArgumentParser( - description="Configure a kvm virtual machine for the seed image.") - parser.add_argument('--name', default='seed', - help='the name to give the machine in libvirt.') - parser.add_argument('--image', - help='Use a custom image file (must be qcow2).') - parser.add_argument('--engine', default='qemu', - help='The virtualization engine to use') - parser.add_argument('--arch', default='i686', - help='The architecture to use') - parser.add_argument('--memory', default='2097152', - help="Maximum memory for the VM in KB.") - parser.add_argument('--cpus', default='1', - help="CPU count for the VM.") - parser.add_argument('--bootdev', default='hd', - help="What boot device to use (hd/network).") - parser.add_argument('--network', default="brbm", - help='The libvirt network name to use') - parser.add_argument('--libvirt-nic-driver', default='e1000', - help='The libvirt network driver to use') - parser.add_argument('--console-log', - help='File to log console') - parser.add_argument('--emulator', default=None, - help='Path to emulator bin for vm template') - args = parser.parse_args() - with file(templatedir + '/vm.xml', 'rb') as f: - source_template = f.read() - params = { - 'name': args.name, - 'imagefile': args.image, - 'engine': args.engine, - 'arch': args.arch, - 'memory': args.memory, - 'cpus': args.cpus, - 'bootdev': args.bootdev, - 'network': args.network, - 'nicdriver': args.libvirt_nic_driver, - 'emulator': args.emulator, - } - - if args.emulator: - params['emulator'] = args.emulator - else: - if os.path.exists("/usr/bin/kvm"): # Debian - params['emulator'] = "/usr/bin/kvm" - elif os.path.exists("/usr/bin/qemu-kvm"): # Redhat - params['emulator'] = "/usr/bin/qemu-kvm" - - if args.console_log: - params['console_log'] = CONSOLE_LOG % {'console_log': args.console_log} - else: - params['console_log'] = '' - libvirt_template = source_template % params - conn = libvirt.open("qemu:///system") - - a = conn.defineXML(libvirt_template) - print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) - -if __name__ == '__main__': - main() diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes deleted file mode 100755 index 140bffe46f..0000000000 --- a/tools/ironic/scripts/create-nodes +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash - -# **create-nodes** - -# Creates baremetal poseur nodes for ironic testing purposes - -set 
-ex - -# Keep track of the devstack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) - -CPU=$1 -MEM=$(( 1024 * $2 )) -# extra G to allow fuzz for partition table : flavor size and registered size -# need to be different to actual size. -DISK=$(( $3 + 1)) - -case $4 in - i386) ARCH='i686' ;; - amd64) ARCH='x86_64' ;; - *) echo "Unsupported arch $4!" ; exit 1 ;; -esac - -TOTAL=$(($5 - 1)) -BRIDGE=$6 -EMULATOR=$7 -LOGDIR=$8 - -LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"} -LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI - -if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then - virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2 - virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2 - virsh pool-start $LIBVIRT_STORAGE_POOL >&2 -fi - -pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') -if [ "$pool_state" != "running" ] ; then - [ ! -d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images - virsh pool-start $LIBVIRT_STORAGE_POOL >&2 -fi - -if [ -n "$LOGDIR" ] ; then - mkdir -p "$LOGDIR" -fi - -PREALLOC= -if [ -f /etc/debian_version ]; then - PREALLOC="--prealloc-metadata" -fi - -DOMS="" -for idx in $(seq 0 $TOTAL) ; do - NAME="baremetal${BRIDGE}_${idx}" - if [ -n "$LOGDIR" ] ; then - VM_LOGGING="--console-log $LOGDIR/${NAME}_console.log" - else - VM_LOGGING="" - fi - DOMS="$DOMS $NAME" - VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2" - (virsh list --all | grep -q $NAME) && continue - - virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && - virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 - virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2 - volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME) - # Pre-touch the VM to set +C, as it can only be set on empty files. - sudo touch "$volume_path" - sudo chattr +C "$volume_path" || true - $TOP_DIR/scripts/configure-vm \ - --bootdev network --name $NAME --image "$volume_path" \ - --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \ - --emulator $EMULATOR --network $BRIDGE $VM_LOGGING >&2 -done - -for dom in $DOMS ; do - # echo mac - virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2 -done diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network deleted file mode 100755 index e326bf8ccd..0000000000 --- a/tools/ironic/scripts/setup-network +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# **setup-network** - -# Setups openvswitch libvirt network suitable for -# running baremetal poseur nodes for ironic testing purposes - -set -exu - -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -# Keep track of the devstack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -BRIDGE_SUFFIX=${1:-''} -BRIDGE_NAME=brbm$BRIDGE_SUFFIX - -export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI" - -# Only add bridge if missing -(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} - -# remove bridge before replacing it. 
-(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} -(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} - -virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) -virsh net-autostart ${BRIDGE_NAME} -virsh net-start ${BRIDGE_NAME} diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml deleted file mode 100644 index 0769d3f1d0..0000000000 --- a/tools/ironic/templates/brbm.xml +++ /dev/null @@ -1,6 +0,0 @@ - - brbm - - - - diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template deleted file mode 100644 index 5f3d03f3bb..0000000000 --- a/tools/ironic/templates/tftpd-xinetd.template +++ /dev/null @@ -1,14 +0,0 @@ -service tftp -{ - protocol = udp - port = 69 - socket_type = dgram - wait = yes - user = root - server = /usr/sbin/in.tftpd - server_args = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% - disable = no - # This is a workaround for Fedora, where TFTP will listen only on - # IPv6 endpoint, if IPv4 flag is not used. - flags = IPv4 -} diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml deleted file mode 100644 index 4f40334b7d..0000000000 --- a/tools/ironic/templates/vm.xml +++ /dev/null @@ -1,48 +0,0 @@ - - %(name)s - %(memory)s - %(cpus)s - - hvm - - - - - - - - - - destroy - restart - restart - - %(emulator)s - - - - -