From f993b2353fdf3fc643afa78df9b64af446352220 Mon Sep 17 00:00:00 2001 From: Stef T Date: Thu, 8 Nov 2012 10:46:48 -0500 Subject: [PATCH 001/207] Enable Xen/DevStackDomU to have larger disk * Size of xvda can be specified via xenrc * Fixes bug 1076430 Change-Id: Ia4ffef98b01fa9572e43c46275a132b2b1e5f689 --- tools/xen/scripts/install_ubuntu_template.sh | 2 ++ tools/xen/xenrc | 1 + 2 files changed, 3 insertions(+) diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index f67547b0..43b6decd 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -45,6 +45,7 @@ fi # Clone built-in template to create new template new_uuid=$(xe vm-clone uuid=$builtin_uuid \ new-name-label="$UBUNTU_INST_TEMPLATE_NAME") +disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) # Some of these settings can be found in example preseed files # however these need to be answered before the netinstall @@ -73,6 +74,7 @@ xe template-param-set uuid=$new_uuid \ PV-args="$pvargs" \ other-config:debian-release="$UBUNTU_INST_RELEASE" \ other-config:default_template=true \ + other-config:disks='' \ other-config:install-arch="$UBUNTU_INST_ARCH" echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 0365a25e..1a5a2a93 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -11,6 +11,7 @@ GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} # Size of image VDI_MB=${VDI_MB:-5000} OSDOMU_MEM_MB=1024 +OSDOMU_VDI_GB=8 # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} From af5cd77e180bad81aadc51e3f224910a8fe02141 Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Mon, 12 Nov 2012 16:36:42 +0800 Subject: [PATCH 002/207] Change the ceilometer's git repository. Changed the ceilometer's git repository from stackforge/ceilometer.git to openstack/ceilometer.git. 
Change-Id: I8cf1854641fc7df318f42a56ba061c93614728aa --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 5be872ba..a02bdc01 100644 --- a/stackrc +++ b/stackrc @@ -28,7 +28,7 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,metadata GIT_BASE=https://github.com # metering service -CEILOMETER_REPO=https://github.com/stackforge/ceilometer.git +CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git CEILOMETER_BRANCH=master # volume service From 5db5bfa28f48b0524db6d25d340d12c96270ac0e Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 29 Oct 2012 11:25:29 -0700 Subject: [PATCH 003/207] Make exercise.sh with quantum work - added support for quantum-debug command - added ping and ssh method for quantum Change-Id: Iebf8a0e9e2ed2bb56bee6533e69827e6caa2bc82 --- exercises/boot_from_volume.sh | 10 +++++ exercises/euca.sh | 10 +++++ exercises/floating_ips.sh | 35 ++++++++++++------ exercises/quantum-adv-test.sh | 70 ++++++++++++++++++----------------- exercises/volumes.sh | 10 +++++ functions | 30 +++++++++++++-- lib/quantum | 69 ++++++++++++++++++++++++++++++++++ openrc | 4 +- stack.sh | 11 ++++++ 9 files changed, 199 insertions(+), 50 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 460b50cf..4c7890bb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,6 +32,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -168,6 +174,10 @@ nova floating-ip-delete $FLOATING_IP || \ # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack 
Exercise: $0" diff --git a/exercises/euca.sh b/exercises/euca.sh index b1214930..29141ec5 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,6 +33,12 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -169,6 +175,10 @@ fi # Delete group euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 67878787..ae5691f4 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -31,6 +31,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -155,14 +161,16 @@ nova add-floating-ip $VM_UUID $FLOATING_IP || \ # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -# Allocate an IP from second floating pool -TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` -die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" +if ! is_service_enabled quantum; then + # Allocate an IP from second floating pool + TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` + die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" -# list floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" - exit 1 + # list floating addresses + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 + fi fi # dis-allow icmp traffic (ping) @@ -171,12 +179,13 @@ nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deletin # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT + ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail fi -# Delete second floating IP -nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" - +if ! is_service_enabled quantum; then + # Delete second floating IP + nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" +fi # de-allocate the floating ip nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" @@ -193,6 +202,10 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 8f15b634..2ee82ff2 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -52,13 +52,17 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import exercise configuration -source $TOP_DIR/exerciserc - # If quantum is not enabled we exit with exitcode 55 which 
mean # exercise is skipped. is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55 +# Import quantum fucntions +source $TOP_DIR/lib/quantum +setup_quantum + +# Import exercise configuration +source $TOP_DIR/exerciserc + #------------------------------------------------------------------------------ # Test settings for quantum #------------------------------------------------------------------------------ @@ -76,14 +80,14 @@ DEMO1_NUM_NET=1 DEMO2_NUM_NET=2 PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.1.0.0/24" -DEMO2_NET1_CIDR="10.2.0.0/24" -DEMO2_NET2_CIDR="10.2.1.0/24" +DEMO1_NET1_CIDR="10.10.0.0/24" +DEMO2_NET1_CIDR="10.20.0.0/24" +DEMO2_NET2_CIDR="10.20.1.0/24" PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.1.0.1" -DEMO2_NET1_GATEWAY="10.2.0.1" -DEMO2_NET2_GATEWAY="10.2.1.1" +DEMO1_NET1_GATEWAY="10.10.0.1" +DEMO2_NET1_GATEWAY="10.20.0.1" +DEMO2_NET2_GATEWAY="10.20.1.1" PUBLIC_NUM_VM=1 DEMO1_NUM_VM=1 @@ -188,7 +192,7 @@ function get_flavor_id { function confirm_server_active { local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova --no_cache show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server '$VM_UUID' did not become active!" 
false fi @@ -232,6 +236,7 @@ function create_tenants { source $TOP_DIR/openrc admin admin add_tenant demo1 demo1 demo1 add_tenant demo2 demo2 demo2 + source $TOP_DIR/openrc demo demo } function delete_tenants_and_users { @@ -241,6 +246,7 @@ function delete_tenants_and_users { remove_user demo2 remove_tenant demo2 echo "removed all tenants" + source $TOP_DIR/openrc demo demo } function create_network { @@ -256,12 +262,8 @@ function create_network { source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - #T0DO(nati) comment out until l3-agent is merged - #local ROUTER_ID=$($QUANTUM router-create --tenant_id $TENANT_ID $ROUTER_NAME| grep ' id ' | awk '{print $4}' ) - #for NET_NAME in ${NET_NAMES//,/ };do - # SUBNET_ID=`get_subnet_id $NET_NAME` - # $QUANTUM router-interface-create $NAME --subnet_id $SUBNET_ID - #done + quantum-debug probe-create $NET_ID + source $TOP_DIR/openrc demo demo } function create_networks { @@ -285,7 +287,7 @@ function create_vm { done #TODO (nati) Add multi-nic test #TODO (nati) Add public-net test - local VM_UUID=`nova --no_cache boot --flavor $(get_flavor_id m1.tiny) \ + local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ --image $(get_image_id) \ $NIC \ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` @@ -301,32 +303,26 @@ function ping_ip { # Test agent connection. Assumes namespaces are disabled, and # that DHCP is in use, but not L3 local VM_NAME=$1 - IP=`nova --no_cache show $VM_NAME | grep 'network' | awk '{print $5}'` - if ! timeout $BOOT_TIMEOUT sh -c "while ! 
ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Could not ping $VM_NAME" - false - fi + local NET_NAME=$2 + IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'` + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { local TENANT=$1 local NUM=$2 local VM_NAME="$TENANT-server$NUM" + local NET_NAME=$3 source $TOP_DIR/openrc $TENANT $TENANT - ping_ip $VM_NAME + ping_ip $VM_NAME $NET_NAME # TODO (nati) test ssh connection # TODO (nati) test inter connection between vm - # TODO (nati) test namespace dhcp # TODO (nati) test dhcp host routes # TODO (nati) test multi-nic - # TODO (nati) use test-agent - # TODO (nati) test L3 forwarding - # TODO (nati) test floating ip - # TODO (nati) test security group } function check_vms { - foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM%' + foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' } function shutdown_vm { @@ -334,12 +330,12 @@ function shutdown_vm { local NUM=$2 source $TOP_DIR/openrc $TENANT $TENANT VM_NAME=${TENANT}-server$NUM - nova --no_cache delete $VM_NAME + nova delete $VM_NAME } function shutdown_vms { foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova --no_cache list | grep -q ACTIVE; do sleep 1; done"; then + if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then echo "Some VMs failed to shutdown" false fi @@ -347,17 +343,22 @@ function shutdown_vms { function delete_network { local TENANT=$1 + local NUM=$2 + local NET_NAME="${TENANT}-net$NUM" source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do - for res in port subnet net;do - quantum ${res}-list -F id -F tenant_id | grep $TENANT_ID | awk '{print $2}' | xargs -I % quantum ${res}-delete % + for net_id in `quantum net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do + delete_probe $net_id + quantum subnet-list | grep $net_id | awk '{print $2}' | xargs -I% quantum subnet-delete % + quantum net-delete $net_id done + source $TOP_DIR/openrc demo demo } function delete_networks { - foreach_tenant 'delete_network ${%TENANT%_NAME}' + foreach_tenant_net 'delete_network ${%TENANT%_NAME} ${%NUM%}' #TODO(nati) add secuirty group check after it is implemented # source $TOP_DIR/openrc demo1 demo1 # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 @@ -474,6 +475,7 @@ main() { } +teardown_quantum #------------------------------------------------------------------------------- # Kick off script. 
#------------------------------------------------------------------------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1c73786e..8533993d 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -30,6 +30,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -206,6 +212,10 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/functions b/functions index dbe9d30a..f806b5a3 100644 --- a/functions +++ b/functions @@ -852,7 +852,11 @@ function yum_install() { # ping check # Uses globals ``ENABLED_SERVICES`` function ping_check() { - _ping_check_novanet "$1" $2 $3 + if is_service_enabled quantum; then + _ping_check_quantum "$1" $2 $3 $4 + return + fi + _ping_check_novanet "$1" $2 $3 $4 } # ping check for nova @@ -861,19 +865,39 @@ function _ping_check_novanet() { local from_net=$1 local ip=$2 local boot_timeout=$3 + local expected=${4:-"True"} + local check_command="" MULTI_HOST=`trueorfalse False $MULTI_HOST` if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then sleep $boot_timeout return fi - if ! timeout $boot_timeout sh -c "while ! ping -c1 -w1 $ip; do sleep 1; done"; then - echo "Couldn't ping server" + if [[ "$expected" = "True" ]]; then + check_command="while ! ping -c1 -w1 $ip; do sleep 1; done" + else + check_command="while ping -c1 -w1 $ip; do sleep 1; done" + fi + if ! 
timeout $boot_timeout sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" + fi exit 1 fi } # ssh check + function ssh_check() { + if is_service_enabled quantum; then + _ssh_check_quantum "$1" $2 $3 $4 $5 + return + fi + _ssh_check_novanet "$1" $2 $3 $4 $5 +} + +function _ssh_check_novanet() { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 diff --git a/lib/quantum b/lib/quantum index f9e17825..ba98b646 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,6 +5,8 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 @@ -57,5 +59,72 @@ function is_quantum_ovs_base_plugin() { return 1 } +function _get_net_id() { + quantum --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "sudo ip netns exec qprobe-$probe_id" +} + +function delete_probe() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function _ping_check_quantum() { + local from_net=$1 + local ip=$2 + local timeout_sec=$3 + local expected=${4:-"True"} + local check_command="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if [[ "$expected" = "True" ]]; then + check_command="while ! 
$probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + else + check_command="while $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + fi + if ! timeout $timeout_sec sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" + fi + exit 1 + fi +} + +# ssh check +function _ssh_check_quantum() { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then + echo "server didn't become ssh-able!" + exit 1 + fi +} + +function setup_quantum() { + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id +} + +function teardown_quantum() { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME +} + # Restore xtrace $XTRACE diff --git a/openrc b/openrc index 0a6a2150..4b6b9b2b 100644 --- a/openrc +++ b/openrc @@ -73,5 +73,5 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 -# set qunatum debug command -export TEST_CONFIG_FILE=/etc/quantum/debug.ini +# set quantum debug command +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} diff --git a/stack.sh b/stack.sh index 59b21670..d15d7e7d 100755 --- a/stack.sh +++ b/stack.sh @@ -341,6 +341,8 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Use quantum-debug command 
+Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} RYU_DIR=$DEST/ryu # Ryu API Host @@ -1503,6 +1505,15 @@ if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD fi + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + Q_DEBUG_CONF_FILE=/etc/quantum/debug.ini + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_DEBUG_CONF_FILE + iniset $Q_L3_CONF_FILE DEFAULT verbose False + iniset $Q_L3_CONF_FILE DEFAULT debug False + iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT root_helper "sudo" + fi fi # Nova From c6d54c10dff89c515e567b49247c7f13eaf4c2e8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 13 Nov 2012 15:08:26 -0500 Subject: [PATCH 004/207] install memcached for swift via files when swift was enabled we were installing memcached via stack.sh, after marking it optional in files. Just use files instead. Change-Id: Ib8ee2d1f47254e805f4747b8aff6e89baa66913c --- files/apts/swift | 2 +- files/rpms/swift | 2 +- stack.sh | 5 ----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/files/apts/swift b/files/apts/swift index f2983778..c52c68b7 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-dev diff --git a/files/rpms/swift b/files/rpms/swift index c9d49e92..ce41ceb8 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-devel diff --git a/stack.sh b/stack.sh index 084f276d..8947382d 100755 --- a/stack.sh +++ b/stack.sh @@ -784,11 +784,6 @@ if is_service_enabled q-agt; then fi fi -if is_service_enabled swift; then - # Install memcached for swift. 
- install_package memcached -fi - TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them From da339829472ffcdc3044f79b76b6cd03608db191 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 14 Nov 2012 12:45:10 +0000 Subject: [PATCH 005/207] Remove hardwired ansolabs urls Fixes bug 1078618. The files are no longer available on ansolabs' servers. The files were put on github, and this change modifies the location used in devstack. Change-Id: I1f512ad3b52d6b04d0e28ce6a532e11bfede1462 --- stack.sh | 2 +- tools/xen/prepare_guest_template.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 40eab36e..0a9e4d47 100755 --- a/stack.sh +++ b/stack.sh @@ -2003,7 +2003,7 @@ if is_service_enabled g-reg; then # Option to upload legacy ami-tty, which works with xenserver if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz" + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" fi for image_url in ${IMAGE_URLS//,/ }; do diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index baf9c3a2..19bd2f84 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -60,7 +60,7 @@ if [ -e "$ISO_DIR" ]; then rm -rf $TMP_DIR else echo "WARNING: no XenServer tools found, falling back to 5.6 tools" - TOOLS_URL="http://images.ansolabs.com/xen/xe-guest-utilities_5.6.100-651_amd64.deb" + TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb" wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}" rm -rf $XS_TOOLS_FILE_NAME From 14246ac16b1c7ba02c7ca40c416ac50a44bc9af4 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Wed, 14 Nov 2012 16:23:04 +0000 Subject: [PATCH 006/207] Provide credentials via ceilometer config file Fixes bug 
1076831 Previously we passed these credentials to the ceilometer central agent via the OS_* environment variables. Since these credentials are now needed by the compute agent also, and have already leaked into the config file to enable the keystone auth token middleware, we now switch over to using the config file only and drop the environment variable usage. Change-Id: I0298d711905a99aa5355fe034bb0e51e53b3be21 --- lib/ceilometer | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 2b014b05..aa1b3960 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -66,6 +66,13 @@ function configure_ceilometer() { cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + # the compute and central agents need these credentials in order to + # call out to the public nova and glance APIs + iniset $CEILOMETER_CONF DEFAULT os_username ceilometer + iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -82,7 +89,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && 
$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } From 6fd2811726c098e0311bc22c84c5da0d6aa89c62 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 13 Nov 2012 16:55:41 -0800 Subject: [PATCH 007/207] Remove support for nova-volume * nova-volume has been removed in Grizzly * part of delete-nova-volume Change-Id: Iba91d69950767823d77aaaa93243b0f476dbb04d --- exercises/boot_from_volume.sh | 4 +- exercises/euca.sh | 2 +- exercises/volumes.sh | 6 +- files/keystone_data.sh | 16 ----- functions | 8 +-- lib/n-vol | 126 ---------------------------------- lib/nova | 9 --- stack.sh | 16 +---- stackrc | 4 -- unstack.sh | 11 +-- 10 files changed, 12 insertions(+), 190 deletions(-) delete mode 100644 lib/n-vol diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index b06c8ddb..4562ac0b 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -35,9 +35,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 so that +# If cinder is not enabled we exit with exitcode 55 so that # the exercise is skipped -is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} diff --git a/exercises/euca.sh b/exercises/euca.sh index b1214930..5480b76f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -73,7 +73,7 @@ fi # Volumes # ------- -if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then +if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` 
die_if_not_set VOLUME_ZONE "Failure to find zone for volume" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 72c8729e..68927393 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers @@ -33,9 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 which mean +# If cinder is not enabled we exit with exitcode 55 which mean # exercise is skipped. -is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3da11bf0..71994a81 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -140,22 +140,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then --role_id $RESELLER_ROLE fi -# Volume -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - VOLUME_SERVICE=$(get_id keystone service-create \ - --name=volume \ - --type=volume \ - --description="Volume Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $VOLUME_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi - # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ diff --git a/functions b/functions index c7f65dbd..90b3b784 100644 --- a/functions +++ b/functions @@ -462,7 +462,7 @@ function 
_cleanup_service_list () { # ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# enable_service n-vol +# enable_service qpid # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -484,7 +484,7 @@ function enable_service() { # ``ENABLED_SERVICES`` list, if they are present. # # For example: -# disable_service n-vol +# disable_service rabbit # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -513,8 +513,8 @@ function disable_all_services() { # Remove all services starting with '-'. For example, to install all default -# services except nova-volume (n-vol) set in ``localrc``: -# ENABLED_SERVICES+=",-n-vol" +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services() { diff --git a/lib/n-vol b/lib/n-vol deleted file mode 100644 index db53582b..00000000 --- a/lib/n-vol +++ /dev/null @@ -1,126 +0,0 @@ -# lib/n-vol -# Install and start Nova volume service - -# Dependencies: -# - functions -# - DATA_DIR must be defined -# - KEYSTONE_AUTH_* must be defined -# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined -# SERVICE_{TENANT_NAME|PASSWORD} must be defined -# _configure_tgt_for_config_d() from lib/cinder - -# stack.sh -# --------- -# install_nvol -# configure_nvol -# init_nvol -# start_nvol -# stop_nvol -# cleanup_nvol - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Name of the LVM volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} -VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} - - -# cleanup_nvol() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_nvol() { - # kill instances (nova) - # delete image 
files (glance) - # This function intentionally left blank - : -} - -# configure_nvol() - Set config files, create data dirs, etc -function configure_nvol() { - # sudo python setup.py deploy - # iniset $XXX_CONF ... - # This function intentionally left blank - : -} - -# init_nvol() - Initialize databases, etc. -function init_nvol() { - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - mkdir -p $NOVA_STATE_PATH/volumes - - if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - start_service tgtd - fi - - # Remove nova iscsi targets - sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done - fi -} - -# install_nvol() - Collect source and prepare -function install_nvol() { - # git clone xxx - # Install is handled when installing Nova - : -} - -# start_nvol() - Start running processes, including screen -function start_nvol() { - # Setup the tgt configuration file - if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then - _configure_tgt_for_config_d - sudo mkdir -p /etc/tgt/conf.d - echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt - else - restart_service tgtd - fi - - screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" -} - -# stop_nvol() - Stop running processes -function stop_nvol() { - # Kill the nova volume screen window - screen -S $SCREEN_NAME -p n-vol -X kill - - stop_service tgt -} - -# Restore xtrace -$XTRACE diff --git a/lib/nova b/lib/nova index 2c1413d3..fbb5a012 100644 --- a/lib/nova +++ b/lib/nova @@ -312,15 +312,6 @@ function create_nova_conf() { if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" fi - if is_service_enabled n-vol; then - NOVA_ENABLED_APIS="${NOVA_ENABLED_APIS},osapi_volume" - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - 
add_nova_opt "volume_api_class=nova.volume.api.API" - add_nova_opt "volume_group=$VOLUME_GROUP" - add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" - # oneiric no longer supports ietadm - add_nova_opt "iscsi_helper=tgtadm" - fi if is_service_enabled cinder; then add_nova_opt "volume_api_class=nova.volume.cinder.API" fi diff --git a/stack.sh b/stack.sh index 40eab36e..75e0244b 100755 --- a/stack.sh +++ b/stack.sh @@ -93,7 +93,7 @@ DEST=${DEST:-/opt/stack} # ============ # Remove services which were negated in ENABLED_SERVICES -# using the "-" prefix (e.g., "-n-vol") instead of +# using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). disable_negated_services @@ -154,12 +154,6 @@ elif [ "$rpc_backend_cnt" == 0 ]; then fi unset rpc_backend_cnt -# Make sure we only have one volume service enabled. -if is_service_enabled cinder && is_service_enabled n-vol; then - echo "ERROR: n-vol and cinder must not be enabled at the same time" - exit 1 -fi - # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) @@ -310,7 +304,6 @@ source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum @@ -1749,9 +1742,6 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" init_cinder -elif is_service_enabled n-vol; then - echo_summary "Configuring Nova volumes" - init_nvol fi if is_service_enabled nova; then @@ -1951,10 +1941,6 @@ if is_service_enabled nova; then echo_summary "Starting Nova" start_nova fi -if is_service_enabled n-vol; then - echo_summary "Starting Nova volumes" - start_nvol -fi if is_service_enabled cinder; then echo_summary "Starting Cinder" start_cinder diff --git a/stackrc b/stackrc index a02bdc01..56897798 100644 --- a/stackrc +++ b/stackrc @@ -11,10 +11,6 @@ DEST=/opt/stack # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: # enable_service swift -# -# And to disable Cinder and use Nova Volumes instead: -# disable_service c-api c-sch c-vol cinder -# enable_service n-vol ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable diff --git a/unstack.sh b/unstack.sh index 6b34aa3a..1a2cad83 100755 --- a/unstack.sh +++ b/unstack.sh @@ -26,7 +26,6 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/n-vol # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -58,11 +57,7 @@ fi SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes -if is_service_enabled cinder n-vol; then - if is_service_enabled n-vol; then - SCSI_PERSIST_DIR=$NOVA_STATE_PATH/volumes/* - fi - +if is_service_enabled cinder; then TARGETS=$(sudo tgtadm --op show --mode target) if [ $? -ne 0 ]; then # If tgt driver isn't running this won't work obviously @@ -88,10 +83,6 @@ if is_service_enabled cinder n-vol; then sudo rm -rf $CINDER_STATE_PATH/volumes/* fi - if is_service_enabled n-vol; then - sudo rm -rf $NOVA_STATE_PATH/volumes/* - fi - if [[ "$os_PACKAGE" = "deb" ]]; then stop_service tgt else From 203edc569bec0fe845ab1d64388c53aaedc256f0 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Thu, 15 Nov 2012 10:45:44 +0800 Subject: [PATCH 008/207] Fix the parameter expansion issue in configure_tempest.sh We need follow the syntax like ${parameter:-word} for OS_PASSWORD set. 
Change-Id: I44f630007b578779658ddcd68417a778b242ed4d --- tools/configure_tempest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index b48680c9..6493822e 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -128,7 +128,7 @@ IDENTITY_CATALOG_TYPE=identity # OS_USERNAME et all should be defined in openrc. OS_USERNAME=${OS_USERNAME:-demo} OS_TENANT_NAME=${OS_TENANT_NAME:-demo} -OS_PASSWORD=${OS_PASSWORD:$ADMIN_PASSWORD} +OS_PASSWORD=${OS_PASSWORD:-$ADMIN_PASSWORD} # See files/keystone_data.sh where alt_demo user # and tenant are set up... From c1b486a520dd3c2c9596244a0aa899f2e35ec3bf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 5 Nov 2012 14:26:09 -0600 Subject: [PATCH 009/207] Simplify database selection Do not require every script that sources stackrc to also source lib/databases. * Move use_databases() to functions * Set DATABASE_TYPE in stackrc * Allow setting DATABASE_TYPE in localrc to work (use_database() essentially just sets DATABASE_TYPE at this stage so continuing to use it is equivalent) * Validate DATABASE_TYPE in stack.sh. * Change sudo to postgresql user to go through root to eliminate password prompt * fix use_database error condition Change-Id: Ibb080c76e6cd7c6eebbb641a894d54b1dde78ca6 --- functions | 16 ++++++++++++++++ lib/database | 9 --------- lib/databases/postgresql | 4 ++-- stack.sh | 10 ++++++++-- stackrc | 5 ++++- 5 files changed, 30 insertions(+), 14 deletions(-) diff --git a/functions b/functions index 92c8a5f1..8ab3eefc 100644 --- a/functions +++ b/functions @@ -841,6 +841,22 @@ function upload_image() { fi } +# Set the database backend to use +# When called from stackrc/localrc DATABASE_BACKENDS has not been +# initialized yet, just save the configuration selection and call back later +# to validate it. +# $1 The name of the database backend to use (mysql, postgresql, ...) 
+function use_database { + if [[ -z "$DATABASE_BACKENDS" ]]; then + # The backends haven't initialized yet, just save the selection for now + DATABASE_TYPE=$1 + return + fi + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 + ret=$? + return $ret +} + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which to store the enabled service's name diff --git a/lib/database b/lib/database index 66fb36fb..07e37aef 100644 --- a/lib/database +++ b/lib/database @@ -62,15 +62,6 @@ function initialize_database_backends { return 0 } -# Set the database backend to use -# $1 The name of the database backend to use (mysql, postgresql, ...) -function use_database { - use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 - ret=$? - echo "Invalid database '$1'" - return $ret -} - # Recreate a given database # $1 The name of the database # $2 The character set/encoding of the database diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 81989f2e..ee24c8b5 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -38,8 +38,8 @@ function configure_database_postgresql { start_service postgresql # If creating the role fails, chances are it already existed. Try to alter it. 
- sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ - sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" } function install_database_postgresql { diff --git a/stack.sh b/stack.sh index f250c6bf..ec10b110 100755 --- a/stack.sh +++ b/stack.sh @@ -29,8 +29,6 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Import database library (must be loaded before stackrc which sources localrc) -source $TOP_DIR/lib/database # Settings @@ -92,6 +90,14 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ +# Import database configuration +source $TOP_DIR/lib/database + +# Validate database selection +# Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES +# properly configured for the database selection. +use_database $DATABASE_TYPE || echo "Invalid database '$DATABASE_TYPE'" + # Remove services which were negated in ENABLED_SERVICES # using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). diff --git a/stackrc b/stackrc index 56897798..01e95561 100644 --- a/stackrc +++ b/stackrc @@ -6,12 +6,15 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # Destination path for installation DEST=/opt/stack +# Select the default database +DATABASE_TYPE=mysql + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From c3fca0814984daaf52a2356c4ed12c495e6bf436 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 15 Nov 2012 14:14:30 -0500 Subject: [PATCH 010/207] Remove use of nonexistent postgresql-setup. On Ubuntu the default postgresql data directory is not /var/lib/pgsql/data so the check to see if that directory exists is not needed. On Fedora we can assume that the rpm will create it and initialize it properly. So this line can safely removed without any issues. Change-Id: If949f0580eb139f3803b698ee88fceebf958448e --- lib/databases/postgresql | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index ee24c8b5..10ab7219 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -28,7 +28,6 @@ function configure_database_postgresql { PG_HBA=$PG_DIR/pg_hba.conf PG_CONF=$PG_DIR/postgresql.conf fi - sudo [ -e /var/lib/pgsql/data ] || sudo postgresql-setup initdb # Listen on all addresses sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF # Do password auth from all IPv4 clients From 6e3330967c5c7be73a8ffee3779c214768683c56 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 16 Nov 2012 16:41:26 -0800 Subject: [PATCH 011/207] Remove python-openstackclient. It's not used. 
Change-Id: I00deaa9ebcd844dd9c3c9d2560d11ad37589d847 --- stack.sh | 5 ----- stackrc | 4 ---- 2 files changed, 9 deletions(-) diff --git a/stack.sh b/stack.sh index ec10b110..8df03953 100755 --- a/stack.sh +++ b/stack.sh @@ -317,7 +317,6 @@ source $TOP_DIR/lib/tempest # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon -OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 @@ -813,9 +812,6 @@ install_keystoneclient install_glanceclient install_novaclient -# Check out the client libs that are used most -git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH - # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) @@ -881,7 +877,6 @@ echo_summary "Configuring OpenStack projects" # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient configure_novaclient -setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then configure_keystone fi diff --git a/stackrc b/stackrc index 01e95561..e0c69cab 100644 --- a/stackrc +++ b/stackrc @@ -76,10 +76,6 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master -# consolidated openstack python client -OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git -OPENSTACKCLIENT_BRANCH=master - # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master From 07ccefd6bee75dc6df1d6544b92682f65aa0202f Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Mon, 19 Nov 2012 18:55:33 +1300 Subject: [PATCH 012/207] Default Q_HOST to HOST_IP rather than localhost. This makes it less likely to interact with e.g. http_proxy settings. I filed this as bug 1080561. 
Change-Id: If97459a28f2d2a77cd322bb3f6024d11fbb8fcd4 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ec10b110..70c9e040 100755 --- a/stack.sh +++ b/stack.sh @@ -330,7 +330,7 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port Q_PORT=${Q_PORT:-9696} # Default Quantum Host -Q_HOST=${Q_HOST:-localhost} +Q_HOST=${Q_HOST:-$HOST_IP} # Which Quantum API nova should use # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} From 443ac48fdef510835bf2de1ba27f0b6baac8f5b8 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Mon, 19 Nov 2012 18:59:04 +1300 Subject: [PATCH 013/207] Make it possible to choose a different VIF driver. This is useful when working with baremetal which uses openvswitch quantum plugin, but baremetal-vif vif driver. bug 1080562 Change-Id: I9f94a8f2d7f11fa0771a5304b0aed1d0de5a3db7 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index ec10b110..57049d9c 100755 --- a/stack.sh +++ b/stack.sh @@ -1773,11 +1773,11 @@ if is_service_enabled nova; then add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" From 
596b906b63e2f60a185ae969e35f58c6318480e7 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Mon, 19 Nov 2012 10:58:50 -0500 Subject: [PATCH 014/207] Adds requisite changes to configure_tempest.sh for EC2/S3 tests * Adds all the BOTO_XXX variables to the configuration file setup that are needed by https://review.openstack.org/#/c/14689/15 Change-Id: I44b2950705807fcfd026f1069fbe0d2727632760 --- tools/configure_tempest.sh | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 6493822e..9b543ab0 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -209,6 +209,21 @@ VOLUME_CATALOG_TYPE=volume LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} +# EC2 and S3 test configuration +BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" +BOTO_S3_URL="http://$IDENTITY_HOST:3333" +BOTO_AWS_ACCESS="" # Created in tempest... +BOTO_AWS_SECRET="" # Created in tempest... 
+BOTO_AWS_REGION="RegionOne" +BOTO_S3_MATERIALS_PATH=$DEST/devstack/files/images/s3-materials/cirros-0.3.0 +BOTO_ARI_MANIFEST=cirros-0.3.0-x86_64-initrd.manifest.xml +BOTO_AMI_MANIFEST=cirros-0.3.0-x86_64-blank.img.manifest.xml +BOTO_AKI_MANIFEST=cirros-0.3.0-x86_64-vmlinuz.manifest.xml +BOTO_FLAVOR_NAME=m1.tiny +BOTO_SOCKET_TIMEOUT=5 +BOTO_BUILD_TIMEOUT=${COMPUTE_BUILD_TIMEOUT:-400} +BOTO_BUILD_INTERVAL=${COMPUTE_BUILD_INTERVAL:-3} + sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; s,%IDENTITY_HOST%,$IDENTITY_HOST,g; @@ -266,6 +281,19 @@ sed -e " s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; + s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; + s,%BOTO_S3_URL%,$BOTO_S3_URL,g; + s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; + s,%BOTO_AWS_SECRET%,$BOTO_AWS_SECRET,g; + s,%BOTO_AWS_REGION%,$BOTO_AWS_REGION,g; + s,%BOTO_S3_MATERIALS_PATH%,$BOTO_S3_MATERIALS_PATH,g; + s,%BOTO_ARI_MANIFEST%,$BOTO_ARI_MANIFEST,g; + s,%BOTO_AMI_MANIFEST%,$BOTO_AMI_MANIFEST,g; + s,%BOTO_AKI_MANIFEST%,$BOTO_AKI_MANIFEST,g; + s,%BOTO_FLAVOR_NAME%,$BOTO_FLAVOR_NAME,g; + s,%BOTO_SOCKET_TIMEOUT%,$BOTO_SOCKET_TIMEOUT,g; + s,%BOTO_BUILD_TIMEOUT%,$BOTO_BUILD_TIMEOUT,g; + s,%BOTO_BUILD_INTERVAL%,$BOTO_BUILD_INTERVAL,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From d57ccf0271e2d416fb0fc73b5ab96f342eae7f28 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 15 Nov 2012 10:09:33 -0800 Subject: [PATCH 015/207] Add nova-conductor service This is a new service for nova that will soon be required for n-cpu to function. 
Change-Id: I9a2e62f25200a47233a7796084ad8ebabc852c59 --- lib/nova | 3 ++- stackrc | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index fbb5a012..b5efce96 100644 --- a/lib/nova +++ b/lib/nova @@ -434,12 +434,13 @@ function start_nova() { screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" + screen_it n-cond "cd $NOVA_DIR && ./bin/nova-conductor" } # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/stackrc b/stackrc index 01e95561..9588cf99 100644 --- a/stackrc +++ b/stackrc @@ -14,7 +14,7 @@ DATABASE_TYPE=mysql # ``disable_service`` functions in ``localrc``. # For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 818a048afc2ae0935f487dec7107237c7fba2f28 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 19 Nov 2012 15:05:31 -0500 Subject: [PATCH 016/207] install nodejs-legacy on quantal quantal changed the name of the node binary on disk, which breaks horizon on 12.10 installs. Provide a work around for installing the legacy package on that environment. 
Fixes bug #1070083 Change-Id: If8ef211d12451ef4e1df0d2398cf18a3b2c46da3 --- files/apts/horizon | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/horizon b/files/apts/horizon index 2161ccd3..2c2faf1a 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -21,4 +21,5 @@ python-coverage python-cherrypy3 # why? python-migrate nodejs +nodejs-legacy # dist:quantal python-netaddr From adfc7a3c0aec56030da5369e3598520ba18b3e9c Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Tue, 20 Nov 2012 13:08:13 -0500 Subject: [PATCH 017/207] Re-add postgresql-setup initdb for Fedora The Fedora RPM does not set up the postgresql data directory. postgresql-setup initdb must be run after installing the RPM. Change-Id: I5e5ab659e83f4ee6a024f74a23bf4562ea0065ce --- lib/databases/postgresql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 10ab7219..d9c2f00c 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -23,6 +23,7 @@ function configure_database_postgresql { if [[ "$os_PACKAGE" = "rpm" ]]; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf + sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb else PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf From b592b29f923b4ea137d8efd4bb5f0a6dde356075 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 21 Nov 2012 14:20:12 +1300 Subject: [PATCH 018/207] Configure heat engine server URLs Wait conditions do not work without them. 
Change-Id: I64ed75e4b84c73678af11182ac951cb1da561428 --- lib/heat | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/heat b/lib/heat index efdcfad3..396c8a05 100644 --- a/lib/heat +++ b/lib/heat @@ -125,6 +125,9 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_METADATA_HOST:$HEAT_METADATA_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl From 766ae34261a33fbf7661e63f30b227dd9177bd71 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Thu, 22 Nov 2012 20:04:02 +0900 Subject: [PATCH 019/207] Creates nova endpoint when n-api is enabled. Fixes bug 1081975 Nova endpoint in keystone was registered if n-cpu is enabled. However it is a usual case where n-cpu runs on a different host in multi-node setup and it results in no endpoint for nova. n-api is a better condition since nova-api and keystone usually run on a same host. 
Change-Id: Ic097e1c3bd30798d9d3c5fb76023fbdb3ae189d9 --- files/keystone_data.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 71994a81..f75d24a6 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -106,7 +106,7 @@ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then fi # Nova -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then NOVA_USER=$(get_id keystone user-create \ --name=nova \ --pass="$SERVICE_PASSWORD" \ From b562e6a710b34609f95bcc46e2ae50e7812aa103 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 19 Nov 2012 16:00:01 -0500 Subject: [PATCH 020/207] move horizon logic to lib to clean up stack.sh pre holiday refactor extrodinare, get the horizon code over fully into lib/horizon so that all these fixes aren't scattered through stack.sh Change-Id: I7f26c5c6708d5693048eb7b1ce792122adbc7351 --- lib/horizon | 133 ++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 72 +++------------------------- unstack.sh | 3 +- 3 files changed, 142 insertions(+), 66 deletions(-) create mode 100644 lib/horizon diff --git a/lib/horizon b/lib/horizon new file mode 100644 index 00000000..c6c96dae --- /dev/null +++ b/lib/horizon @@ -0,0 +1,133 @@ +# lib/horizon +# Functions to control the configuration and operation of the horizon service +# + +# Dependencies: +# ``functions`` file +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# + +# ``stack.sh`` calls the entry points in this order: +# +# install_horizon +# configure_horizon +# init_horizon +# start_horizon +# stop_horizon +# cleanup_horizon + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +HORIZON_DIR=$DEST/horizon + +# Allow overriding the default Apache user and group, default both to +# current user. 
+APACHE_USER=${APACHE_USER:-$USER} +APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} + + +# Entry Points +# ------------ + +# cleanup_horizon() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_horizon() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_horizon() - Set config files, create data dirs, etc +function configure_horizon() { + setup_develop $HORIZON_DIR +} + +# init_horizon() - Initialize databases, etc. +function init_horizon() { + # Remove stale session database. + rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 + + # ``local_settings.py`` is used to override horizon default settings. + local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py + cp $FILES/horizon_settings.py $local_settings + + # Initialize the horizon database (it stores sessions and notices shown to + # users). The user system is external (keystone). 
+ cd $HORIZON_DIR + python manage.py syncdb --noinput + cd $TOP_DIR + + # Create an empty directory that apache uses as docroot + sudo mkdir -p $HORIZON_DIR/.blackhole + + + if [[ "$os_PACKAGE" = "deb" ]]; then + APACHE_NAME=apache2 + APACHE_CONF=sites-available/horizon + # Clean up the old config name + sudo rm -f /etc/apache2/sites-enabled/000-default + # Be a good citizen and use the distro tools here + sudo touch /etc/$APACHE_NAME/$APACHE_CONF + sudo a2ensite horizon + else + # Install httpd, which is NOPRIME'd + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + fi + + # Configure apache to run horizon + sudo sh -c "sed -e \" + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + s,%APACHE_NAME%,$APACHE_NAME,g; + s,%DEST%,$DEST,g; + \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + +} + +# install_horizon() - Collect source and prepare +function install_horizon() { + # Apache installation, because we mark it NOPRIME + if [[ "$os_PACKAGE" = "deb" ]]; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + else + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi + fi + + # NOTE(sdague) quantal changed the name of the node binary + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ ! 
-e "/usr/bin/node" ]]; then + install_package nodejs-legacy + fi + fi + + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +} + +# start_horizon() - Start running processes, including screen +function start_horizon() { + restart_service $APACHE_NAME + screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" +} + +# stop_horizon() - Stop running processes (non-screen) +function stop_horizon() { + stop_service apache2 +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 9b830b13..9ecc7499 100755 --- a/stack.sh +++ b/stack.sh @@ -306,6 +306,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Get project function libraries +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova @@ -568,15 +569,6 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE ( SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -# Horizon -# ------- - -# Allow overriding the default Apache user and group, default both to -# current user. 
-APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} - - # Log files # --------- @@ -756,16 +748,6 @@ if is_service_enabled $DATABASE_BACKENDS; then install_database fi -if is_service_enabled horizon; then - if [[ "$os_PACKAGE" = "deb" ]]; then - # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi - else - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi - fi -fi - if is_service_enabled q-agt; then if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps @@ -840,8 +822,8 @@ if is_service_enabled n-novnc; then git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi if is_service_enabled horizon; then - # django powered web control panel for openstack - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG + # dashboard + install_horizon fi if is_service_enabled quantum; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH @@ -899,7 +881,7 @@ if is_service_enabled nova; then configure_nova fi if is_service_enabled horizon; then - setup_develop $HORIZON_DIR + configure_horizon fi if is_service_enabled quantum; then setup_develop $QUANTUM_CLIENT_DIR @@ -1035,48 +1017,8 @@ fi if is_service_enabled horizon; then echo_summary "Configuring and starting Horizon" - - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - - # ``local_settings.py`` is used to override horizon default settings. - local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py - cp $FILES/horizon_settings.py $local_settings - - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). 
- cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - - # Create an empty directory that apache uses as docroot - sudo mkdir -p $HORIZON_DIR/.blackhole - - if [[ "$os_PACKAGE" = "deb" ]]; then - APACHE_NAME=apache2 - APACHE_CONF=sites-available/horizon - # Clean up the old config name - sudo rm -f /etc/apache2/sites-enabled/000-default - # Be a good citizen and use the distro tools here - sudo touch /etc/$APACHE_NAME/$APACHE_CONF - sudo a2ensite horizon - else - # Install httpd, which is NOPRIME'd - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - fi - - # Configure apache to run horizon - sudo sh -c "sed -e \" - s,%USER%,$APACHE_USER,g; - s,%GROUP%,$APACHE_GROUP,g; - s,%HORIZON_DIR%,$HORIZON_DIR,g; - s,%APACHE_NAME%,$APACHE_NAME,g; - s,%DEST%,$DEST,g; - \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" - - restart_service $APACHE_NAME + init_horizon + start_horizon fi @@ -1958,7 +1900,7 @@ if is_service_enabled ceilometer; then echo_summary "Starting Ceilometer" start_ceilometer fi -screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" + screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" # Starting the nova-objectstore only if swift3 service is not enabled. diff --git a/unstack.sh b/unstack.sh index 1a2cad83..0040cf1e 100755 --- a/unstack.sh +++ b/unstack.sh @@ -26,6 +26,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/horizon # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -51,7 +52,7 @@ fi # Apache has the WSGI processes if is_service_enabled horizon; then - stop_service apache2 + stop_horizon fi SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* From 9f61d29e66433eac5c657f6d3a3903b35ecfb7d1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 26 Nov 2012 18:56:20 +0000 Subject: [PATCH 021/207] Revert "Remove python-openstackclient." This reverts commit 6e3330967c5c7be73a8ffee3779c214768683c56 This is in fact useful to an admittedly small population. And if I had not been on vacation I'd have -2'd it... --- stack.sh | 5 +++++ stackrc | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 8df03953..ec10b110 100755 --- a/stack.sh +++ b/stack.sh @@ -317,6 +317,7 @@ source $TOP_DIR/lib/tempest # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon +OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 @@ -812,6 +813,9 @@ install_keystoneclient install_glanceclient install_novaclient +# Check out the client libs that are used most +git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH + # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) @@ -877,6 +881,7 @@ echo_summary "Configuring OpenStack projects" # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient configure_novaclient +setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then configure_keystone fi diff --git a/stackrc b/stackrc index e0c69cab..01e95561 100644 --- a/stackrc +++ b/stackrc @@ -76,6 +76,10 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master +# consolidated openstack python client 
+OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git +OPENSTACKCLIENT_BRANCH=master + # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master From 0a9954f2c251c68a8261a5ed6999c1585e48de67 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 20 Nov 2012 11:34:25 +1300 Subject: [PATCH 022/207] Also pickup quantum distro dependencies (bug 1080886). Change-Id: Ic0fc0b03dc01782d0d85d98de765f04fcbcacd74 --- functions | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/functions b/functions index 8ab3eefc..9f540492 100644 --- a/functions +++ b/functions @@ -155,6 +155,10 @@ function get_packages() { if [[ ! $file_to_parse =~ keystone ]]; then file_to_parse="${file_to_parse} keystone" fi + elif [[ $service == q-* ]]; then + if [[ ! $file_to_parse =~ quantum ]]; then + file_to_parse="${file_to_parse} quantum" + fi fi done From 0edfd6f6e39e01b6acf29be32b2cb18a0c4f4482 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 23 Nov 2012 15:00:38 -0800 Subject: [PATCH 023/207] Use NOVA_BIN_DIR for newer binaries. We added a variable a while ago, NOVA_BIN_DIR which is set properly to the location of the binaries. Rather than using the in-tree bin-dir, which is going away in favor of entrypoints console_scripts. Change-Id: I65040cfe8321d49595a909353870f981bbd6a480 --- lib/nova | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index b5efce96..3ea2f2af 100644 --- a/lib/nova +++ b/lib/nova @@ -432,9 +432,9 @@ function start_nova() { screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." 
- screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" - screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" - screen_it n-cond "cd $NOVA_DIR && ./bin/nova-conductor" + screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" + screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" } # stop_nova() - Stop running processes (non-screen) From e83356217b48308b3a4dc975940c79a22e159238 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Nov 2012 17:00:11 -0600 Subject: [PATCH 024/207] Fix ini functions to handle spaces in section names This allows section names to look like: [ default ] OpenSSL is the primary offender for this usage. Change-Id: If5c711107e73cebab9d4a26ca02a7ce572224377 --- functions | 12 ++++++------ tests/functions.sh | 12 +++++++++++- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/functions b/functions index 8ab3eefc..cdb982d5 100644 --- a/functions +++ b/functions @@ -370,7 +370,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file } # Uncomment an option in an INI file @@ -379,7 +379,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file } @@ -390,7 +390,7 @@ function iniget() { local section=$2 local option=$3 local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + line=$(sed -ne "/^\[ *$section *\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) echo ${line#*=} } @@ -402,18 +402,18 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! 
grep -q "^\[$section\]" $file; then + if ! grep -q "^\[ *$section *\]" $file; then # Add section at the end echo -e "\n[$section]" >>$file fi if [[ -z "$(iniget $file $section $option)" ]]; then # Add it - sed -i -e "/^\[$section\]/ a\\ + sed -ie "/^\[ *$section *\]/ a\\ $option = $value " $file else # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file fi } diff --git a/tests/functions.sh b/tests/functions.sh index 3a0f3199..d2cc5c44 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -54,6 +54,9 @@ handlers = aa, bb [bbb] handlers=ee,ff + +[ ccc ] +spaces = yes EOF # Test with spaces @@ -74,6 +77,14 @@ else echo "iniget failed: $VAL" fi +# Test with spaces in section header + +VAL=$(iniget test.ini ccc spaces) +if [[ "$VAL" == "yes" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test without spaces, end of file @@ -112,7 +123,6 @@ else echo "iniget failed: $VAL" fi - # Test option not exist VAL=$(iniget test.ini aaa debug) From 856a11e0e42c253b84b6074c620d54e620cd17c6 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 21 Nov 2012 16:04:12 +0100 Subject: [PATCH 025/207] Add partial openSUSE/SLE support Note that this is the first part of the support. A second part involves dealing with the package names. Among the changes: - add several functions to determine some distro-specific behavior (how to call usermod, if some features are available on the distro, etc.) 
- correctly detect openSUSE and SLE in GetOSVersion, and set DISTRO accordingly - new is_suse() function to check if running on a SUSE-based distro - use zypper to install packages - adapt apache virtual host configuration for openSUSE - some simple fixes (path to pip, mysql service name) Change-Id: Id2f7c9e18a1c4a7b7cea262ea7959d183e4b0cf0 --- functions | 115 +++++++++++++++++++++++++++++++++++++++++++- lib/cinder | 6 +-- lib/databases/mysql | 6 ++- lib/horizon | 14 ++++-- lib/nova | 8 +-- stack.sh | 5 +- 6 files changed, 134 insertions(+), 20 deletions(-) diff --git a/functions b/functions index 8ab3eefc..16664d6d 100644 --- a/functions +++ b/functions @@ -223,6 +223,12 @@ GetOSVersion() { os_UPDATE="" if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? -eq 0 ]]; then + os_VENDOR="openSUSE" + fi + os_PACKAGE="rpm" else os_PACKAGE="rpm" fi @@ -246,6 +252,23 @@ GetOSVersion() { os_VENDOR="" done os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" fi export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } @@ -297,6 +320,15 @@ function GetDistro() { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + 
DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" @@ -305,6 +337,19 @@ function GetDistro() { } +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). +# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [[ "$os_VENDOR" = "openSUSE" || "$os_VENDOR" = "SUSE LINUX" ]] + return $? +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -542,7 +587,11 @@ function install_package() { apt_get install "$@" else - yum_install "$@" + if is_suse; then + zypper_install "$@" + else + yum_install "$@" + fi fi } @@ -593,7 +642,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then CMD_PIP=/usr/bin/pip else CMD_PIP=/usr/bin/pip-python @@ -946,6 +995,68 @@ function _ssh_check_novanet() { fi } + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + + +# Get the location of the $module-rootwrap executables, where module is cinder +# or nova. 
+# get_rootwrap_location module +function get_rootwrap_location() { + local module=$1 + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + echo "/usr/local/bin/$module-rootwrap" + else + echo "/usr/bin/$module-rootwrap" + fi +} + + +# Check if qpid can be used on the current distro. +# qpid_is_supported +function qpid_is_supported() { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is + # not in openSUSE either right now. + [[ "$DISTRO" = "oneiric" || is_suse ]] + return $? +} + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index c2cf15bf..058fcc23 100644 --- a/lib/cinder +++ b/lib/cinder @@ -63,11 +63,7 @@ function configure_cinder() { cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR # Set the paths of certain binaries - if [[ "$os_PACKAGE" = "deb" ]]; then - CINDER_ROOTWRAP=/usr/local/bin/cinder-rootwrap - else - CINDER_ROOTWRAP=/usr/bin/cinder-rootwrap - fi + CINDER_ROOTWRAP=$(get_rootwrap_location cinder) # If Cinder ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $CINDER_ROOTWRAP diff --git a/lib/databases/mysql b/lib/databases/mysql index ed59290a..fc6a3b7a 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,7 +25,11 @@ function configure_database_mysql { MYSQL=mysql else MY_CONF=/etc/my.cnf - MYSQL=mysqld + if is_suse; then + MYSQL=mysql + else + MYSQL=mysqld + fi fi # Start mysql-server diff --git a/lib/horizon b/lib/horizon index c6c96dae..af09f770 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,9 +81,17 @@ function init_horizon() { sudo a2ensite horizon else # Install httpd, which is NOPRIME'd - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + if is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # Append wsgi to the list of modules to load 
+ grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || + sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 + else + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + fi fi # Configure apache to run horizon diff --git a/lib/nova b/lib/nova index 3ea2f2af..d15d9e31 100644 --- a/lib/nova +++ b/lib/nova @@ -47,11 +47,7 @@ else fi # Set the paths of certain binaries -if [[ "$os_PACKAGE" = "deb" ]]; then - NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap -else - NOVA_ROOTWRAP=/usr/bin/nova-rootwrap -fi +NOVA_ROOTWRAP=$(get_rootwrap_location nova) # Allow rate limiting to be turned off for testing, like for Tempest # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting @@ -252,7 +248,7 @@ EOF' # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. - sudo usermod -a -G libvirtd `whoami` + add_user_to_group `whoami` libvirtd # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart diff --git a/stack.sh b/stack.sh index 570fc688..70f46104 100755 --- a/stack.sh +++ b/stack.sh @@ -113,9 +113,8 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then fi fi -# Qpid was introduced to Ubuntu in precise, disallow it on oneiric -if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then - echo "You must use Ubuntu Precise or newer for Qpid support." +if is_service_enabled qpid && ! qpid_is_supported; then + echo "Qpid support is not available for this version of your distribution." 
exit 1 fi From ca5c4713869fb88c2e8753039f80f1f8bf1d8fef Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 21 Nov 2012 17:45:49 +0100 Subject: [PATCH 026/207] Add package names for openSUSE/SLE Change-Id: I487cc7b8bd228ff77c9881528e3395cbe3c43d4a --- files/rpms-suse/ceilometer-collector | 4 +++ files/rpms-suse/cinder | 2 ++ files/rpms-suse/general | 23 +++++++++++++ files/rpms-suse/glance | 12 +++++++ files/rpms-suse/horizon | 23 +++++++++++++ files/rpms-suse/keystone | 17 ++++++++++ files/rpms-suse/n-api | 2 ++ files/rpms-suse/n-cpu | 4 +++ files/rpms-suse/n-novnc | 1 + files/rpms-suse/n-vol | 2 ++ files/rpms-suse/nova | 48 ++++++++++++++++++++++++++++ files/rpms-suse/postgresql | 1 + files/rpms-suse/quantum | 27 ++++++++++++++++ files/rpms-suse/ryu | 5 +++ files/rpms-suse/swift | 19 +++++++++++ lib/databases/mysql | 6 +++- lib/horizon | 2 ++ stack.sh | 14 ++++++-- 18 files changed, 209 insertions(+), 3 deletions(-) create mode 100644 files/rpms-suse/ceilometer-collector create mode 100644 files/rpms-suse/cinder create mode 100644 files/rpms-suse/general create mode 100644 files/rpms-suse/glance create mode 100644 files/rpms-suse/horizon create mode 100644 files/rpms-suse/keystone create mode 100644 files/rpms-suse/n-api create mode 100644 files/rpms-suse/n-cpu create mode 100644 files/rpms-suse/n-novnc create mode 100644 files/rpms-suse/n-vol create mode 100644 files/rpms-suse/nova create mode 100644 files/rpms-suse/postgresql create mode 100644 files/rpms-suse/quantum create mode 100644 files/rpms-suse/ryu create mode 100644 files/rpms-suse/swift diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector new file mode 100644 index 00000000..c76454fd --- /dev/null +++ b/files/rpms-suse/ceilometer-collector @@ -0,0 +1,4 @@ +# Not available in openSUSE main repositories, but can be fetched from OBS +# (devel:languages:python and server:database projects) +mongodb +python-pymongo diff --git a/files/rpms-suse/cinder 
b/files/rpms-suse/cinder new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/cinder @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/general b/files/rpms-suse/general new file mode 100644 index 00000000..8ed74ec0 --- /dev/null +++ b/files/rpms-suse/general @@ -0,0 +1,23 @@ +bridge-utils +curl +euca2ools +git-core +iputils +openssh +psmisc +python-cmd2 # dist:opensuse-12.3 +python-netaddr +python-pep8 +python-pip +python-pylint +python-unittest2 +python-virtualenv +screen +tar +tcpdump +unzip +vim-enhanced +wget + +findutils-locate # useful when debugging +lsof # useful when debugging diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance new file mode 100644 index 00000000..dd68ac08 --- /dev/null +++ b/files/rpms-suse/glance @@ -0,0 +1,12 @@ +gcc +libxml2-devel +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-argparse +python-devel +python-eventlet +python-greenlet +python-iso8601 +python-wsgiref +python-xattr diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon new file mode 100644 index 00000000..7e46ffe0 --- /dev/null +++ b/files/rpms-suse/horizon @@ -0,0 +1,23 @@ +apache2 # NOPRIME +apache2-mod_wsgi # NOPRIME +nodejs +python-CherryPy # why? 
(coming from apts) +python-Paste +python-PasteDeploy +python-Routes +python-Sphinx +python-SQLAlchemy +python-WebOb +python-anyjson +python-beautifulsoup +python-coverage +python-dateutil +python-eventlet +python-kombu +python-mox +python-netaddr +python-nose +python-pep8 +python-pylint +python-sqlalchemy-migrate +python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone new file mode 100644 index 00000000..b3c876ad --- /dev/null +++ b/files/rpms-suse/keystone @@ -0,0 +1,17 @@ +cyrus-sasl-devel +openldap2-devel +python-Paste +python-PasteDeploy +python-PasteScript +python-Routes +python-SQLAlchemy +python-WebOb +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-greenlet +python-lxml +python-mysql +python-py-bcrypt +python-pysqlite +sqlite3 diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api new file mode 100644 index 00000000..ad943ffd --- /dev/null +++ b/files/rpms-suse/n-api @@ -0,0 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs +python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu new file mode 100644 index 00000000..27d3254f --- /dev/null +++ b/files/rpms-suse/n-cpu @@ -0,0 +1,4 @@ +# Stuff for diablo volumes +genisoimage +lvm2 +open-iscsi diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc new file mode 100644 index 00000000..c8722b9f --- /dev/null +++ b/files/rpms-suse/n-novnc @@ -0,0 +1 @@ +python-numpy diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/n-vol @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova new file mode 100644 index 00000000..0c036786 --- /dev/null +++ b/files/rpms-suse/nova @@ -0,0 +1,48 @@ +curl +# Note: we need to package dhcp_release in dnsmasq! 
+dnsmasq +ebtables +gawk +iptables +iputils +kpartx +kvm +libvirt # NOPRIME +libvirt-python +libxml2-python +mysql-community-server # NOPRIME +parted +python-M2Crypto +python-m2crypto # dist:sle11sp2 +python-Paste +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-Tempita +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-lxml # needed for glance which is needed for nova --- this shouldn't be here +python-mox +python-mysql +python-netaddr +python-paramiko +python-python-gflags +python-sqlalchemy-migrate +python-suds +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +rabbitmq-server # NOPRIME +socat +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms-suse/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum new file mode 100644 index 00000000..068c15c2 --- /dev/null +++ b/files/rpms-suse/quantum @@ -0,0 +1,27 @@ +# Note: we need to package dhcp_release in dnsmasq! 
+dnsmasq +ebtables +iptables +iputils +mysql-community-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-mysql +python-netaddr +python-Paste +python-PasteDeploy +python-pyudev +python-Routes +python-SQLAlchemy +python-suds +rabbitmq-server # NOPRIME +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu new file mode 100644 index 00000000..763fd24c --- /dev/null +++ b/files/rpms-suse/ryu @@ -0,0 +1,5 @@ +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-Sphinx +python-gevent +python-python-gflags diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift new file mode 100644 index 00000000..db379bbc --- /dev/null +++ b/files/rpms-suse/swift @@ -0,0 +1,19 @@ +curl +gcc +memcached +python-PasteDeploy +python-WebOb +python-configobj +python-coverage +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-eventlet +python-greenlet +python-netifaces +python-nose +python-simplejson +python-xattr +sqlite3 +xfsprogs +xinetd diff --git a/lib/databases/mysql b/lib/databases/mysql index fc6a3b7a..eb84f2ca 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -84,7 +84,11 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - install_package mysql-server + if is_suse; then + install_package mysql-community-server + else + install_package mysql-server + fi } function database_connection_url_mysql { diff --git a/lib/horizon b/lib/horizon index af09f770..a378baf2 100644 --- a/lib/horizon +++ b/lib/horizon @@ -111,6 +111,8 @@ function install_horizon() { if [[ "$os_PACKAGE" = "deb" ]]; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + elif is_suse; then + install_package apache2 apache2-mod_wsgi else sudo rm -f /etc/httpd/conf.d/000-* install_package httpd 
mod_wsgi diff --git a/stack.sh b/stack.sh index 70f46104..0e3a3b18 100755 --- a/stack.sh +++ b/stack.sh @@ -715,12 +715,18 @@ set -o xtrace echo_summary "Installing package prerequisites" if [[ "$os_PACKAGE" = "deb" ]]; then install_package $(get_packages $FILES/apts) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) else install_package $(get_packages $FILES/rpms) fi if [[ $SYSLOG != "False" ]]; then - install_package rsyslog-relp + if is_suse; then + install_package rsyslog-module-relp + else + install_package rsyslog-relp + fi fi if is_service_enabled rabbit; then @@ -738,7 +744,11 @@ elif is_service_enabled qpid; then fi elif is_service_enabled zeromq; then if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package zeromq python-zmq + if is_suse; then + install_package libzmq1 python-pyzmq + else + install_package zeromq python-zmq + fi else install_package libzmq1 python-zmq fi From afd472cb30ba90611b3b3907ad2570f26905532c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Nov 2012 11:54:45 -0600 Subject: [PATCH 027/207] Don't combine sed options Combining '-i -e' into '-ie' changes behaviour, don't do that Change-Id: Ice46c6b4f899b4c76f355cc88241dd33bc60f459 --- functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 9dab759a..f2b12e21 100644 --- a/functions +++ b/functions @@ -419,7 +419,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file } # Uncomment an option in an INI file @@ -428,7 +428,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file } @@ -457,12 +457,12 @@ function iniset() { fi if [[ -z 
"$(iniget $file $section $option)" ]]; then # Add it - sed -ie "/^\[ *$section *\]/ a\\ + sed -i -e "/^\[ *$section *\]/ a\\ $option = $value " $file else # Replace it - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file fi } From 9a352daf892a78bcef232e2da32b0d46c4c10fe7 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Wed, 28 Nov 2012 17:22:39 +0000 Subject: [PATCH 028/207] Install Tempest's dependencies along with it. * lib/tempest(install_tempest): Directly install Tempest's tools/pip-requires list after cloning the repo. Change-Id: I5c508faab8756d5cdfec53193e08e3440fda1b2c --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 115c9118..871e9e73 100644 --- a/lib/tempest +++ b/lib/tempest @@ -49,6 +49,10 @@ function configure_tempest() { # install_tempest() - Collect source and prepare function install_tempest() { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH + + # Tempest doesn't satisfy its dependencies on its own, so + # install them here instead. 
+ sudo pip install -r $TEMPEST_DIR/tools/pip-requires } From 9343df160e29a4a5193503ed6cd0e35d1e590e59 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 28 Nov 2012 10:05:53 +0000 Subject: [PATCH 029/207] Ensures that Quantum sets the correct signing directory Change-Id: I4f01a171f0ced73ba6b6000d225c8f5811f1874a --- lib/quantum | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/quantum b/lib/quantum index ba98b646..373d5217 100644 --- a/lib/quantum +++ b/lib/quantum @@ -6,6 +6,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} +QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { @@ -22,6 +23,12 @@ function quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR + sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + fi } function quantum_setup_ovs_bridge() { From 7c3053da69681ed5a57729812d4a357ac1c23b17 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 29 Nov 2012 09:19:16 +0100 Subject: [PATCH 030/207] Remove leftover references to files/pips/ The files/pips/* files were removed a while ago (replaced by pip-requires in individual projects). So remove leftover code that was dealing with that. 
Change-Id: Id521a3365ab018193607389f022a25acddb49714 --- stack.sh | 4 ---- tools/build_ramdisk.sh | 3 +-- tools/build_tempest.sh | 2 -- tools/build_uec.sh | 2 +- tools/build_uec_ramdisk.sh | 2 +- tools/{warm_apts_and_pips_for_uec.sh => warm_apts_for_uec.sh} | 4 +--- 6 files changed, 4 insertions(+), 13 deletions(-) rename tools/{warm_apts_and_pips_for_uec.sh => warm_apts_for_uec.sh} (88%) diff --git a/stack.sh b/stack.sh index 70f46104..5c5ad2a0 100755 --- a/stack.sh +++ b/stack.sh @@ -779,10 +779,6 @@ if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip fi -# Install python requirements -echo_summary "Installing Python prerequisites" -pip_install $(get_packages $FILES/pips | sort -u) - # Check Out Source # ---------------- diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 8e2c0be9..5ff05b08 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -108,7 +108,7 @@ function map_nbd { echo $NBD } -# Prime image with as many apt/pips as we can +# Prime image with as many apt as we can DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX` if [ ! -r $DEV_FILE ]; then @@ -121,7 +121,6 @@ if [ ! -r $DEV_FILE ]; then chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` - chroot $MNTDIR pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index e72355c9..1758e7da 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -48,8 +48,6 @@ DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest # Install tests and prerequisites -pip_install `cat $TOP_DIR/files/pips/tempest` - git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 48819c95..58c54258 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -68,7 +68,7 @@ fi # Option to warm the base image with software requirements. if [ $WARM_CACHE ]; then cd $TOOLS_DIR - ./warm_apts_and_pips_for_uec.sh $image_dir/disk + ./warm_apts_for_uec.sh $image_dir/disk fi # Name of our instance, used by libvirt diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 150ecabd..3ab5dafd 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -98,7 +98,7 @@ GUEST_NAME=${GUEST_NAME:-devstack} # Pre-load the image with basic environment if [ ! 
-e $image_dir/disk-primed ]; then cp $image_dir/disk $image_dir/disk-primed - $TOOLS_DIR/warm_apts_and_pips_for_uec.sh $image_dir/disk-primed + $TOOLS_DIR/warm_apts_for_uec.sh $image_dir/disk-primed $TOOLS_DIR/copy_dev_environment_to_uec.sh $image_dir/disk-primed fi diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_for_uec.sh similarity index 88% rename from tools/warm_apts_and_pips_for_uec.sh rename to tools/warm_apts_for_uec.sh index fe389ffe..3c15f52e 100755 --- a/tools/warm_apts_and_pips_for_uec.sh +++ b/tools/warm_apts_for_uec.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# **warm_apts_and_pips_for_uec.sh** +# **warm_apts_for_uec.sh** # Echo commands set -o xtrace @@ -48,8 +48,6 @@ cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf chroot $STAGING_DIR apt-get update chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true -mkdir -p $STAGING_DIR/var/cache/pip -PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true # Unmount umount $STAGING_DIR From b2fdafead20f5b11e7d53406db2ddb28b518f391 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 20 Nov 2012 15:52:21 +0000 Subject: [PATCH 031/207] Additional options for XenAPINFS cinder driver Related to bp xenapi-storage-manager-nfs Add configuration options to devstack scripts, so it is easier to set up a system with a XenAPINFS volume backend. It makes possible to test this configuration with exercises. 
To enable the XenAPINFS driver, specify: CINDER_DRIVER=XenAPINFS CINDER_XENAPI_CONNECTION_URL= CINDER_XENAPI_CONNECTION_USERNAME= CINDER_XENAPI_CONNECTION_PASSWORD= CINDER_XENAPI_NFS_SERVER= CINDER_XENAPI_NFS_SERVERPATH= in your localrc Change-Id: Ia214172aac377d273a03849c8cc2adcbf5b8f607 --- lib/cinder | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/cinder b/lib/cinder index 058fcc23..1aa34cd2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -24,6 +24,9 @@ set +o xtrace # Defaults # -------- +# set up default driver +CINDER_DRIVER=${CINDER_DRIVER:-default} + # set up default directories CINDER_DIR=$DEST/cinder CINDERCLIENT_DIR=$DEST/python-cinderclient @@ -145,6 +148,19 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" fi + + if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then + ( + set -u + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.xenapi_sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" + ) + [ $? -ne 0 ] && exit 1 + fi } # init_cinder() - Initialize database and volume group From 901eed70b4b6257ad3a9192c0d0522969ef67509 Mon Sep 17 00:00:00 2001 From: guillaume pernot Date: Thu, 29 Nov 2012 08:44:58 +0100 Subject: [PATCH 032/207] Add ResellerAdmin role to ceilometer user. For the sake of swift metering, 'ceilometer' user needs to be a ResellerAdmin for tenant 'service'. 
Change-Id: I65b3bdedddded9d5f3bac5c5d714288800ffa8b6 --- files/keystone_data.sh | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index f75d24a6..35793d84 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -87,6 +87,11 @@ MEMBER_ROLE=$(get_id keystone role-create --name=Member) keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT +# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. +# The admin role in swift allows a user to act as an admin for their tenant, +# but ResellerAdmin is needed for a user to act as any tenant. The name of this +# role is also configurable in swift-proxy.conf +RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- @@ -129,11 +134,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" fi # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. The admin role in swift allows a user - # to act as an admin for their tenant, but ResellerAdmin is needed - # for a user to act as any tenant. The name of this role is also - # configurable in swift-proxy.conf - RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # swift through the s3 api. keystone user-role-add \ --tenant_id $SERVICE_TENANT \ --user_id $NOVA_USER \ @@ -255,6 +256,10 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ --role_id $ADMIN_ROLE + # Ceilometer needs ResellerAdmin role to access swift account stats. 
+ keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ --role_id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ From ed30160c0454bcd7c203db0f331e2adfcbd62ea3 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 29 Nov 2012 16:52:59 +0100 Subject: [PATCH 033/207] Add OBJECT_CATALOG_TYPE to tempest config * add OBJECT_CATALOG_TYPE Change-Id: I776f7ce65e44ceef139e34a1b1aff52e069b90e6 --- tools/configure_tempest.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 9b543ab0..03dc6839 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -209,6 +209,9 @@ VOLUME_CATALOG_TYPE=volume LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} +# Object Storage +OBJECT_CATALOG_TYPE="object-store" + # EC2 and S3 test configuration BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" BOTO_S3_URL="http://$IDENTITY_HOST:3333" @@ -281,6 +284,7 @@ sed -e " s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; + s,%OBJECT_CATALOG_TYPE%,$OBJECT_CATALOG_TYPE,g; s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; s,%BOTO_S3_URL%,$BOTO_S3_URL,g; s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; From e1864c37f31a6e4d8680148c35ffc9f1bcc5d54a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:20:34 -0500 Subject: [PATCH 034/207] enable mod_wsgi on horizon_init on a clean ubuntu environment wsgi wasn't getting enabled ensure that it actually turns on wsgi so that apache can start Change-Id: I9c74f7c5d5d2f995843b2a649a52f7159c7de314 --- lib/horizon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/horizon b/lib/horizon index af09f770..6173042f 100644 ---
a/lib/horizon +++ b/lib/horizon @@ -79,6 +79,8 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch /etc/$APACHE_NAME/$APACHE_CONF sudo a2ensite horizon + # WSGI doesn't enable by default, enable it + sudo a2enmod wsgi else # Install httpd, which is NOPRIME'd if is_suse; then From a61eb6af5d4fe7affa3a8c8da6d3b4126e7764bf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:51:34 -0500 Subject: [PATCH 035/207] remove hard tabs from keystone_data.sh hard tabs somehow snuck into keystone_data.sh, noticed in an unrelated review. Remove for consistency. Change-Id: I04f3b4597fd3629c7f123588c512832a67228597 --- files/keystone_data.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 7da07aaa..9b07d0b3 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -92,16 +92,16 @@ keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $ # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_id keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" + KEYSTONE_SERVICE=$(get_id keystone service-create \ + --name=keystone \ + --type=identity \ + --description="Keystone Identity Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ + --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ + --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" fi # Nova From ff7f308e9cbdaf69fa116a628ed3114bb7aad54e Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: 
Thu, 29 Nov 2012 22:00:51 -0500 Subject: [PATCH 036/207] Start nova-conductor before nova-compute. nova-compute is going to need to talk to nova-conductor during startup, so go ahead and start conductor before compute. Change-Id: I565436e06b5bf4189ead0a57d57ec2ce4cf79bd8 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d15d9e31..6445a073 100644 --- a/lib/nova +++ b/lib/nova @@ -423,6 +423,7 @@ function start_nova() { # The group **libvirtd** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" @@ -430,7 +431,6 @@ function start_nova() { screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." 
screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" } # stop_nova() - Stop running processes (non-screen) From 08b4e9b445f460d36a78a68b5273aee8155e4839 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 30 Nov 2012 13:31:49 +0000 Subject: [PATCH 037/207] heat : Remove heat-metadata as it has been removed in heat master The heat-metadata service has been removed as of the following commit in heat master: 6ae3ff0 Remove heat-metadata service So remove the heat-metadata service and related config-file items Change-Id: If36efe5924e9e0a7697f51dd3c9fc140fed8090b Signed-off-by: Steven Hardy --- lib/heat | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/lib/heat b/lib/heat index 396c8a05..b640fbca 100644 --- a/lib/heat +++ b/lib/heat @@ -1,7 +1,7 @@ # lib/heat # Install and start Heat service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng,h-meta +# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng # Dependencies: # - functions @@ -52,8 +52,6 @@ function configure_heat() { HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST} - HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} @@ -126,7 +124,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url 
http://$HEAT_METADATA_HOST:$HEAT_METADATA_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_CFN_HOST:$HEAT_CFN_PORT/v1/waitcondition iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat @@ -141,26 +139,6 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - # metadata api - HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf - cp $HEAT_DIR/etc/heat/heat-metadata.conf $HEAT_METADATA_CONF - iniset $HEAT_METADATA_CONF DEFAULT debug True - inicomment $HEAT_METADATA_CONF DEFAULT log_file - iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST - iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT - - if is_service_enabled rabbit; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi - - HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini - cp $HEAT_DIR/etc/heat/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI - # cloudwatch api HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF @@ -217,13 +195,12 @@ function start_heat() { screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" - screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" } # stop_heat() - Stop 
running processes function stop_heat() { # Kill the cinder screen windows - for serv in h-eng h-api-cfn h-api-cw h-meta; do + for serv in h-eng h-api-cfn h-api-cw; do screen -S $SCREEN_NAME -p $serv -X kill done } From ece6a332b7d5791c73071fbfea5723d4991c6c85 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 29 Nov 2012 14:19:41 +0100 Subject: [PATCH 038/207] Refactor swift installation * Optimize loops * Move install steps to the lib/swift Change-Id: Ie8a74b2627395620ccb0501171fa0150ee7497f2 --- lib/swift | 364 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 315 +++------------------------------------------- unstack.sh | 3 +- 3 files changed, 384 insertions(+), 298 deletions(-) create mode 100644 lib/swift diff --git a/lib/swift b/lib/swift new file mode 100644 index 00000000..7acb1dfe --- /dev/null +++ b/lib/swift @@ -0,0 +1,364 @@ +# lib/swift +# Functions to control the configuration and operation of the swift service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# ``lib/keystone`` file +# ``stack.sh`` calls the entry points in this order: +# +# install_swift +# configure_swift +# init_swift +# start_swift +# stop_swift +# cleanup_swift + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories + +SWIFT_DIR=$DEST/swift +SWIFTCLIENT_DIR=$DEST/python-swiftclient + +# TODO: add logging to different location. + +# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. +# Default is the common DevStack data directory. +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} + +# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. +# Default is ``/etc/swift``. +SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} + +# DevStack will create a loop-back disk formatted as XFS to store the +# swift data. 
Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. +# Default is 1 gigabyte. +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} + +# The ring uses a configurable number of bits from a path’s MD5 hash as +# a partition index that designates a device. The number of bits kept +# from the hash is known as the partition power, and 2 to the partition +# power indicates the partition count. Partitioning the full MD5 hash +# ring allows other parts of the cluster to work in batches of items at +# once which ends up either more efficient or at least less complex than +# working with each item separately or the entire cluster all at once. +# By default we define 9 for the partition count (which mean 512). +SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} + +# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be +# configured for your Swift cluster. By default the three replicas would need a +# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do +# only some quick testing. 
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} +SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) + +# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` +# Port bases used in port number calclution for the service "nodes" +# The specified port number will be used, the additinal ports calculated by +# base_port + node_num * 10 +OBJECT_PORT_BASE=6010 +CONTAINER_PORT_BASE=6011 +ACCOUNT_PORT_BASE=6012 + +# Entry Points +# ------------ + +# cleanup_swift() - Remove residual data files +function cleanup_swift() { + rm -f ${SWIFT_CONFIG_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + rm ${SWIFT_DATA_DIR}/drives/images/swift.img + fi +} + +# configure_swift() - Set config files, create data dirs and loop image +function configure_swift() { + local swift_auth_server + local node_number + local swift_node_config + local swift_log_dir + + setup_develop $SWIFT_DIR + + # Make sure to kill all swift processes first + swift-init all stop || true + + # First do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + + USER_GROUP=$(id -g) + sudo mkdir -p ${SWIFT_DATA_DIR}/drives + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + + # Create a loopback disk and format it to XFS. 
+ if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + else + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + fi + + # Make a fresh XFS filesystem + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + + # Mount the disk with mount options to make it as efficient as possible + mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 + if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 + fi + + # Create a link to the above mount and + # create all of the directories needed to emulate a few different servers + for node_number in ${SWIFT_REPLICAS_SEQ}; do + sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; + drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} + node=${SWIFT_DATA_DIR}/${node_number}/node + node_device=${node}/sdb1 + [[ -d $node ]] && continue + [[ -d $drive ]] && continue + sudo install -o ${USER} -g $USER_GROUP -d $drive + sudo install -o ${USER} -g $USER_GROUP -d $node_device + sudo chown -R $USER: ${node} + done + + sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift + sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift + + if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then + # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. 
+ # Create a symlink if the config dir is moved + sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift + fi + + # Swift use rsync to synchronize between all the different + # partitions (which make more sense when you have a multi-node + # setup) we configure it with our version of rsync. + sed -e " + s/%GROUP%/${USER_GROUP}/; + s/%USER%/$USER/; + s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; + " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf + # rsyncd.conf just prepared for 4 nodes + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + else + sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync + fi + + if is_service_enabled swift3;then + swift_auth_server="s3token " + fi + + # By default Swift will be installed with the tempauth middleware + # which has some default username and password if you have + # configured keystone it will checkout the directory. + if is_service_enabled key; then + swift_auth_server+="authtoken keystoneauth" + else + swift_auth_server=tempauth + fi + + SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf + cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + + # Only enable Swift3 if we have it enabled in ENABLED_SERVICES + is_service_enabled swift3 && swift3=swift3 || swift3="" + + iniset 
${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" + + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + + # Configure Keystone + sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" + + if is_service_enabled swift3; then + cat <>${SWIFT_CONFIG_PROXY_SERVER} +# NOTE(chmou): s3token middleware is not updated yet to use only +# username and password. 
+[filter:s3token] +paste.filter_factory = keystone.middleware.s3_token:filter_factory +auth_port = ${KEYSTONE_AUTH_PORT} +auth_host = ${KEYSTONE_AUTH_HOST} +auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +auth_token = ${SERVICE_TOKEN} +admin_token = ${SERVICE_TOKEN} + +[filter:swift3] +use = egg:swift3#swift3 +EOF + fi + + cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf + iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} + + # This function generates an object/account/proxy configuration + # emulating 4 nodes on different ports + function generate_swift_config() { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + + log_facility=$[ node_id - 1 ] + node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + } + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] + + 
swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + + swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + done + + swift_log_dir=${SWIFT_DATA_DIR}/logs + rm -rf ${swift_log_dir} + mkdir -p ${swift_log_dir}/hourly + sudo chown -R $USER:adm ${swift_log_dir} + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf + +} + +# configure_swiftclient() - Set config files, create data dirs, etc +function configure_swiftclient() { + setup_develop $SWIFTCLIENT_DIR +} + +# init_swift() - Initialize rings +function init_swift() { + local node_number + # Make sure to kill all swift processes first + swift-init all stop || true + + # This is where we create three different rings for swift with + # different object servers binding on different ports. 
+ pushd ${SWIFT_CONFIG_DIR} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + done + swift-ring-builder object.builder rebalance + swift-ring-builder container.builder rebalance + swift-ring-builder account.builder rebalance + } && popd >/dev/null + +} + +function install_swift() { + git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH +} + +function install_swiftclient() { + git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH +} + + +# start_swift() - Start running processes, including screen +function start_swift() { + # (re)start rsyslog + restart_service rsyslog + # Start rsync + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /etc/init.d/rsync restart || : + else + sudo systemctl start xinetd.service + fi + + # First spawn all the swift services then kill the + # proxy service so we can run it in foreground in screen. + # ``swift-init ... 
{stop|restart}`` exits with '1' if no servers are running, + # ignore it just in case + swift-init all restart || true + swift-init proxy stop || true + screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" +} + +# stop_swift() - Stop running processes (non-screen) +function stop_swift() { + # screen normally killed by unstack.sh + swift-init all stop || true +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 70f46104..dbb53ecb 100755 --- a/stack.sh +++ b/stack.sh @@ -105,7 +105,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" @@ -310,6 +310,7 @@ source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum @@ -319,9 +320,7 @@ source $TOP_DIR/lib/tempest HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC -SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 -SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient @@ -503,41 +502,6 @@ if is_service_enabled rabbit; then read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi - -# Swift -# ----- - -# TODO: add logging to different location. - -# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. -# Default is the common DevStack data directory. 
-SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} - -# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. -# Default is ``/etc/swift``. -SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} - -# DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. -# Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} - -# The ring uses a configurable number of bits from a path’s MD5 hash as -# a partition index that designates a device. The number of bits kept -# from the hash is known as the partition power, and 2 to the partition -# power indicates the partition count. Partitioning the full MD5 hash -# ring allows other parts of the cluster to work in batches of items at -# once which ends up either more efficient or at least less complex than -# working with each item separately or the entire cluster all at once. -# By default we define 9 for the partition count (which mean 512). -SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} - -# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be -# configured for your Swift cluster. By default the three replicas would need a -# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do -# only some quick testing. 
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} - if is_service_enabled swift; then # If we are using swift3, we can default the s3 port to swift instead # of nova-objectstore @@ -793,7 +757,6 @@ echo_summary "Installing OpenStack project source" install_keystoneclient install_glanceclient install_novaclient - # Check out the client libs that are used most git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH @@ -802,16 +765,16 @@ if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) install_keystone fi + if is_service_enabled swift; then - # storage service - git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - # storage service client and and Library - git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH + install_swiftclient + install_swift if is_service_enabled swift3; then # swift3 middleware to provide S3 emulation to Swift git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH fi fi + if is_service_enabled g-api n-api; then # image catalog service install_glance @@ -867,11 +830,11 @@ if is_service_enabled key g-api n-api swift; then configure_keystone fi if is_service_enabled swift; then - setup_develop $SWIFT_DIR - setup_develop $SWIFTCLIENT_DIR -fi -if is_service_enabled swift3; then - setup_develop $SWIFT3_DIR + configure_swift + configure_swiftclient + if is_service_enabled swift3; then + setup_develop $SWIFT3_DIR + fi fi if is_service_enabled g-api n-api; then configure_glance @@ -1439,253 +1402,7 @@ fi if is_service_enabled swift; then echo_summary "Configuring Swift" - - # Make sure to kill all swift processes first - swift-init all stop || true - - # First do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} - - # Create a loopback disk and format it to XFS. 
- if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - else - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - fi - - # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 - fi - - # Create a link to the above mount - for x in $(seq ${SWIFT_REPLICAS}); do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done - - # Create all of the directories needed to emulate a few different servers - for x in $(seq ${SWIFT_REPLICAS}); do - drive=${SWIFT_DATA_DIR}/drives/sdb1/${x} - node=${SWIFT_DATA_DIR}/${x}/node - node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} - done - - sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift - - if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then - # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. 
- # Create a symlink if the config dir is moved - sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift - fi - - # Swift use rsync to synchronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e " - s/%GROUP%/${USER_GROUP}/; - s/%USER%/$USER/; - s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; - " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else - sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync - fi - - if is_service_enabled swift3;then - swift_auth_server="s3token " - fi - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. - if is_service_enabled key; then - swift_auth_server+="authtoken keystoneauth" - else - swift_auth_server=tempauth - fi - - SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf - cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - - # Only enable Swift3 if we have it enabled in ENABLED_SERVICES - is_service_enabled swift3 && swift3=swift3 || swift3="" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline 
"catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - # Configure Keystone - sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - - if is_service_enabled swift3; then - cat <>${SWIFT_CONFIG_PROXY_SERVER} -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. 
-[filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory -auth_port = ${KEYSTONE_AUTH_PORT} -auth_host = ${KEYSTONE_AUTH_HOST} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} - -[filter:swift3] -use = egg:swift3#swift3 -EOF - fi - - cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf - iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - - # This function generates an object/account/proxy configuration - # emulating 4 nodes on different ports - function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - local swift_node_config - - for node_number in $(seq ${SWIFT_REPLICAS}); do - node_path=${SWIFT_DATA_DIR}/${node_number} - swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf - - cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - 
generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 - - # Specific configuration for swift for rsyslog. See - # ``/etc/rsyslog.d/10-swift.conf`` for more info. - swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} - sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ - tee /etc/rsyslog.d/10-swift.conf - restart_service rsyslog - - # This is where we create three different rings for swift with - # different object servers binding on different ports. - pushd ${SWIFT_CONFIG_DIR} >/dev/null && { - - rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - - port_number=6010 - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder object.builder rebalance - - port_number=6011 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder container.builder rebalance - - port_number=6012 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder account.builder rebalance - - } && popd >/dev/null - - # Start rsync - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo /etc/init.d/rsync restart || : - else - sudo systemctl start xinetd.service - fi - - # First spawn all the swift services then kill the - # proxy service so we can run it in foreground in screen. - # ``swift-init ... 
{stop|restart}`` exits with '1' if no servers are running, - # ignore it just in case - swift-init all restart || true - swift-init proxy stop || true - - unset s swift_hash swift_auth_server + init_swift fi @@ -1802,6 +1519,12 @@ fi # Only run the services specified in ``ENABLED_SERVICES`` +# Launch Swift Services +if is_service_enabled swift; then + echo_summary "Starting Swift" + start_swift +fi + # Launch the Glance services if is_service_enabled g-api g-reg; then echo_summary "Starting Glance" @@ -1905,8 +1628,6 @@ if is_service_enabled ceilometer; then start_ceilometer fi -screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" - # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. is_service_enabled swift3 || \ diff --git a/unstack.sh b/unstack.sh index 0040cf1e..20ba17b6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -27,6 +27,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/swift # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -47,7 +48,7 @@ fi # Swift runs daemons if is_service_enabled swift; then - swift-init all stop 2>/dev/null || true + stop_swift fi # Apache has the WSGI processes From 0da8dbd0a369d90e7eafd88e54a1cff91f09448d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:37:14 -0500 Subject: [PATCH 039/207] create a horizon exercise for sanity check from time to time horizon gets broken in devstack and it takes a while to figure out why. Put a sanity check into devstack exercises that checks for horizon front page being up and not just a stack trace to use as a simple gate. 
Change-Id: I13a6c59881f618d5194b1625b67115013c9cb6c2 --- exercises/horizon.sh | 45 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 exercises/horizon.sh diff --git a/exercises/horizon.sh b/exercises/horizon.sh new file mode 100755 index 00000000..c5dae3ab --- /dev/null +++ b/exercises/horizon.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# **horizon.sh** + +# Sanity check that horizon started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled horizon || exit 55 + +# can we get the front page +curl http://$SERVICE_HOST 2>/dev/null | grep -q '

Log In

' || die "Horizon front page not functioning!" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + From 83e109571ec2c2a08a378d9c3a69bfbf3f5ec1ba Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 30 Nov 2012 23:28:07 +0100 Subject: [PATCH 040/207] Allow Versioning with swift * add the allow_versions to the container configs Change-Id: I2d39ba7c60f5f1c4cd4f80ed61a02a64979e8f19 --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index 7acb1dfe..366c467b 100644 --- a/lib/swift +++ b/lib/swift @@ -277,6 +277,8 @@ EOF swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + iniuncomment ${swift_node_config} app:container-server allow_versions + iniset ${swift_node_config} app:container-server allow_versions "true" swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} From 93923ebeed2e5ef4f8b9733f5457f97615f29477 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 30 Nov 2012 17:51:12 -0500 Subject: [PATCH 041/207] fix typo in get_uec_image.sh it's precise, not percise. 
Fixes bug #1085233 Change-Id: I857c233c9b461b7cc6e2ac356aa6f2cd6be93ff5 --- tools/get_uec_image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 156fd439..3c62064a 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -65,7 +65,7 @@ KERNEL=$3 case $DIST_NAME in quantal) ;; - percise) ;; + precise) ;; oneiric) ;; natty) ;; maverick) ;; From 99fcd8184d078efb9f8fcbe6d714b04ea44259e1 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 2 Dec 2012 13:07:39 -0800 Subject: [PATCH 042/207] A few more NOVA_BIN_DIR cleanups. Change-Id: I1d1225c894f1857a3723e01f18d0f0dade670dc9 --- stack.sh | 2 +- tools/configure_tempest.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..aca4cff0 100755 --- a/stack.sh +++ b/stack.sh @@ -1543,7 +1543,7 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov add_nova_opt "s3_affix_tenant=True" fi -screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" +screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 03dc6839..298fa9ba 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -185,7 +185,7 @@ SSH_TIMEOUT=4 # Whitebox testing configuration for Compute... COMPUTE_WHITEBOX_ENABLED=True COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR -COMPUTE_BIN_DIR=/usr/bin/nova +COMPUTE_BIN_DIR=$NOVA_BIN_DIR COMPUTE_CONFIG_PATH=/etc/nova/nova.conf # TODO(jaypipes): Create the key file here... right now, no whitebox # tests actually use a key. 
From 1a7bbd255fd02fbfee6a26b990d15d38402f1992 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 3 Dec 2012 17:04:02 +1300 Subject: [PATCH 043/207] is_suse false positives on Fedora Also, uses of is_suse were also always evaluating to true on Fedora. Change-Id: I068f3179edbfb295163a4e4faa4998f2f7b2c124 --- functions | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/functions b/functions index f2b12e21..9022675a 100644 --- a/functions +++ b/functions @@ -349,8 +349,7 @@ function is_suse { GetOSVersion fi - [[ "$os_VENDOR" = "openSUSE" || "$os_VENDOR" = "SUSE LINUX" ]] - return $? + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] } @@ -646,7 +645,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then CMD_PIP=/usr/bin/pip else CMD_PIP=/usr/bin/pip-python @@ -1040,7 +1039,7 @@ function get_rootwrap_location() { GetOSVersion fi - if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then echo "/usr/local/bin/$module-rootwrap" else echo "/usr/bin/$module-rootwrap" @@ -1057,7 +1056,7 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. - [[ "$DISTRO" = "oneiric" || is_suse ]] + [[ "$DISTRO" = "oneiric" ]] || is_suse return $? 
} From 8f393df3524563813068ac0a646b86bbcfa28d30 Mon Sep 17 00:00:00 2001 From: Sean Gallagher Date: Mon, 3 Dec 2012 00:17:38 -0800 Subject: [PATCH 044/207] Use TOP_DIR to find devstack directory Change two statements to use TOP_DIR instead of PWD Change-Id: I6d3d16ce853493a06850b078d39e964f873c16fe Fixes: bug #1085819 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..e94c1082 100755 --- a/stack.sh +++ b/stack.sh @@ -196,8 +196,8 @@ if [[ $EUID -eq 0 ]]; then > /etc/sudoers.d/50_stack_sh ) echo "Copying files to stack user" - STACK_DIR="$DEST/${PWD##*/}" - cp -r -f -T "$PWD" "$STACK_DIR" + STACK_DIR="$DEST/${TOP_DIR##*/}" + cp -r -f -T "$TOP_DIR" "$STACK_DIR" chown -R stack "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack From 9a27dd8dd557c9bf64bfaf4965eff7a8f75fb1af Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 3 Dec 2012 12:41:02 +0000 Subject: [PATCH 045/207] heat : heat repo moved to openstack Main heat repo has now moved under the github openstack project Note the old checkout will require removal to trigger stack.sh to re-clone it from the new location Change-Id: I4163e35cad7c319961d42f0c53a68ec6244508ed Signed-off-by: Steven Hardy --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 9588cf99..39d34b0b 100644 --- a/stackrc +++ b/stackrc @@ -97,7 +97,7 @@ TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master # heat service -HEAT_REPO=${GIT_BASE}/heat-api/heat.git +HEAT_REPO=${GIT_BASE}/openstack/heat.git HEAT_BRANCH=master # python heat client library From c2d2f52bbdf7b83fbd74a7396c7380b6da9b2ae3 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 3 Dec 2012 10:02:40 -0500 Subject: [PATCH 046/207] Fix qpid support on Fedora. The new qpid_is_supported function returned the opposite value from what it was supposed to. 
It returned success for the platforms where qpid is not supported. Change-Id: I0ceaae1dddaa6192657926834c6eb8006925f0cf --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 9022675a..aaa00a49 100644 --- a/functions +++ b/functions @@ -1056,7 +1056,7 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. - [[ "$DISTRO" = "oneiric" ]] || is_suse + ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) return $? } From 205bc49ef5fd642fe83fddd07cc2578ed7c6f165 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Fri, 16 Nov 2012 00:15:28 -0500 Subject: [PATCH 047/207] adding Quantum metadata service support Change-Id: I8985c169401eee7a435b99293bdf6d42f28ab66c --- stack.sh | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..6d8e1f67 100755 --- a/stack.sh +++ b/stack.sh @@ -1331,9 +1331,10 @@ if is_service_enabled q-l3; then # Set debug iniset $Q_L3_CONF_FILE DEFAULT debug True - iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url @@ -1354,6 +1355,27 @@ if is_service_enabled q-l3; then fi fi +#Quantum Metadata +if is_service_enabled q-meta; then + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=/etc/quantum/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + # Set verbose + iniset $Q_META_CONF_FILE DEFAULT verbose True + # Set debug + iniset $Q_META_CONF_FILE DEFAULT debug True + + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + + iniset $Q_META_CONF_FILE DEFAULT 
root_helper "$Q_RR_COMMAND" + + quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +fi + # Quantum RPC support - must be updated prior to starting any of the services if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT control_exchange quantum @@ -1442,6 +1464,9 @@ if is_service_enabled nova; then fi add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + add_nova_opt "service_quantum_metadata_proxy=True" + fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" add_nova_opt "public_interface=$PUBLIC_INTERFACE" @@ -1611,6 +1636,7 @@ fi # Start up the quantum agents if enabled screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" +screen_it q-meta "python $AGENT_META_BINARY --config-file $Q_CONF_FILE --config-file=$Q_META_CONF_FILE" screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" if is_service_enabled nova; then From 8ec27220c5c63de59f129c839eddf5380efe46a4 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 29 Nov 2012 09:25:31 +0100 Subject: [PATCH 048/207] Add a get_pip_command function There are two places where we need to find the right command for pip, so instead of having one version we fix and a buggy version we forget, simply use a function :-) Change-Id: I728c17ad7be5c86690c4d7907f77f1f98ec2b815 --- functions | 19 ++++++++++++++----- tools/info.sh | 6 +----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 9022675a..bc6fdc35 100644 --- a/functions +++ b/functions @@ -645,11 +645,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then - CMD_PIP=/usr/bin/pip - else - CMD_PIP=/usr/bin/pip-python - fi 
+ CMD_PIP=$(get_pip_command) fi if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" @@ -1046,6 +1042,19 @@ function get_rootwrap_location() { fi } +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + echo "/usr/bin/pip" + else + echo "/usr/bin/pip-python" + fi +} # Check if qpid can be used on the current distro. # qpid_is_supported diff --git a/tools/info.sh b/tools/info.sh index 5c9a1d3d..a872d59d 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -107,11 +107,7 @@ done # Pips # ---- -if [[ "$os_PACKAGE" = "deb" ]]; then - CMD_PIP=/usr/bin/pip -else - CMD_PIP=/usr/bin/pip-python -fi +CMD_PIP=$(get_pip_command) # Pip tells us what is currently installed FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) From d835de892a9426a96f16e187d23eff715311d492 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Nov 2012 17:11:35 -0600 Subject: [PATCH 049/207] Move keystone account creation out of keystone_data.sh keystone_data.sh is getting unwieldly and increasingly needs configuration information for services. Also need the ability to manipulate HOST/IP information for hosts to handle service HA/proxy configurations. Begin moving the creation of service account information into the service lib files, starting with the common accounts and keystone itself. 
Change-Id: Ie259f7b71983c4f4a2e33ab9c8a8e2b00238ba38 --- files/keystone_data.sh | 63 ++----------------------- lib/keystone | 101 ++++++++++++++++++++++++++++++++++++++++- stack.sh | 16 ++++--- 3 files changed, 112 insertions(+), 68 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 20749bc6..c8e68dd6 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -4,7 +4,6 @@ # # Tenant User Roles # ------------------------------------------------------------------ -# admin admin admin # service glance admin # service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled @@ -12,9 +11,6 @@ # service cinder admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled -# demo admin admin -# demo demo Member, anotherrole -# invisible_to_admin demo Member # Tempest Only: # alt_demo alt_demo Member # @@ -40,53 +36,14 @@ function get_id () { echo `"$@" | awk '/ id / { print $4 }'` } - -# Tenants -# ------- - -ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) -SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) -DEMO_TENANT=$(get_id keystone tenant-create --name=demo) -INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin) - - -# Users -# ----- - -ADMIN_USER=$(get_id keystone user-create --name=admin \ - --pass="$ADMIN_PASSWORD" \ - --email=admin@example.com) -DEMO_USER=$(get_id keystone user-create --name=demo \ - --pass="$ADMIN_PASSWORD" \ - --email=demo@example.com) +# Lookups +SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") +ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") # Roles # ----- -ADMIN_ROLE=$(get_id keystone role-create --name=admin) -KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) -KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin) -# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used -# 
TODO(sleepsonthefloor): show how this can be used for rbac in the future! -ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole) - - -# Add Roles to Users in Tenants -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT - -# TODO(termie): these two might be dubious -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT - - -# The Member role is used by Horizon and Swift so we need to keep it: -MEMBER_ROLE=$(get_id keystone role-create --name=Member) -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT - # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their tenant, # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this @@ -96,20 +53,6 @@ RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- -# Keystone -if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_id keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" -fi - # Nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then NOVA_USER=$(get_id keystone user-create \ diff --git a/lib/keystone b/lib/keystone index ae890567..f6a6d667 100644 --- a/lib/keystone +++ b/lib/keystone @@ -15,6 +15,7 @@ # configure_keystone # init_keystone # start_keystone +# create_keystone_accounts # stop_keystone # cleanup_keystone @@ -45,7 +46,6 @@ KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Set Keystone interface configuration -KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} @@ -144,6 +144,100 @@ function configure_keystone() { } +# create_keystone_accounts() - Sets up common required keystone accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service -- -- +# -- -- Member +# admin admin admin +# demo admin admin +# demo demo Member, anotherrole +# invisible_to_admin demo Member + +# Migrated from keystone_data.sh +create_keystone_accounts() { + + # admin + ADMIN_TENANT=$(keystone tenant-create \ + --name admin \ + | grep " id " | get_field 2) + ADMIN_USER=$(keystone user-create \ + --name admin \ + --pass "$ADMIN_PASSWORD" \ + --email admin@example.com \ + | grep " id " | get_field 2) + 
ADMIN_ROLE=$(keystone role-create \ + --name admin \ + | grep " id " | get_field 2) + keystone user-role-add \ + --user_id $ADMIN_USER \ + --role_id $ADMIN_ROLE \ + --tenant_id $ADMIN_TENANT + + # service + SERVICE_TENANT=$(keystone tenant-create \ + --name $SERVICE_TENANT_NAME \ + | grep " id " | get_field 2) + + # The Member role is used by Horizon and Swift so we need to keep it: + MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used + # TODO(sleepsonthefloor): show how this can be used for rbac in the future! + ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + + # invisible tenant - admin can't see this one + INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + + # demo + DEMO_TENANT=$(keystone tenant-create \ + --name=demo \ + | grep " id " | get_field 2) + DEMO_USER=$(keystone user-create \ + --name demo \ + --pass "$ADMIN_PASSWORD" \ + --email demo@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + + # Keystone + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + KEYSTONE_SERVICE=$(keystone service-create \ + --name keystone \ + --type identity \ + --description "Keystone Identity Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \ + --internalurl 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" + fi + + # TODO(dtroyer): This is part of a series of changes...remove these when + # complete if they are really unused +# KEYSTONEADMIN_ROLE=$(keystone role-create \ +# --name KeystoneAdmin \ +# | grep " id " | get_field 2) +# KEYSTONESERVICE_ROLE=$(keystone role-create \ +# --name KeystoneServiceAdmin \ +# | grep " id " | get_field 2) + + # TODO(termie): these two might be dubious +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONEADMIN_ROLE \ +# --tenant_id $ADMIN_TENANT +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONESERVICE_ROLE \ +# --tenant_id $ADMIN_TENANT +} + # init_keystone() - Initialize databases, etc. function init_keystone() { # (Re)create keystone database @@ -176,6 +270,11 @@ function install_keystone() { function start_keystone() { # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + echo "Waiting for keystone to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then + echo "keystone did not start" + exit 1 + fi } # stop_keystone() - Stop running processes diff --git a/stack.sh b/stack.sh index 8e8c5199..5ab0f8e7 100755 --- a/stack.sh +++ b/stack.sh @@ -953,15 +953,16 @@ if is_service_enabled key; then configure_keystone init_keystone start_keystone - echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then - echo "keystone did not start" - exit 1 - fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. 
+ # Set up a temporary admin URI for Keystone SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + # Do the keystone-specific bits from keystone_data.sh + export OS_SERVICE_TOKEN=$SERVICE_TOKEN + export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + create_keystone_accounts + + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ @@ -974,6 +975,7 @@ if is_service_enabled key; then export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD + unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi @@ -1750,7 +1752,7 @@ fi # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then - echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" + echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" echo "Examples on using novaclient command line is in exercise.sh" echo "The default users are: admin and demo" echo "The password: $ADMIN_PASSWORD" From 2ed63f4f8ede2a5819eb76f109a947f6bab24d0d Mon Sep 17 00:00:00 2001 From: Martin Vidner Date: Tue, 4 Dec 2012 10:33:49 +0100 Subject: [PATCH 050/207] Fix the default for APACHE_GROUP It should be the group of the effective apache user. For example, on openSUSE, we use wwwrun:www for apache and $USER:users for users. 
Change-Id: I8e12a8d90d45cfd18e67a41cf5462216ae404733 --- lib/horizon | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/horizon b/lib/horizon index 6173042f..1f68d795 100644 --- a/lib/horizon +++ b/lib/horizon @@ -29,10 +29,10 @@ set +o xtrace # Set up default directories HORIZON_DIR=$DEST/horizon -# Allow overriding the default Apache user and group, default both to -# current user. +# Allow overriding the default Apache user and group, default to +# current user and his default group. APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} +APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} # Entry Points From c18b96515279064c85cb7a71939d9e9de961d905 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 4 Dec 2012 12:36:34 +0100 Subject: [PATCH 051/207] Add is_ubuntu function This replaces all of the [[ "$os_PACKAGE" = "deb" ]] tests, except when those tests are before straight calls to dpkg. Change-Id: I8a3ebf1b1bc5a55d736f9258d5ba1d24dabf04ea --- functions | 47 +++++++++++++++++++-------------------------- lib/cinder | 2 +- lib/databases/mysql | 4 ++-- lib/horizon | 6 +++--- lib/nova | 8 ++++---- lib/swift | 4 ++-- stack.sh | 4 ++-- tools/info.sh | 2 +- unstack.sh | 4 ++-- 9 files changed, 37 insertions(+), 44 deletions(-) diff --git a/functions b/functions index 794e4747..0911557f 100644 --- a/functions +++ b/functions @@ -341,6 +341,19 @@ function GetDistro() { } +# Determine if current distribution is an Ubuntu-based distribution. +# It will also detect non-Ubuntu but Debian-based distros; this is not an issue +# since Debian and Ubuntu should be compatible. +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + [ "$os_PACKAGE" = "deb" ] +} + + # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse @@ -580,11 +593,7 @@ function disable_negated_services() { # Distro-agnostic package installer # install_package package [package ...] 
function install_package() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update NO_UPDATE_REPOS=True @@ -609,6 +618,7 @@ function is_package_installed() { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi + if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null return $? @@ -661,10 +671,7 @@ function pip_install { # Service wrapper to restart services # restart_service service-name function restart_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 restart else sudo /sbin/service $1 restart @@ -746,10 +753,7 @@ function setup_develop() { # Service wrapper to start services # start_service service-name function start_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 start else sudo /sbin/service $1 start @@ -760,10 +764,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name function stop_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 stop else sudo /sbin/service $1 stop @@ -1031,11 +1032,7 @@ function add_user_to_group() { function get_rootwrap_location() { local module=$1 - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + if is_ubuntu || is_suse; then echo "/usr/local/bin/$module-rootwrap" else echo "/usr/bin/$module-rootwrap" @@ -1045,11 +1042,7 @@ function get_rootwrap_location() { # Get the path to the pip command. 
# get_pip_command function get_pip_command() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + if is_ubuntu || is_suse; then echo "/usr/bin/pip" else echo "/usr/bin/pip-python" diff --git a/lib/cinder b/lib/cinder index 1aa34cd2..ce160bf0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -237,7 +237,7 @@ function _configure_tgt_for_config_d() { # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then _configure_tgt_for_config_d if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf diff --git a/lib/databases/mysql b/lib/databases/mysql index eb84f2ca..60ea143f 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ function recreate_database_mysql { function configure_database_mysql { echo_summary "Configuring and starting MySQL" - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then MY_CONF=/etc/mysql/my.cnf MYSQL=mysql else @@ -61,7 +61,7 @@ default-storage-engine = InnoDB" $MY_CONF } function install_database_mysql { - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then # Seed configuration with mysql password so that apt-get install doesn't # prompt us for a password upon install. 
cat < natty ]]; then cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" sudo mkdir -p /cgroup @@ -228,7 +228,7 @@ cgroup_device_acl = [ EOF fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then LIBVIRT_DAEMON=libvirt-bin else # http://wiki.libvirt.org/page/SSHPolicyKitSetup @@ -393,7 +393,7 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then LIBVIRT_PKG_NAME=libvirt-bin else LIBVIRT_PKG_NAME=libvirt @@ -403,7 +403,7 @@ function install_nova() { # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then if [[ "$DISTRO" > natty ]]; then install_package cgroup-lite fi diff --git a/lib/swift b/lib/swift index 366c467b..140e5e9b 100644 --- a/lib/swift +++ b/lib/swift @@ -159,7 +159,7 @@ function configure_swift() { s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf # rsyncd.conf just prepared for 4 nodes - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync else sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync @@ -341,7 +341,7 @@ function start_swift() { # (re)start rsyslog restart_service rsyslog # Start rsync - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /etc/init.d/rsync restart || : else sudo systemctl start xinetd.service diff --git a/stack.sh b/stack.sh index 55eafa82..94283563 100755 --- a/stack.sh +++ b/stack.sh @@ -677,7 +677,7 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -if [[ "$os_PACKAGE" = "deb" ]]; then +if is_ubuntu; then install_package $(get_packages $FILES/apts) elif is_suse; then install_package $(get_packages 
$FILES/rpms-suse) @@ -726,7 +726,7 @@ if is_service_enabled q-agt; then if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps # FIXME add to ``files/apts/quantum``, but don't install if not needed! - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then kernel_version=`cat /proc/version | cut -d " " -f3` install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version else diff --git a/tools/info.sh b/tools/info.sh index a872d59d..583a9949 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -88,7 +88,7 @@ done # - We are going to check packages only for the services needed. # - We are parsing the packages files and detecting metadatas. -if [[ "$os_PACKAGE" = "deb" ]]; then +if is_ubuntu; then PKG_DIR=$FILES/apts else PKG_DIR=$FILES/rpms diff --git a/unstack.sh b/unstack.sh index 20ba17b6..81ce088a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,7 +65,7 @@ if is_service_enabled cinder; then # If tgt driver isn't running this won't work obviously # So check the response and restart if need be echo "tgtd seems to be in a bad state, restarting..." - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then restart_service tgt else restart_service tgtd @@ -85,7 +85,7 @@ if is_service_enabled cinder; then sudo rm -rf $CINDER_STATE_PATH/volumes/* fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then stop_service tgt else stop_service tgtd From ec903059d5ab4c387f49f429976da9c147fab808 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Tue, 4 Dec 2012 10:32:41 -0500 Subject: [PATCH 052/207] Revert "Set the rabbit_durable_queues to match local consumers" This reverts commit 71cf53a9f60176419732f3ecbbce11c75190c059. The attempt to set the queue durability for Glance notifications always sets the queues to durable. We are reverting this until a refined approach is available. 
Change-Id: I469e5149d21e3fcdd409da8114d5ccef1ff1243c --- lib/glance | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/lib/glance b/lib/glance index 60026d54..b02a4b63 100644 --- a/lib/glance +++ b/lib/glance @@ -70,13 +70,6 @@ function configure_glanceclient() { setup_develop $GLANCECLIENT_DIR } -# durable_glance_queues() - Determine if RabbitMQ queues are durable or not -function durable_glance_queues() { - test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0 - test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0 - return 1 -} - # configure_glance() - Set config files, create data dirs, etc function configure_glance() { setup_develop $GLANCE_DIR @@ -127,12 +120,6 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - if [[ durable_glance_queues -eq 0 ]]; then - # This gets around https://bugs.launchpad.net/glance/+bug/1074132 - # that results in a g-api server becoming unresponsive during - # startup... - iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True - fi fi if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From d75bc1f3c19fb8eb2c79434e2fc9307dc83bff26 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Tue, 4 Dec 2012 08:51:35 +0800 Subject: [PATCH 053/207] use pip_install to install packages for tempest Change-Id: Ia892e0bc3c8c16119686daf26595c2b82d7e27b0 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 871e9e73..4bfdc50a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -52,7 +52,7 @@ function install_tempest() { # Tempest doesn't satisfy its dependencies on its own, so # install them here instead. 
- sudo pip install -r $TEMPEST_DIR/tools/pip-requires + pip_install -r $TEMPEST_DIR/tools/pip-requires } From 3ab927c9b04e372b66196d9b253fe32cbdf75a65 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 16:15:26 +0100 Subject: [PATCH 054/207] Use getent to find if a group exists, not grep Change-Id: Ia181f41ae122dc5e5d3d33633dfd1d0e7420f8a9 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6445a073..29238d43 100644 --- a/lib/nova +++ b/lib/nova @@ -232,7 +232,7 @@ EOF LIBVIRT_DAEMON=libvirt-bin else # http://wiki.libvirt.org/page/SSHPolicyKitSetup - if ! grep ^libvirtd: /etc/group >/dev/null; then + if ! getent group libvirtd >/dev/null; then sudo groupadd libvirtd fi sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla From 818a9fa327414ec6123d572755cecb12ca483922 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 17:39:33 +0100 Subject: [PATCH 055/207] Install qemu on openSUSE qemu is used as a fallback when kvm cannot be used. On Debian and Fedora, the binaries are there when kvm is installed, but this is not the case on openSUSE. Change-Id: I96592d105428acde9636608002109c166ac7a56a --- files/rpms-suse/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 0c036786..03067162 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -7,6 +7,8 @@ iptables iputils kpartx kvm +# qemu as fallback if kvm cannot be used +qemu libvirt # NOPRIME libvirt-python libxml2-python From 8bc21f6476304ca319489612867109d43d44cb6f Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 19 Nov 2012 22:04:28 -0800 Subject: [PATCH 056/207] move setup_quantum to stack.sh sudo is only allowed in stack.sh on the CI, so move setup_quantum code to the stack.sh. 
also fixes quantum debug command setup for linuxbridge and ryu Change-Id: I11bc0aa242a690e25acc088b3e9f483ceab38f26 --- exercises/boot_from_volume.sh | 5 ----- exercises/euca.sh | 5 ----- exercises/floating_ips.sh | 5 ----- exercises/quantum-adv-test.sh | 2 -- exercises/volumes.sh | 5 ----- lib/quantum | 17 ++++++++++++++--- openrc | 3 --- stack.sh | 35 +++++++++++++++++++---------------- unstack.sh | 6 ++++++ 9 files changed, 39 insertions(+), 44 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 4c2f279e..5ebdecc7 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -35,7 +35,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -174,10 +173,6 @@ nova floating-ip-delete $FLOATING_IP || \ # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/euca.sh b/exercises/euca.sh index c307a064..67da1bee 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -36,7 +36,6 @@ source $TOP_DIR/eucarc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -175,10 +174,6 @@ fi # Delete group euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ae5691f4..8b18e6f4 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ 
-34,7 +34,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -202,10 +201,6 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 2ee82ff2..493e2239 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -58,7 +58,6 @@ is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-d # Import quantum fucntions source $TOP_DIR/lib/quantum -setup_quantum # Import exercise configuration source $TOP_DIR/exerciserc @@ -475,7 +474,6 @@ main() { } -teardown_quantum #------------------------------------------------------------------------------- # Kick off script. 
#------------------------------------------------------------------------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 3432763f..42f9cb4e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -33,7 +33,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -212,10 +211,6 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/lib/quantum b/lib/quantum index 373d5217..14a3a4ad 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,9 +5,20 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +QUANTUM_DIR=$DEST/quantum export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} +if is_service_enabled quantum; then + Q_CONF_FILE=/etc/quantum/quantum.conf + Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" + else + Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" + fi +fi + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 @@ -74,7 +85,7 @@ function _get_probe_cmd_prefix() { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "sudo ip netns exec qprobe-$probe_id" + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } function delete_probe() { @@ -92,9 +103,9 @@ function _ping_check_quantum() { local check_command="" 
probe_cmd=`_get_probe_cmd_prefix $from_net` if [[ "$expected" = "True" ]]; then - check_command="while ! $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" else - check_command="while $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" fi if ! timeout $timeout_sec sh -c "$check_command"; then if [[ "$expected" = "True" ]]; then diff --git a/openrc b/openrc index 4b6b9b2b..08ef98be 100644 --- a/openrc +++ b/openrc @@ -72,6 +72,3 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # set log level to DEBUG (helps debug issues) # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 - -# set quantum debug command -export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} diff --git a/stack.sh b/stack.sh index 55eafa82..1d1ad636 100755 --- a/stack.sh +++ b/stack.sh @@ -321,7 +321,6 @@ HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient # Default Quantum Plugin @@ -1153,14 +1152,7 @@ if is_service_enabled quantum; then iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl unset dburl - Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" - fi cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE # Copy over the config and filter bits @@ -1400,13 +1392,22 @@ if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - Q_DEBUG_CONF_FILE=/etc/quantum/debug.ini - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_DEBUG_CONF_FILE - iniset $Q_L3_CONF_FILE 
DEFAULT verbose False - iniset $Q_L3_CONF_FILE DEFAULT debug False - iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT root_helper "sudo" + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + if [[ "$Q_PLUGIN" == "openvswitch" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi fi fi @@ -1633,7 +1634,9 @@ if is_service_enabled q-svc; then iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi fi - + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + setup_quantum + fi elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS diff --git a/unstack.sh b/unstack.sh index 20ba17b6..a01ed6d1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -37,6 +37,12 @@ if [[ "$1" == "--all" ]]; then UNSTACK_ALL=${UNSTACK_ALL:-1} fi +if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; 
then + source $TOP_DIR/openrc + source $TOP_DIR/lib/quantum + teardown_quantum +fi + # Shut down devstack's screen to get the bulk of OpenStack services in one shot SCREEN=$(which screen) if [[ -n "$SCREEN" ]]; then From 2aa35174b0f99b1b7ea95af474ae1807542b74c6 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 5 Dec 2012 20:03:40 +0100 Subject: [PATCH 057/207] Move tempest config to lib/tempest * Using iniset * Config based on the tempest.config.sample * tools/configure_tempest.sh is pending for removal Change-Id: Ia42e98ba4b640b89bcd2674008090909d88a2efb --- lib/tempest | 208 ++++++++++++++++++++++--- stack.sh | 4 + tools/configure_tempest.sh | 308 +------------------------------------ 3 files changed, 194 insertions(+), 326 deletions(-) diff --git a/lib/tempest b/lib/tempest index 4bfdc50a..606f05ec 100644 --- a/lib/tempest +++ b/lib/tempest @@ -2,31 +2,49 @@ # Dependencies: # ``functions`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``lib/nova`` service is runing # - +# - DEST +# - ADMIN_PASSWORD +# - OS_USERNAME +# - DEFAULT_IMAGE_NAME +# - S3_SERVICE_PORT +# - SERVICE_HOST +# - BASE_SQL_CONN ``lib/database`` declares +# Optional Dependencies: +# IDENTITY_* +# ALT_* (similar vars exists in keystone_data.sh) +# IMAGE_* +# LIVE_MIGRATION_AVAILABLE +# DEFAULT_INSTANCE_TYPE +# DEFAULT_INSTANCE_USER +# USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION # ``stack.sh`` calls the entry points in this order: # -# install_XXXX -# configure_XXXX -# init_XXXX -# start_XXXX -# stop_XXXX -# cleanup_XXXX +# install_tempest +# configure_tempest +# init_tempest +## start_tempest +## stop_tempest +## cleanup_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace - # Defaults # -------- # # Set up default directories +NOVA_SOURCE_DIR=$DEST/nova TEMPEST_DIR=$DEST/tempest -TEMPEST_CONF_DIR=$DEST/tempest/etc +TEMPEST_CONF_DIR=$TEMPEST_DIR/etc +TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf + +BUILD_INTERVAL=3 +BUILD_TIMEOUT=400 # Entry Points # 
------------ @@ -34,15 +52,168 @@ TEMPEST_CONF_DIR=$DEST/tempest/etc # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { + local IMAGE_LINES + local IMAGES + local NUM_IMAGES + local IMAGE_UUID + local IMAGE_UUID_ALT + local errexit + + #TODO(afazekas): # sudo python setup.py deploy - # iniset $tempest_CONF ... - # This function intentionally left blank - # - # TODO(sdague) actually move the guts of configure tempest - # into this function - cd tools - ./configure_tempest.sh - cd .. + + # This function exits on an error so that errors don't compound and you see + # only the first error that occured. + errexit=$(set +o | grep errexit) + set -o errexit + + #Save IFS + ifs=$IFS + + # Glance should already contain images to be used in tempest + # testing. Here we simply look for images stored in Glance + # and set the appropriate variables for use in the tempest config + # We ignore ramdisk and kernel images, look for the default image + # DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the + # first image returned and set IMAGE_UUID_ALT to the second, + # if there is more than one returned... + # ... Also ensure we only take active images, so we don't get snapshots in process + IMAGE_LINES=`glance image-list` + IFS=$'\n\r' + IMAGES="" + for line in $IMAGE_LINES; do + if [ -z $DEFAULT_IMAGE_NAME ]; then + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + else + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + fi + done + # Create array of image UUIDs... + IFS=" " + IMAGES=($IMAGES) + NUM_IMAGES=${#IMAGES[*]} + echo "Found $NUM_IMAGES images" + if [[ $NUM_IMAGES -eq 0 ]]; then + echo "Found no valid images to use!" 
+ exit 1 + fi + IMAGE_UUID=${IMAGES[0]} + IMAGE_UUID_ALT=$IMAGE_UUID + if [[ $NUM_IMAGES -gt 1 ]]; then + IMAGE_UUID_ALT=${IMAGES[1]} + fi + + # Create tempest.conf from tempest.conf.sample + # copy every time, because the image UUIDS are going to change + cp $TEMPEST_CONF.sample $TEMPEST_CONF + + IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} + IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} + IDENTITY_PORT=${IDENTITY_PORT:-5000} + # TODO(jaypipes): This is dumb and needs to be removed + # from the Tempest configuration file entirely... + IDENTITY_PATH=${IDENTITY_PATH:-tokens} + + PASSWORD=${ADMIN_PASSWORD:-secrete} + + # See files/keystone_data.sh where alt_demo user + # and tenant are set up... + ALT_USERNAME=${ALT_USERNAME:-alt_demo} + ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} + + # Check Nova for existing flavors and, if set, look for the + # DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. + FLAVOR_LINES=`nova flavor-list` + IFS="$(echo -e "\n\r")" + FLAVORS="" + for line in $FLAVOR_LINES; do + if [ -z $DEFAULT_INSTANCE_TYPE ]; then + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + else + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + fi + done + + IFS=" " + FLAVORS=($FLAVORS) + NUM_FLAVORS=${#FLAVORS[*]} + echo "Found $NUM_FLAVORS flavors" + if [[ $NUM_FLAVORS -eq 0 ]]; then + echo "Found no valid flavors to use!" 
+ exit 1 + fi + FLAVOR_REF=${FLAVORS[0]} + FLAVOR_REF_ALT=$FLAVOR_REF + if [[ $NUM_FLAVORS -gt 1 ]]; then + FLAVOR_REF_ALT=${FLAVORS[1]} + fi + + # Timeouts + iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF boto http_socket_timeout 5 + + iniset $TEMPEST_CONF identity use_ssl $IDENTITY_USE_SSL + iniset $TEMPEST_CONF identity host $IDENTITY_HOST + iniset $TEMPEST_CONF identity port $IDENTITY_PORT + iniset $TEMPEST_CONF identity path $IDENTITY_PATH + + iniset $TEMPEST_CONF compute password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME + iniset $TEMPEST_CONF compute alt_password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONF compute resize_available False + iniset $TEMPEST_CONF compute change_password_available False + iniset $TEMPEST_CONF compute compute_log_level ERROR + #Skip until #1074039 is fixed + iniset $TEMPEST_CONF compute run_ssh False + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} + iniset $TEMPEST_CONF compute network_for_ssh private + iniset $TEMPEST_CONF compute ip_version_for_ssh 4 + iniset $TEMPEST_CONF compute ssh_timeout 4 + iniset $TEMPEST_CONF compute image_ref $IMAGE_UUID + iniset $TEMPEST_CONF compute image_ref_alt $IMAGE_UUID_ALT + iniset $TEMPEST_CONF compute flavor_ref $FLAVOR_REF + iniset $TEMPEST_CONF compute flavor_ref_alt $FLAVOR_REF_ALT + iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR + iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + # 
Inherited behavior, might be wrong + iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR + # TODO(jaypipes): Create the key file here... right now, no whitebox + # tests actually use a key. + iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa + iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova + + # image + iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} + iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} + iniset $TEMPEST_CONF image password "$PASSWORD" + + # identity-admin + iniset $TEMPEST_CONF "identity-admin" password "$PASSWORD" + + # compute admin + iniset $TEMPEST_CONF "compute-admin" password "$PASSWORD" + + # network + iniset $TEMPEST_CONF network api_version 2.0 + + #boto + iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" + iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + + echo "Created tempest configuration file:" + cat $TEMPEST_CONF + + # Restore IFS + IFS=$ifs + #Restore errexit + $errexit } @@ -55,6 +226,5 @@ function install_tempest() { pip_install -r $TEMPEST_DIR/tools/pip-requires } - # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index 94283563..c4f26f42 100755 --- a/stack.sh +++ b/stack.sh @@ -1713,7 +1713,11 @@ fi # Configure Tempest last to ensure that the runtime configuration of # the various OpenStack services can be queried. 
if is_service_enabled tempest; then + echo_summary "Configuring Tempest" configure_tempest + echo '**************************************************' + echo_summary "Finished Configuring Tempest" + echo '**************************************************' fi diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 298fa9ba..09241808 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -1,309 +1,3 @@ #!/usr/bin/env bash -# -# **configure_tempest.sh** -# Build a tempest configuration file from devstack - -echo "**************************************************" -echo "Configuring Tempest" -echo "**************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - -function usage { - echo "$0 - Build tempest.conf" - echo "" - echo "Usage: $0" - exit 1 -} - -if [ "$1" = "-h" ]; then - usage -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with necessary basic configuration defined before proceeding." - exit 1 -fi - -# Abort if openrc is not set -if [ ! -e $TOP_DIR/openrc ]; then - echo "You must have an openrc with ALL necessary passwords and credentials defined before proceeding." 
- exit 1 -fi - -# Source params -source $TOP_DIR/lib/database -source $TOP_DIR/openrc - -# Where Openstack code lives -DEST=${DEST:-/opt/stack} - -NOVA_SOURCE_DIR=$DEST/nova -TEMPEST_DIR=$DEST/tempest -CONFIG_DIR=$TEMPEST_DIR/etc -TEMPEST_CONF=$CONFIG_DIR/tempest.conf - -DATABASE_TYPE=${DATABASE_TYPE:-mysql} -initialize_database_backends - -# Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` -HOST_IP=${HOST_IP:-$GUEST_IP} -# Use the first IP if HOST_IP still is not set -if [ ! -n "$HOST_IP" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -# Glance should already contain images to be used in tempest -# testing. Here we simply look for images stored in Glance -# and set the appropriate variables for use in the tempest config -# We ignore ramdisk and kernel images, look for the default image -# DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the -# first image returned and set IMAGE_UUID_ALT to the second, -# if there is more than one returned... -# ... Also ensure we only take active images, so we don't get snapshots in process -IMAGE_LINES=`glance image-list` -IFS="$(echo -e "\n\r")" -IMAGES="" -for line in $IMAGE_LINES; do - if [ -z $DEFAULT_IMAGE_NAME ]; then - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" - else - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" - fi -done -# Create array of image UUIDs... -IFS=" " -IMAGES=($IMAGES) -NUM_IMAGES=${#IMAGES[*]} -echo "Found $NUM_IMAGES images" -if [[ $NUM_IMAGES -eq 0 ]]; then - echo "Found no valid images to use!" 
- exit 1 -fi -IMAGE_UUID=${IMAGES[0]} -IMAGE_UUID_ALT=$IMAGE_UUID -if [[ $NUM_IMAGES -gt 1 ]]; then - IMAGE_UUID_ALT=${IMAGES[1]} -fi - -# Create tempest.conf from tempest.conf.tpl -# copy every time, because the image UUIDS are going to change -cp $TEMPEST_CONF.tpl $TEMPEST_CONF - -COMPUTE_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -COMPUTE_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -COMPUTE_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} - -IDENTITY_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -IDENTITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -IDENTITY_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} - -IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} -IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} -IDENTITY_PORT=${IDENTITY_PORT:-5000} -IDENTITY_API_VERSION="v2.0" # Note: need v for now... -# TODO(jaypipes): This is dumb and needs to be removed -# from the Tempest configuration file entirely... -IDENTITY_PATH=${IDENTITY_PATH:-tokens} -IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone} -IDENTITY_CATALOG_TYPE=identity - -# We use regular, non-admin users in Tempest for the USERNAME -# substitutions and use ADMIN_USERNAME et al for the admin stuff. -# OS_USERNAME et all should be defined in openrc. -OS_USERNAME=${OS_USERNAME:-demo} -OS_TENANT_NAME=${OS_TENANT_NAME:-demo} -OS_PASSWORD=${OS_PASSWORD:-$ADMIN_PASSWORD} - -# See files/keystone_data.sh where alt_demo user -# and tenant are set up... -ALT_USERNAME=${ALT_USERNAME:-alt_demo} -ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} -ALT_PASSWORD=$OS_PASSWORD - -# Check Nova for existing flavors and, if set, look for the -# DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. 
-FLAVOR_LINES=`nova flavor-list` -IFS="$(echo -e "\n\r")" -FLAVORS="" -for line in $FLAVOR_LINES; do - if [ -z $DEFAULT_INSTANCE_TYPE ]; then - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - else - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" - fi -done -IFS=" " -FLAVORS=($FLAVORS) -NUM_FLAVORS=${#FLAVORS[*]} -echo "Found $NUM_FLAVORS flavors" -if [[ $NUM_FLAVORS -eq 0 ]]; then - echo "Found no valid flavors to use!" - exit 1 -fi -FLAVOR_REF=${FLAVORS[0]} -FLAVOR_REF_ALT=$FLAVOR_REF -if [[ $NUM_FLAVORS -gt 1 ]]; then - FLAVOR_REF_ALT=${FLAVORS[1]} -fi - -# Do any of the following need to be configurable? -COMPUTE_CATALOG_TYPE=compute -COMPUTE_CREATE_IMAGE_ENABLED=True -COMPUTE_ALLOW_TENANT_ISOLATION=True -COMPUTE_ALLOW_TENANT_REUSE=True -COMPUTE_RESIZE_AVAILABLE=False -COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... -COMPUTE_LOG_LEVEL=ERROR -BUILD_INTERVAL=3 -BUILD_TIMEOUT=400 -COMPUTE_BUILD_INTERVAL=3 -COMPUTE_BUILD_TIMEOUT=400 -VOLUME_BUILD_INTERVAL=3 -VOLUME_BUILD_TIMEOUT=300 -RUN_SSH=True -# Check for DEFAULT_INSTANCE_USER and try to connect with that account -SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME} -NETWORK_FOR_SSH=private -IP_VERSION_FOR_SSH=4 -SSH_TIMEOUT=4 -# Whitebox testing configuration for Compute... -COMPUTE_WHITEBOX_ENABLED=True -COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR -COMPUTE_BIN_DIR=$NOVA_BIN_DIR -COMPUTE_CONFIG_PATH=/etc/nova/nova.conf -# TODO(jaypipes): Create the key file here... right now, no whitebox -# tests actually use a key. -COMPUTE_PATH_TO_PRIVATE_KEY=$TEMPEST_DIR/id_rsa -COMPUTE_DB_URI=$BASE_SQL_CONN/nova - -# Image test configuration options... 
-IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} -IMAGE_PORT=${IMAGE_PORT:-9292} -IMAGE_API_VERSION=1 -IMAGE_CATALOG_TYPE=image - -# Network API test configuration -NETWORK_CATALOG_TYPE=network -NETWORK_API_VERSION=2.0 - -# Volume API test configuration -VOLUME_CATALOG_TYPE=volume - -# Live migration -LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} -USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - -# Object Storage -OBJECT_CATALOG_TYPE="object-store" - -# EC2 and S3 test configuration -BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" -BOTO_S3_URL="http://$IDENTITY_HOST:3333" -BOTO_AWS_ACCESS="" # Created in tempest... -BOTO_AWS_SECRET="" # Created in tempest... -BOTO_AWS_REGION="RegionOne" -BOTO_S3_MATERIALS_PATH=$DEST/devstack/files/images/s3-materials/cirros-0.3.0 -BOTO_ARI_MANIFEST=cirros-0.3.0-x86_64-initrd.manifest.xml -BOTO_AMI_MANIFEST=cirros-0.3.0-x86_64-blank.img.manifest.xml -BOTO_AKI_MANIFEST=cirros-0.3.0-x86_64-vmlinuz.manifest.xml -BOTO_FLAVOR_NAME=m1.tiny -BOTO_SOCKET_TIMEOUT=5 -BOTO_BUILD_TIMEOUT=${COMPUTE_BUILD_TIMEOUT:-400} -BOTO_BUILD_INTERVAL=${COMPUTE_BUILD_INTERVAL:-3} - -sed -e " - s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; - s,%IDENTITY_HOST%,$IDENTITY_HOST,g; - s,%IDENTITY_PORT%,$IDENTITY_PORT,g; - s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; - s,%IDENTITY_PATH%,$IDENTITY_PATH,g; - s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; - s,%IDENTITY_CATALOG_TYPE%,$IDENTITY_CATALOG_TYPE,g; - s,%USERNAME%,$OS_USERNAME,g; - s,%PASSWORD%,$OS_PASSWORD,g; - s,%TENANT_NAME%,$OS_TENANT_NAME,g; - s,%ALT_USERNAME%,$ALT_USERNAME,g; - s,%ALT_PASSWORD%,$ALT_PASSWORD,g; - s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g; - s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g; - s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g; - s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g; - s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; - 
s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; - s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g; - s,%COMPUTE_WHITEBOX_ENABLED%,$COMPUTE_WHITEBOX_ENABLED,g; - s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; - s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; - s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; - s,%COMPUTE_BUILD_INTERVAL%,$COMPUTE_BUILD_INTERVAL,g; - s,%COMPUTE_BUILD_TIMEOUT%,$COMPUTE_BUILD_TIMEOUT,g; - s,%RUN_SSH%,$RUN_SSH,g; - s,%SSH_USER%,$SSH_USER,g; - s,%NETWORK_FOR_SSH%,$NETWORK_FOR_SSH,g; - s,%IP_VERSION_FOR_SSH%,$IP_VERSION_FOR_SSH,g; - s,%SSH_TIMEOUT%,$SSH_TIMEOUT,g; - s,%IMAGE_ID%,$IMAGE_UUID,g; - s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; - s,%FLAVOR_REF%,$FLAVOR_REF,g; - s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g; - s,%COMPUTE_CONFIG_PATH%,$COMPUTE_CONFIG_PATH,g; - s,%COMPUTE_SOURCE_DIR%,$COMPUTE_SOURCE_DIR,g; - s,%COMPUTE_BIN_DIR%,$COMPUTE_BIN_DIR,g; - s,%COMPUTE_PATH_TO_PRIVATE_KEY%,$COMPUTE_PATH_TO_PRIVATE_KEY,g; - s,%COMPUTE_DB_URI%,$COMPUTE_DB_URI,g; - s,%IMAGE_HOST%,$IMAGE_HOST,g; - s,%IMAGE_PORT%,$IMAGE_PORT,g; - s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; - s,%IMAGE_CATALOG_TYPE%,$IMAGE_CATALOG_TYPE,g; - s,%COMPUTE_ADMIN_USERNAME%,$COMPUTE_ADMIN_USERNAME,g; - s,%COMPUTE_ADMIN_PASSWORD%,$COMPUTE_ADMIN_PASSWORD,g; - s,%COMPUTE_ADMIN_TENANT_NAME%,$COMPUTE_ADMIN_TENANT_NAME,g; - s,%IDENTITY_ADMIN_USERNAME%,$IDENTITY_ADMIN_USERNAME,g; - s,%IDENTITY_ADMIN_PASSWORD%,$IDENTITY_ADMIN_PASSWORD,g; - s,%IDENTITY_ADMIN_TENANT_NAME%,$IDENTITY_ADMIN_TENANT_NAME,g; - s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g; - s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g; - s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g; - s,%VOLUME_BUILD_INTERVAL%,$VOLUME_BUILD_INTERVAL,g; - s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; - s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; - s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; - s,%OBJECT_CATALOG_TYPE%,$OBJECT_CATALOG_TYPE,g; - s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; - 
s,%BOTO_S3_URL%,$BOTO_S3_URL,g; - s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; - s,%BOTO_AWS_SECRET%,$BOTO_AWS_SECRET,g; - s,%BOTO_AWS_REGION%,$BOTO_AWS_REGION,g; - s,%BOTO_S3_MATERIALS_PATH%,$BOTO_S3_MATERIALS_PATH,g; - s,%BOTO_ARI_MANIFEST%,$BOTO_ARI_MANIFEST,g; - s,%BOTO_AMI_MANIFEST%,$BOTO_AMI_MANIFEST,g; - s,%BOTO_AKI_MANIFEST%,$BOTO_AKI_MANIFEST,g; - s,%BOTO_FLAVOR_NAME%,$BOTO_FLAVOR_NAME,g; - s,%BOTO_SOCKET_TIMEOUT%,$BOTO_SOCKET_TIMEOUT,g; - s,%BOTO_BUILD_TIMEOUT%,$BOTO_BUILD_TIMEOUT,g; - s,%BOTO_BUILD_INTERVAL%,$BOTO_BUILD_INTERVAL,g; -" -i $TEMPEST_CONF - -echo "Created tempest configuration file:" -cat $TEMPEST_CONF - -echo "\n" -echo "**************************************************" -echo "Finished Configuring Tempest" -echo "**************************************************" +echo "$0 is scheduled for delete!!" >&2 From 53d3d6baf93572618633ac79a6d1f594bc199837 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 6 Dec 2012 15:49:17 +0000 Subject: [PATCH 058/207] Fix XenAPINFS configuration fixes bug 1087272 Trying to configure XenAPINFS volume driver through localrc options failed. This fix removes the extra check, as lib/cinder already exits on error. Change-Id: I874b7cee44861244cb7a340cc4094ef3f8b48a5a --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index ce160bf0..d47c83a4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -159,7 +159,6 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) - [ $? -ne 0 ] && exit 1 fi } From b79574b4954406d6d9e65ce5b1fb8d07678e7128 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 1 Dec 2012 10:42:46 +0100 Subject: [PATCH 059/207] Name the tgt/conf.d enties based on the vg name On one system multiple volume and volume manger could be installed and needs dedicated tgt config entries. 
cinder-volumes, stack-volumes, nova-volumes are the default volume group names. /etc/tgt/conf.d/ files should be named based on the volume-group name. The vg name is uniq on one system. In devstack case the stack.conf is usable. Changes: * Rename conf.d/cinder.conf to conf.d/stack.conf * Handle conf.d similary on all distribution Change-Id: I856cdf4a21a414d2940d8f9d8b0b0368b1fad887 --- lib/cinder | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index ce160bf0..039c5cba 100644 --- a/lib/cinder +++ b/lib/cinder @@ -9,12 +9,12 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_cinder +# configure_cinder +# init_cinder +# start_cinder +# stop_cinder +# cleanup_cinder # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -229,7 +229,7 @@ function install_cinder() { # apply config.d approach (e.g. Oneiric does not have this) function _configure_tgt_for_config_d() { if [[ ! -d /etc/tgt/conf.d/ ]]; then - sudo mkdir /etc/tgt/conf.d + sudo mkdir -p /etc/tgt/conf.d echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf fi } @@ -237,11 +237,11 @@ function _configure_tgt_for_config_d() { # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then + _configure_tgt_for_config_d + if [[ ! -f /etc/tgt/conf.d/stack.conf ]]; then + echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf + fi if is_ubuntu; then - _configure_tgt_for_config_d - if [[ ! 
-f /etc/tgt/conf.d/cinder.conf ]]; then - echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf - fi # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps sudo stop tgt || true From e5eee5834b4ae62857830a7a0266df76ec640b2d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 6 Dec 2012 09:47:53 -0500 Subject: [PATCH 060/207] Add a timestamp to the log output for better diagnosis when we are looking at logs generated by the gate jobs we need timestamps on the output of stack.sh so we can figure out what was being executed around the time when there was a problem in say nova-network Change-Id: I203e8dae97715d6ee46a4088c7577b9be66cf09d --- stack.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index b38c5791..154fd76f 100755 --- a/stack.sh +++ b/stack.sh @@ -598,7 +598,15 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( tee "${LOGFILE}" ) 2>&1 + exec 1> >( awk ' + { + cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd | getline now + close("date +\"%Y-%m-%d %H:%M:%S \"") + sub(/^/, now) + print + fflush() + }' | tee "${LOGFILE}" ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else From a0ca45f17379b76aaa8d58cb3bc26b2c64dba689 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 6 Dec 2012 17:45:49 +0000 Subject: [PATCH 061/207] Fix XenAPINFSDriver's path Fixes bug 1087329 As the driver was moved to a different location, devstack script needed an update. 
Change-Id: Iaa1db94a84b6e9cb99514ce886025600809e9f29 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index d47c83a4..5d2c5961 100644 --- a/lib/cinder +++ b/lib/cinder @@ -152,7 +152,7 @@ function configure_cinder() { if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then ( set -u - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.xenapi_sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" From 72cffd56bf68afcb4ca087f6ed852f875b3bc90e Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 4 Dec 2012 16:14:04 +0000 Subject: [PATCH 062/207] Make the standard devstack logging work with XenServer Fixes bug 1087387 Stop redirecting the output in devstack to ensure when you configure LOGFILE that the devstack scripts keep working Change-Id: I00dce315f5f79c4fc351d9ab11c504274d998fce --- tools/xen/build_xva.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 9eae1903..c359c558 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -66,7 +66,7 @@ cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart chown -R stack /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack exit 0 EOF From a784748527404f59e2c920c889e7958c6532f408 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 4 Dec 2012 16:06:52 -0500 Subject: [PATCH 063/207] Enable the correct instance managers for instance/floating ips. Currently the default is nova.network.dns_driver.DNSDriver for both. 
We need to switch to nova.network.minidns.MiniDNS for both instance_dns_manager and floating_ip_dns_manager. nova.network.dns_driver.DNSDriver is just the interface, we need a good implementation as the default Fixes LP #1040236 Change-Id: If6e65cb1c7802b1ba0c1e64d4c06185cabf9eeca --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index cddb64e0..b3e858f1 100755 --- a/stack.sh +++ b/stack.sh @@ -1478,6 +1478,8 @@ if is_service_enabled nova; then fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "instance_dns_manager=nova.network.minidns.MiniDNS" + add_nova_opt "floating_ip_dns_manager=nova.network.minidns.MiniDNS" add_nova_opt "public_interface=$PUBLIC_INTERFACE" add_nova_opt "vlan_interface=$VLAN_INTERFACE" add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" From 6994296bf64f8b07db7e970b53691502d5341298 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 7 Dec 2012 08:36:14 +0100 Subject: [PATCH 064/207] Remove tools/configure_tempest.sh Related bug #1087203 Change-Id: I2df4601197214d9d50d86876d4a2892b3421217a --- tools/configure_tempest.sh | 3 --- 1 file changed, 3 deletions(-) delete mode 100755 tools/configure_tempest.sh diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh deleted file mode 100755 index 09241808..00000000 --- a/tools/configure_tempest.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -echo "$0 is scheduled for delete!!" >&2 From 00011c0847a9972b78051954e272f54e9d07ef51 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 6 Dec 2012 09:56:32 +0100 Subject: [PATCH 065/207] Add is_fedora and exit_distro_not_supported functions Between is_fedora, is_ubuntu and is_suse, we can make the code a bit simpler to read. We also use exit_distro_not_supported to identify places where we need implementation details for new distros. As "/sbin/service --skip-redirect" is Fedora-specific, guard this with a is_fedora test too. 
Change-Id: Ic77c0697ed9be0dbb5df8e73da93463e76025f0c --- functions | 55 +++++++++++++++++++++++++++++++--------- lib/cinder | 12 ++++++--- lib/databases/mysql | 30 ++++++++++++++-------- lib/databases/postgresql | 8 +++--- lib/horizon | 29 +++++++++++---------- lib/nova | 8 +++--- stack.sh | 34 +++++++++++++++---------- tests/functions.sh | 8 ++++-- tools/info.sh | 8 ++++-- 9 files changed, 128 insertions(+), 64 deletions(-) diff --git a/functions b/functions index 0911557f..3ee43d3d 100644 --- a/functions +++ b/functions @@ -354,6 +354,18 @@ function is_ubuntu { } +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse @@ -366,6 +378,23 @@ function is_suse { } +# Exit after outputting a message about the distribution not being supported. +# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + echo "Support for $DISTRO is incomplete: no support for $@" + else + echo "Support for $DISTRO is incomplete." + fi + + exit 1 +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. 
@@ -598,12 +627,12 @@ function install_package() { NO_UPDATE_REPOS=True apt_get install "$@" + elif is_fedora; then + yum_install "$@" + elif is_suse; then + zypper_install "$@" else - if is_suse; then - zypper_install "$@" - else - yum_install "$@" - fi + exit_distro_not_supported "installing packages" fi } @@ -622,9 +651,11 @@ function is_package_installed() { if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null return $? - else + elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" return $? + else + exit_distro_not_supported "finding if a package is installed" fi } @@ -1032,20 +1063,20 @@ function add_user_to_group() { function get_rootwrap_location() { local module=$1 - if is_ubuntu || is_suse; then - echo "/usr/local/bin/$module-rootwrap" - else + if is_fedora; then echo "/usr/bin/$module-rootwrap" + else + echo "/usr/local/bin/$module-rootwrap" fi } # Get the path to the pip command. # get_pip_command function get_pip_command() { - if is_ubuntu || is_suse; then - echo "/usr/bin/pip" - else + if is_fedora; then echo "/usr/bin/pip-python" + else + echo "/usr/bin/pip" fi } diff --git a/lib/cinder b/lib/cinder index 9b9d50d1..a43f0a16 100644 --- a/lib/cinder +++ b/lib/cinder @@ -195,8 +195,8 @@ function init_cinder() { mkdir -p $CINDER_STATE_PATH/volumes if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service tgtd fi @@ -245,9 +245,15 @@ function start_cinder() { # do it in two steps sudo stop tgt || true sudo start tgt - else + elif is_fedora; then # bypass redirection to systemctl during restart sudo /sbin/service --skip-redirect tgtd restart + elif is_suse; then + restart_service tgtd + else + # note for other distros: unstack.sh also uses the tgt/tgtd service + # name, and would need to be adjusted too + exit_distro_not_supported "restarting tgt" fi fi diff --git a/lib/databases/mysql b/lib/databases/mysql index 
60ea143f..68e9adc5 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -23,22 +23,28 @@ function configure_database_mysql { if is_ubuntu; then MY_CONF=/etc/mysql/my.cnf MYSQL=mysql - else + elif is_fedora; then + MY_CONF=/etc/my.cnf + MYSQL=mysqld + elif is_suse; then MY_CONF=/etc/my.cnf - if is_suse; then - MYSQL=mysql - else - MYSQL=mysqld - fi + MYSQL=mysql + else + exit_distro_not_supported "mysql configuration" fi # Start mysql-server - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service $MYSQL - # Set the root password - only works the first time + fi + + # Set the root password - only works the first time. For Ubuntu, we already + # did that with debconf before installing the package. + if ! is_ubuntu; then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi + # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" @@ -84,10 +90,12 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_suse; then + if is_ubuntu || is_fedora; then + install_package mysql-server + elif is_suse; then install_package mysql-community-server else - install_package mysql-server + exit_distro_not_supported "mysql installation" fi } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index d9c2f00c..20ade857 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -20,7 +20,7 @@ function recreate_database_postgresql { function configure_database_postgresql { echo_summary "Configuring and starting PostgreSQL" - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_fedora || is_suse; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb @@ -53,10 +53,12 @@ EOF else sed -i 
"s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS fi - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora || is_suse; then install_package postgresql-server else - install_package postgresql + exit_distro_not_supported "postgresql installation" fi } diff --git a/lib/horizon b/lib/horizon index 7321cbcc..68337ab8 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,19 +81,18 @@ function init_horizon() { sudo a2ensite horizon # WSGI doesn't enable by default, enable it sudo a2enmod wsgi + elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # Append wsgi to the list of modules to load + grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || + sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 else - # Install httpd, which is NOPRIME'd - if is_suse; then - APACHE_NAME=apache2 - APACHE_CONF=vhosts.d/horizon.conf - # Append wsgi to the list of modules to load - grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || - sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 - else - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - fi + exit_distro_not_supported "apache configuration" fi # Configure apache to run horizon @@ -113,11 +112,13 @@ function install_horizon() { if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi elif is_suse; then install_package apache2 apache2-mod_wsgi else - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi + exit_distro_not_supported "apache installation" fi # NOTE(sdague) quantal changed the name of the node binary diff 
--git a/lib/nova b/lib/nova index 3a4d34d8..8272ef0d 100644 --- a/lib/nova +++ b/lib/nova @@ -394,11 +394,13 @@ function install_novaclient() { function install_nova() { if is_service_enabled n-cpu; then if is_ubuntu; then - LIBVIRT_PKG_NAME=libvirt-bin + install_package libvirt-bin + elif is_fedora || is_suse; then + install_package libvirt else - LIBVIRT_PKG_NAME=libvirt + exit_distro_not_supported "libvirt installation" fi - install_package $LIBVIRT_PKG_NAME + # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. diff --git a/stack.sh b/stack.sh index cddb64e0..6483de3b 100755 --- a/stack.sh +++ b/stack.sh @@ -678,17 +678,21 @@ set -o xtrace echo_summary "Installing package prerequisites" if is_ubuntu; then install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) elif is_suse; then install_package $(get_packages $FILES/rpms-suse) else - install_package $(get_packages $FILES/rpms) + exit_distro_not_supported "list of packages" fi if [[ $SYSLOG != "False" ]]; then - if is_suse; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then install_package rsyslog-module-relp else - install_package rsyslog-relp + exit_distro_not_supported "rsyslog-relp installation" fi fi @@ -700,20 +704,22 @@ if is_service_enabled rabbit; then cat "$tfile" rm -f "$tfile" elif is_service_enabled qpid; then - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_fedora; then install_package qpid-cpp-server-daemon - else + elif is_ubuntu; then install_package qpidd + else + exit_distro_not_supported "qpid installation" fi elif is_service_enabled zeromq; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - if is_suse; then - install_package libzmq1 python-pyzmq - else - install_package zeromq python-zmq - fi - else + if is_fedora; then + install_package zeromq python-zmq + elif is_ubuntu; then 
install_package libzmq1 python-zmq + elif is_suse; then + install_package libzmq1 python-pyzmq + else + exit_distro_not_supported "zeromq installation" fi fi @@ -909,8 +915,8 @@ fi if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default restart_service rabbitmq-server fi # change the rabbit password since the default is "guest" diff --git a/tests/functions.sh b/tests/functions.sh index d2cc5c44..be48729f 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -260,9 +260,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm VAL=$? +else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" @@ -273,9 +275,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg bash VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm bash VAL=$? 
+else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" diff --git a/tools/info.sh b/tools/info.sh index 583a9949..f01dbea0 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -90,15 +90,19 @@ done if is_ubuntu; then PKG_DIR=$FILES/apts -else +elif is_fedora; then PKG_DIR=$FILES/rpms +else + exit_distro_not_supported "list of packages" fi for p in $(get_packages $PKG_DIR); do if [[ "$os_PACKAGE" = "deb" ]]; then ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) - else + elif [[ "$os_PACKAGE" = "rpm" ]]; then ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) + else + exit_distro_not_supported "finding version of a package" fi echo "pkg|${p}|${ver}" done From 1e32d0ab191bfe8a8c89580b9f84fe38ded7af0a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 7 Dec 2012 12:46:15 +0000 Subject: [PATCH 066/207] exercises/euca: Fix volume timeout Fixes bug 1087656 In euca exercise, the timeout for one of the volume operations was specified as ASSOCIATE_TIMEOUT, whereas the timeout error message was mentioning RUNNING_TIMEOUT. This fix changes the timeout to RUNNING_TIMEOUT so that it is consistent with the error message. As RUNNING is usually larger than ASSOCIATE, it leaves more time for the volume operation. Change-Id: Ic016c7920ae6e4ec9a476bb5612b7df9eed01c75 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 67da1bee..982653ef 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -90,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then die_if_not_set VOLUME "Failure to get volume" # Test volume has become available - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then echo "volume didnt become available within $RUNNING_TIMEOUT seconds" exit 1 fi From 65c0846e379ba629fcc389486057322d5e30b34a Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 7 Dec 2012 14:20:51 +0100 Subject: [PATCH 067/207] Local variable cosmetic changes in lib/tempest Change-Id: I5e83531c32968bc734abb0f9a8d03e2f9500a074 --- lib/tempest | 121 +++++++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 58 deletions(-) diff --git a/lib/tempest b/lib/tempest index 606f05ec..7fa15df0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -4,21 +4,21 @@ # ``functions`` file # ``lib/nova`` service is runing # -# - DEST -# - ADMIN_PASSWORD -# - OS_USERNAME -# - DEFAULT_IMAGE_NAME -# - S3_SERVICE_PORT -# - SERVICE_HOST -# - BASE_SQL_CONN ``lib/database`` declares +# - ``DEST`` +# - ``ADMIN_PASSWORD`` +# - ``DEFAULT_IMAGE_NAME`` +# - ``S3_SERVICE_PORT`` +# - ``SERVICE_HOST`` +# - ``BASE_SQL_CONN`` ``lib/database`` declares # Optional Dependencies: -# IDENTITY_* +# IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# IMAGE_* -# LIVE_MIGRATION_AVAILABLE -# DEFAULT_INSTANCE_TYPE -# DEFAULT_INSTANCE_USER -# USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION +# ``OS_USERNAME`` +# ``IMAGE_PORT``, ``IMAGE_HOST`` +# ``LIVE_MIGRATION_AVAILABLE`` +# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` +# ``DEFAULT_INSTANCE_TYPE`` +# ``DEFAULT_INSTANCE_USER`` # ``stack.sh`` calls the entry points in this order: # # install_tempest @@ -52,12 +52,17 @@ BUILD_TIMEOUT=400 # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { - local IMAGE_LINES - local IMAGES - local NUM_IMAGES - local IMAGE_UUID - local IMAGE_UUID_ALT + local image_lines + local images + local num_images + local image_uuid + local image_uuid_alt local errexit + local password + local line + local flavors + local flavors_ref + local 
flavor_lines #TODO(afazekas): # sudo python setup.py deploy @@ -74,33 +79,33 @@ function configure_tempest() { # testing. Here we simply look for images stored in Glance # and set the appropriate variables for use in the tempest config # We ignore ramdisk and kernel images, look for the default image - # DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the - # first image returned and set IMAGE_UUID_ALT to the second, + # ``DEFAULT_IMAGE_NAME``. If not found, we set the ``image_uuid`` to the + # first image returned and set ``image_uuid_alt`` to the second, # if there is more than one returned... # ... Also ensure we only take active images, so we don't get snapshots in process - IMAGE_LINES=`glance image-list` + image_lines=`glance image-list` IFS=$'\n\r' - IMAGES="" - for line in $IMAGE_LINES; do + images="" + for line in $image_lines; do if [ -z $DEFAULT_IMAGE_NAME ]; then - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" else - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" fi done # Create array of image UUIDs... IFS=" " - IMAGES=($IMAGES) - NUM_IMAGES=${#IMAGES[*]} - echo "Found $NUM_IMAGES images" - if [[ $NUM_IMAGES -eq 0 ]]; then + images=($images) + num_images=${#images[*]} + echo "Found $num_images images" + if [[ $num_images -eq 0 ]]; then echo "Found no valid images to use!" 
exit 1 fi - IMAGE_UUID=${IMAGES[0]} - IMAGE_UUID_ALT=$IMAGE_UUID - if [[ $NUM_IMAGES -gt 1 ]]; then - IMAGE_UUID_ALT=${IMAGES[1]} + image_uuid=${images[0]} + image_uuid_alt=$image_uuid + if [[ $num_images -gt 1 ]]; then + image_uuid_alt=${images[1]} fi # Create tempest.conf from tempest.conf.sample @@ -114,7 +119,7 @@ function configure_tempest() { # from the Tempest configuration file entirely... IDENTITY_PATH=${IDENTITY_PATH:-tokens} - PASSWORD=${ADMIN_PASSWORD:-secrete} + password=${ADMIN_PASSWORD:-secrete} # See files/keystone_data.sh where alt_demo user # and tenant are set up... @@ -122,30 +127,30 @@ function configure_tempest() { ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} # Check Nova for existing flavors and, if set, look for the - # DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. - FLAVOR_LINES=`nova flavor-list` - IFS="$(echo -e "\n\r")" - FLAVORS="" - for line in $FLAVOR_LINES; do + # ``DEFAULT_INSTANCE_TYPE`` and use that. Otherwise, just use the first flavor. + flavor_lines=`nova flavor-list` + IFS=$'\r\n' + flavors="" + for line in $flavor_lines; do if [ -z $DEFAULT_INSTANCE_TYPE ]; then - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" else - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" fi done IFS=" " - FLAVORS=($FLAVORS) - NUM_FLAVORS=${#FLAVORS[*]} - echo "Found $NUM_FLAVORS flavors" - if [[ $NUM_FLAVORS -eq 0 ]]; then + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then echo "Found no valid flavors to use!" 
exit 1 fi - FLAVOR_REF=${FLAVORS[0]} - FLAVOR_REF_ALT=$FLAVOR_REF - if [[ $NUM_FLAVORS -gt 1 ]]; then - FLAVOR_REF_ALT=${FLAVORS[1]} + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + if [[ $num_flavors -gt 1 ]]; then + flavor_ref_alt=${flavors[1]} fi # Timeouts @@ -162,9 +167,9 @@ function configure_tempest() { iniset $TEMPEST_CONF identity port $IDENTITY_PORT iniset $TEMPEST_CONF identity path $IDENTITY_PATH - iniset $TEMPEST_CONF compute password "$PASSWORD" + iniset $TEMPEST_CONF compute password "$password" iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME - iniset $TEMPEST_CONF compute alt_password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_password "$password" iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False @@ -175,10 +180,10 @@ function configure_tempest() { iniset $TEMPEST_CONF compute network_for_ssh private iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout 4 - iniset $TEMPEST_CONF compute image_ref $IMAGE_UUID - iniset $TEMPEST_CONF compute image_ref_alt $IMAGE_UUID_ALT - iniset $TEMPEST_CONF compute flavor_ref $FLAVOR_REF - iniset $TEMPEST_CONF compute flavor_ref_alt $FLAVOR_REF_ALT + iniset $TEMPEST_CONF compute image_ref $image_uuid + iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONF compute flavor_ref $flavor_ref + iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} @@ -192,13 +197,13 @@ function configure_tempest() { # image iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} - iniset $TEMPEST_CONF image password 
"$PASSWORD" + iniset $TEMPEST_CONF image password "$password" # identity-admin - iniset $TEMPEST_CONF "identity-admin" password "$PASSWORD" + iniset $TEMPEST_CONF "identity-admin" password "$password" # compute admin - iniset $TEMPEST_CONF "compute-admin" password "$PASSWORD" + iniset $TEMPEST_CONF "compute-admin" password "$password" # network iniset $TEMPEST_CONF network api_version 2.0 From a9414249af522324c68e4d8fe1656283162e5738 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Thu, 6 Dec 2012 16:30:57 +0800 Subject: [PATCH 068/207] Add a service_check function There is no function to check if the services invoked by devstack are running well or not. We could use the function to check their status and print them at the end of devstack running. Change-Id: I845f6b5dddce5cffa7165ec58517f9ae5d8632a6 --- functions | 45 ++++++++++++++++++++++++++++++++++++++++++++- stack.sh | 4 ++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 0911557f..85ff4202 100644 --- a/functions +++ b/functions @@ -684,6 +684,8 @@ function restart_service() { function screen_it { NL=`echo -ne '\015'` SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" @@ -699,7 +701,7 @@ function screen_it { screen -S $SCREEN_NAME -p $1 -X log on ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log fi - screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL" + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" fi } @@ -724,6 +726,47 @@ function screen_rc { fi } +# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME +# This is used for service_check when all the screen_it are called finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service::-8} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} # ``pip install`` the dependencies of the package before ``setup.py develop`` # so pip and not distutils processes the dependency chain diff --git a/stack.sh b/stack.sh index 94283563..40708afd 100755 --- a/stack.sh +++ b/stack.sh @@ -954,6 +954,8 @@ sleep 1 # Set a reasonable status bar screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +# Initialize the directory for service status check +init_service_check # Keystone # -------- @@ -1726,6 +1728,8 @@ if [[ -x $TOP_DIR/local.sh ]]; then $TOP_DIR/local.sh fi +# Check the status of running services +service_check # Fin # === From eb1aa3d5ed4388119fac56038b4655648bca7e76 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Thu, 6 Dec 2012 11:55:29 -0800 Subject: [PATCH 069/207] setup quantum-rootrwapper Add quantum-rootwrapper for /etc/sudoers.d This is needed to run quantum in CI env Change-Id: Ib59351c106f0a45bb45476edf032c97744873923 --- lib/quantum | 36 +++++++++++++++++++++++++++++++++++- stack.sh | 7 +------ 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/lib/quantum b/lib/quantum index 14a3a4ad..cb683398 100644 --- 
a/lib/quantum +++ b/lib/quantum @@ -15,10 +15,44 @@ if is_service_enabled quantum; then if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then Q_RR_COMMAND="sudo" else - Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" + QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) + Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE" fi fi +# configure_quantum_rootwrap() - configure Quantum's rootwrap +function configure_quantum_rootwrap() { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + # Deploy filters to /etc/quantum/rootwrap.d + mkdir -p -m 755 $Q_CONF_ROOTWRAP_D + cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + sudo chown -R root:root $Q_CONF_ROOTWRAP_D + sudo chmod 644 $Q_CONF_ROOTWRAP_D/* + # Set up rootwrap.conf, pointing to /etc/quantum/rootwrap.d + sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo chown root:root $Q_RR_CONF_FILE + sudo chmod 0644 $Q_RR_CONF_FILE + # Specify rootwrap.conf as first parameter to quantum-rootwrap + ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *" + + # Set up the rootwrap sudoers for quantum + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap +} + + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 diff --git a/stack.sh b/stack.sh index cddb64e0..33da2cff 100755 --- a/stack.sh +++ b/stack.sh @@ -1149,12 +1149,7 @@ if is_service_enabled quantum; then unset dburl cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - cp -p $QUANTUM_DIR/etc/rootwrap.conf 
$Q_RR_CONF_FILE - - # Copy over the config and filter bits - Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d - mkdir -p $Q_CONF_ROOTWRAP_D - cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + configure_quantum_rootwrap fi # Quantum service (for controller node) From 3c6a57a3c4a668b5f1522bd42ca79cdb05360fc2 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Sat, 8 Dec 2012 22:07:11 -0800 Subject: [PATCH 070/207] Setup rootwrapper for quantum-debug command Setup rootwrapper for quantum-debug command This change is needed to quantum-gating Change-Id: I032f26c0c020374ac978e00bdf72856da795096d --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 7b87cd67..772e1142 100755 --- a/stack.sh +++ b/stack.sh @@ -1387,6 +1387,7 @@ if is_service_enabled quantum; then iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" == "openvswitch" ]]; then iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver From 60e9c0ab22309d1b0b857761be16d4d58a1b251e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Dec 2012 15:52:52 -0600 Subject: [PATCH 071/207] Quantum cleanups * Set base default env vars in lib/quantum * Rename common dirs to match devstack conventions Q_CONF_FILE -> QUANTUM_CONF QUANTUM_CLIENT_* -> QUANTUMCLIENT_* Change-Id: I7a2a92b50ef953195f078ac62cb975f28892c05c --- lib/quantum | 69 +++++++++++++++++++++++++++++++++++----- stack.sh | 90 +++++++++++++++-------------------------------------- stackrc | 4 +-- 3 files changed, 89 insertions(+), 74 deletions(-) diff --git a/lib/quantum b/lib/quantum index cb683398..4e9f2987 100644 --- a/lib/quantum +++ b/lib/quantum @@ -1,17 +1,69 @@ # lib/quantum # 
functions - funstions specific to quantum +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined + + +# Quantum Networking +# ------------------ + +# Make sure that quantum is enabled in ``ENABLED_SERVICES``. If you want +# to run Quantum on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# If you're planning to use the Quantum openvswitch plugin, set +# ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled +# in ``ENABLED_SERVICES``. If you're planning to use the Quantum +# linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the +# q-agt service is enabled in ``ENABLED_SERVICES``. +# +# See "Quantum Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Quantum. +# +# With Quantum networking the NET_MAN variable is ignored. + + # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + +# Defaults +# -------- + +# Set up default directories QUANTUM_DIR=$DEST/quantum -export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} +QUANTUMCLIENT_DIR=$DEST/python-quantumclient QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} +QUANTUM_CONF_DIR=/etc/quantum +QUANTUM_CONF=$QUANTUM_CONF_DIR/quantum.conf +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"$QUANTUM_CONF_DIR/debug.ini"} + +# Default Quantum Plugin +Q_PLUGIN=${Q_PLUGIN:-openvswitch} +# Default Quantum Port +Q_PORT=${Q_PORT:-9696} +# Default Quantum Host +Q_HOST=${Q_HOST:-$HOST_IP} +# Which Quantum API nova should use +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# Use namespace or not +Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Use quantum-debug command +Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} + if 
is_service_enabled quantum; then - Q_CONF_FILE=/etc/quantum/quantum.conf - Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then Q_RR_COMMAND="sudo" else @@ -20,6 +72,10 @@ if is_service_enabled quantum; then fi fi + +# Entry Points +# ------------ + # configure_quantum_rootwrap() - configure Quantum's rootwrap function configure_quantum_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then @@ -27,16 +83,16 @@ function configure_quantum_rootwrap() { fi # Deploy new rootwrap filters files (owned by root). # Wipe any existing rootwrap.d files first - Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d + Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d if [[ -d $Q_CONF_ROOTWRAP_D ]]; then sudo rm -rf $Q_CONF_ROOTWRAP_D fi - # Deploy filters to /etc/quantum/rootwrap.d + # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d mkdir -p -m 755 $Q_CONF_ROOTWRAP_D cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ sudo chown -R root:root $Q_CONF_ROOTWRAP_D sudo chmod 644 $Q_CONF_ROOTWRAP_D/* - # Set up rootwrap.conf, pointing to /etc/quantum/rootwrap.d + # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE @@ -52,7 +108,6 @@ function configure_quantum_rootwrap() { sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap } - # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 diff --git a/stack.sh b/stack.sh index 48071828..d58f5f5c 100755 --- a/stack.sh +++ b/stack.sh @@ -321,26 +321,6 @@ HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -QUANTUM_CLIENT_DIR=$DEST/python-quantumclient - -# Default Quantum Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} -# Default Quantum Port 
-Q_PORT=${Q_PORT:-9696} -# Default Quantum Host -Q_HOST=${Q_HOST:-$HOST_IP} -# Which Quantum API nova should use -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} -# Use quantum-debug command -Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} RYU_DIR=$DEST/ryu # Ryu API Host @@ -458,26 +438,6 @@ FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? -# Quantum Networking -# ------------------ - -# Make sure that quantum is enabled in ENABLED_SERVICES. If you want -# to run Quantum on this host, make sure that q-svc is also in -# ENABLED_SERVICES. -# -# If you're planning to use the Quantum openvswitch plugin, set -# Q_PLUGIN to "openvswitch" and make sure the q-agt service is enabled -# in ENABLED_SERVICES. If you're planning to use the Quantum -# linuxbridge plugin, set Q_PLUGIN to "linuxbridge" and make sure the -# q-agt service is enabled in ENABLED_SERVICES. -# -# See "Quantum Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Quantum. -# -# With Quantum networking the NET_MAN variable is ignored. 
- - # Database Configuration # ---------------------- @@ -805,7 +765,7 @@ if is_service_enabled horizon; then install_horizon fi if is_service_enabled quantum; then - git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH fi if is_service_enabled quantum; then # quantum @@ -864,7 +824,7 @@ if is_service_enabled horizon; then configure_horizon fi if is_service_enabled quantum; then - setup_develop $QUANTUM_CLIENT_DIR + setup_develop $QUANTUMCLIENT_DIR setup_develop $QUANTUM_DIR fi if is_service_enabled heat; then @@ -1119,11 +1079,11 @@ if is_service_enabled quantum; then # Example: ``OVS_ENABLE_TUNNELING=True`` OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - # Put config files in ``/etc/quantum`` for everyone to find - if [[ ! -d /etc/quantum ]]; then - sudo mkdir -p /etc/quantum + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR fi - sudo chown `whoami` /etc/quantum + sudo chown `whoami` $QUANTUM_CONF_DIR if [[ "$Q_PLUGIN" = "openvswitch" ]]; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch @@ -1147,7 +1107,7 @@ if is_service_enabled quantum; then exit 1 fi - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``/etc/quantum`` + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE @@ -1156,14 +1116,14 @@ if is_service_enabled quantum; then iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl unset dburl - cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF configure_quantum_rootwrap fi # Quantum service (for controller node) if is_service_enabled q-svc; then - Q_API_PASTE_FILE=/etc/quantum/api-paste.ini - 
Q_POLICY_FILE=/etc/quantum/policy.json + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE @@ -1176,9 +1136,9 @@ if is_service_enabled q-svc; then fi # Update either configuration file with plugin - iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken # Configure plugin @@ -1295,7 +1255,7 @@ fi if is_service_enabled q-dhcp; then AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE @@ -1325,7 +1285,7 @@ fi if is_service_enabled q-l3; then AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} - Q_L3_CONF_FILE=/etc/quantum/l3_agent.ini + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE @@ -1361,7 +1321,7 @@ fi #Quantum Metadata if is_service_enabled q-meta; then AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" - Q_META_CONF_FILE=/etc/quantum/metadata_agent.ini + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE @@ -1381,14 +1341,14 @@ fi # Quantum RPC support - must be updated prior to starting any of the services if is_service_enabled quantum; then - iniset $Q_CONF_FILE DEFAULT control_exchange quantum + iniset $QUANTUM_CONF DEFAULT control_exchange quantum if is_service_enabled qpid ; then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid elif is_service_enabled zeromq; 
then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST - iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE @@ -1598,7 +1558,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Quantum" # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" echo "Waiting for Quantum to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then echo "Quantum did not start" @@ -1650,10 +1610,10 @@ elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then fi # Start up the quantum agents if enabled -screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" -screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" -screen_it q-meta "python $AGENT_META_BINARY --config-file $Q_CONF_FILE --config-file=$Q_META_CONF_FILE" -screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" +screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" +screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" +screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" +screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" if is_service_enabled nova; then echo_summary "Starting Nova" diff --git a/stackrc b/stackrc index 39d34b0b..8ac6ec59 100644 --- a/stackrc +++ b/stackrc @@ -89,8 +89,8 @@ QUANTUM_REPO=${GIT_BASE}/openstack/quantum QUANTUM_BRANCH=master # quantum client -QUANTUM_CLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient -QUANTUM_CLIENT_BRANCH=master +QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient +QUANTUMCLIENT_BRANCH=master # Tempest test suite TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git From c50a86e917a4bbc9f9f6affeaae94ff4e80b556a Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 10 Dec 2012 15:10:23 -0500 Subject: [PATCH 072/207] Don't set the dns managers Revert to previous behavior, pick up whatever is set as default in nova. This was causing an issue in a tempest run. Tempest has a test where multiple servers can be spun up with the same name this test failed. 
Change-Id: Ie71eda94caf38db0489d6b2385dc80808a39864d --- stack.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/stack.sh b/stack.sh index 700b520c..05f5f35d 100755 --- a/stack.sh +++ b/stack.sh @@ -1482,8 +1482,6 @@ if is_service_enabled nova; then fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "instance_dns_manager=nova.network.minidns.MiniDNS" - add_nova_opt "floating_ip_dns_manager=nova.network.minidns.MiniDNS" add_nova_opt "public_interface=$PUBLIC_INTERFACE" add_nova_opt "vlan_interface=$VLAN_INTERFACE" add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" From 77f076a56d511378eb1ba3ab1267f54a291996e9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Dec 2012 16:49:20 -0500 Subject: [PATCH 073/207] enable tempest by default turn on tempest by default, as that will provide the end users with a testing environment to use with openstack out of the box. Change-Id: I74160a25cfbc6325eea30c81df36e6acbb938bfd --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 39d34b0b..1d2cf8d9 100644 --- a/stackrc +++ b/stackrc @@ -14,7 +14,7 @@ DATABASE_TYPE=mysql # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 9ec34214fce505892937b1cb91c5ece60cdd7882 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 11 Dec 2012 14:18:02 +1300 Subject: [PATCH 074/207] Fix Heat waitcondition URL configuration Change-Id: I32fb7f5ef91aebdf574a98845988b3a2a91d5550 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index b640fbca..feaadec2 100644 --- a/lib/heat +++ b/lib/heat @@ -124,7 +124,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_CFN_HOST:$HEAT_CFN_PORT/v1/waitcondition + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat From 053a5f8425395efb7b2b7111120fa92c6134fc0b Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 11 Dec 2012 17:08:48 +1300 Subject: [PATCH 075/207] Add the role heat_stack_user for heat Change-Id: I0c3ac92d222ff746baca817002821f109815fee9 --- files/keystone_data.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index c8e68dd6..a4f08e42 100755 --- a/files/keystone_data.sh +++ 
b/files/keystone_data.sh @@ -93,6 +93,8 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $HEAT_USER \ --role_id $ADMIN_ROLE + # heat_stack_user role is for users created by Heat + keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then HEAT_CFN_SERVICE=$(get_id keystone service-create \ --name=heat-cfn \ From 5e3deb678e95737e05f43832d07a37d74c4e8aca Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Tue, 11 Dec 2012 17:09:02 +0900 Subject: [PATCH 076/207] Always create signing_dir regardless of token format Fixes bug 1088801 devstack does not create signing_dir when keystone token format is UUID. If the default value of signing_dir is read-only, OpenStack services such as Quantum server failed to start due to permission denied. On the keystone client cannot know which token_format is used in keystone in advance, so signing_dir should be created regardless of the token format. Change-Id: I1b0d25c1ac4d22d9fb2c5443d15b96fdaa5a4c81 --- lib/cinder | 13 ++++--------- lib/glance | 20 +++++++------------- lib/nova | 12 ++++-------- lib/quantum | 10 ++++------ 4 files changed, 19 insertions(+), 36 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9b9d50d1..0dc86cad 100644 --- a/lib/cinder +++ b/lib/cinder @@ -105,10 +105,7 @@ function configure_cinder() { iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR - fi + iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone @@ -212,11 +209,9 @@ function init_cinder() { fi fi - if [[ 
"$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown `whoami` $CINDER_AUTH_CACHE_DIR - fi + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown `whoami` $CINDER_AUTH_CACHE_DIR } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index b02a4b63..4f631b2c 100644 --- a/lib/glance +++ b/lib/glance @@ -95,9 +95,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry - fi + iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True @@ -121,9 +119,7 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api - fi + iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -163,13 +159,11 @@ function init_glance() { $GLANCE_BIN_DIR/glance-manage db_sync - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry - fi + # Create cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + sudo mkdir -p 
$GLANCE_AUTH_CACHE_DIR/registry + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry } # install_glanceclient() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 3a4d34d8..f059576d 100644 --- a/lib/nova +++ b/lib/nova @@ -172,9 +172,7 @@ function configure_nova() { " -i $NOVA_API_PASTE_INI fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR - fi + iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR if is_service_enabled n-cpu; then # Force IP forwarding on, just on case @@ -378,11 +376,9 @@ function init_nova() { $NOVA_BIN_DIR/nova-manage db sync fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown `whoami` $NOVA_AUTH_CACHE_DIR - fi + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown `whoami` $NOVA_AUTH_CACHE_DIR } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index cb683398..f7fe90a0 100644 --- a/lib/quantum +++ b/lib/quantum @@ -68,12 +68,10 @@ function quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR - # Create cache dir - sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR - sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR - fi + iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR + sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR } function quantum_setup_ovs_bridge() { From c83a7e125fc1fea0370fffed37435097346befa6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Nov 2012 11:47:58 -0600 Subject: [PATCH 077/207] Add TLS support for keystone via proxy * Adds lib/tls to create test 
CA/certs * Start proxy if 'tls-proxy' is enabled * Configure keystone service catalog for TLS * Tear down proxy in unstack.sh * Set auth protocol and ca-cert chain in openrc * Add DATA_DIR to stackrc This is the first in a series of patches to enable TLS support for the service API endpoints. Change-Id: Ia1c91dc8f1aaf94fbec9dc71da322559a83d14b6 --- files/apts/tls-proxy | 1 + lib/keystone | 37 ++++- lib/tls | 314 +++++++++++++++++++++++++++++++++++++++++++ openrc | 9 +- stack.sh | 17 ++- stackrc | 3 + unstack.sh | 5 + 7 files changed, 376 insertions(+), 10 deletions(-) create mode 100644 files/apts/tls-proxy create mode 100644 lib/tls diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy new file mode 100644 index 00000000..dce9c07d --- /dev/null +++ b/files/apts/tls-proxy @@ -0,0 +1 @@ +stud diff --git a/lib/keystone b/lib/keystone index f6a6d667..2d21c2c3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -4,7 +4,7 @@ # Dependencies: # ``functions`` file # ``BASE_SQL_CONN`` -# ``SERVICE_HOST`` +# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) @@ -48,10 +48,14 @@ KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Set Keystone interface configuration KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} +KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} + +# Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} -KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Entry Points @@ -88,6 +92,13 @@ function configure_keystone() { # Rewrite stock ``keystone.conf`` local dburl database_connection_url dburl 
keystone + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT + iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT + fi + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" iniset $KEYSTONE_CONF sql connection $dburl @@ -213,9 +224,9 @@ create_keystone_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $KEYSTONE_SERVICE \ - --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \ - --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" fi # TODO(dtroyer): This is part of a series of changes...remove these when @@ -268,13 +279,25 @@ function install_keystone() { # start_keystone() - Start running processes, including screen function start_keystone() { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + fi + # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then echo "keystone did not start" exit 1 fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & + start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + fi } # stop_keystone() - Stop running processes diff --git a/lib/tls b/lib/tls new file mode 100644 index 00000000..1e2a8993 --- /dev/null +++ b/lib/tls @@ -0,0 +1,314 @@ +# lib/tls +# Functions to control the configuration and operation of the TLS proxy service + +# Dependencies: +# !! source _before_ any services that use ``SERVICE_HOST`` +# ``functions`` file +# ``DEST``, ``DATA_DIR`` must be defined +# ``HOST_IP``, ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# Entry points: +# configure_CA +# init_CA + +# configure_proxy +# start_tls_proxy + +# make_root_ca +# make_int_ca +# new_cert $INT_CA_DIR int-server "abc" +# start_tls_proxy HOST_IP 5000 localhost 5000 + + +if is_service_enabled tls-proxy; then + # TODO(dtroyer): revisit this below after the search for HOST_IP has been done + TLS_IP=${TLS_IP:-$SERVICE_IP} + + # Set the default ``SERVICE_PROTOCOL`` for TLS + SERVICE_PROTOCOL=https +fi + +# Make up a hostname for cert purposes +# will be added to /etc/hosts? 
+DEVSTACK_HOSTNAME=secure.devstack.org +DEVSTACK_CERT_NAME=devstack-cert +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# CA configuration +ROOT_CA_DIR=${ROOT_CA_DIR:-$DATA_DIR/CA/root-ca} +INT_CA_DIR=${INT_CA_DIR:-$DATA_DIR/CA/int-ca} + +ORG_NAME="OpenStack" +ORG_UNIT_NAME="DevStack" + +# Stud configuration +STUD_PROTO="--tls" +STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' + + +# CA Functions +# ============ + +# There may be more than one, get specific +OPENSSL=${OPENSSL:-/usr/bin/openssl} + +# Do primary CA configuration +function configure_CA() { + # build common config file + + # Verify ``TLS_IP`` is good + if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then + # auto-discover has changed the IP + TLS_IP=$HOST_IP + fi +} + +# Creates a new CA directory structure +# create_CA_base ca-dir +function create_CA_base() { + local ca_dir=$1 + + if [[ -d $ca_dir ]]; then + # Bail out it exists + return 0 + fi + + for i in certs crl newcerts private; do + mkdir -p $ca_dir/$i + done + chmod 710 $ca_dir/private + echo "01" >$ca_dir/serial + cp /dev/null $ca_dir/index.txt +} + + +# Create a new CA configuration file +# create_CA_config ca-dir common-name +function create_CA_config() { + local ca_dir=$1 + local common_name=$2 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Certificate Authority +commonName = $common_name + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = 
match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign + +" >$ca_dir/ca.conf +} + +# Create a new signing configuration file +# create_signing_config ca-dir +function create_signing_config() { + local ca_dir=$1 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Server Farm + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = \$ENV::SUBJECT_ALT_NAME + +" >$ca_dir/signing.conf +} + +# Create root and intermediate CAs and an initial server cert +# init_CA +function init_CA { + # Ensure CAs are built + make_root_CA $ROOT_CA_DIR + make_int_CA $INT_CA_DIR $ROOT_CA_DIR + + # Create the CA bundle + cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem + + if [[ ! 
-r $DEVSTACK_CERT ]]; then + if [[ -n "$TLS_IP" ]]; then + # Lie to let incomplete match routines work + TLS_IP="DNS:$TLS_IP" + fi + make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" + + # Create a cert bundle + cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + fi +} + + +# make_cert creates and signs a new certificate with the given commonName and CA +# make_cert ca-dir cert-name "common-name" ["alt-name" ...] +function make_cert() { + local ca_dir=$1 + local cert_name=$2 + local common_name=$3 + local alt_names=$4 + + # Generate a signing request + $OPENSSL req \ + -sha1 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/$cert_name.key \ + -out $ca_dir/$cert_name.csr \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" + + if [[ -z "$alt_names" ]]; then + alt_names="DNS:${common_name}" + else + alt_names="DNS:${common_name},${alt_names}" + fi + + # Sign the request valid for 1 year + SUBJECT_ALT_NAME="$alt_names" \ + $OPENSSL ca -config $ca_dir/signing.conf \ + -extensions req_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/$cert_name.csr \ + -out $ca_dir/$cert_name.crt \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \ + -batch +} + + +# Make an intermediate CA to sign everything else +# make_int_CA ca-dir signing-ca-dir +function make_int_CA() { + local ca_dir=$1 + local signing_ca_dir=$2 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Intermediate CA' + create_signing_config $ca_dir + + # Create a signing certificate request + $OPENSSL req -config $ca_dir/ca.conf \ + -sha1 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.csr \ + -outform PEM + + # Sign the intermediate request valid for 1 year + $OPENSSL ca -config $signing_ca_dir/ca.conf \ + -extensions ca_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/cacert.csr \ + -out 
$ca_dir/cacert.pem \ + -batch +} + +# Make a root CA to sign other CAs +# make_root_CA ca-dir +function make_root_CA() { + local ca_dir=$1 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Root CA' + + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM +} + + +# Proxy Functions +# =============== + +# Starts the TLS proxy for the given IP/ports +# start_tls_proxy front-host front-port back-host back-port +function start_tls_proxy() { + local f_host=$1 + local f_port=$2 + local b_host=$3 + local b_port=$4 + + stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null +} diff --git a/openrc b/openrc index 08ef98be..2553b4aa 100644 --- a/openrc +++ b/openrc @@ -26,6 +26,9 @@ source $RC_DIR/functions # Load local configuration source $RC_DIR/stackrc +# Get some necessary configuration +source $RC_DIR/lib/tls + # The introduction of Keystone to the OpenStack ecosystem has standardized the # term **tenant** as the entity that owns resources. In some places references # still exist to the original Nova term **project** for this use. Also, @@ -49,6 +52,7 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1} # which is convenient for some localrc configurations. HOST_IP=${HOST_IP:-127.0.0.1} SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Some exercises call glance directly. On a single-node installation, Glance # should be listening on HOST_IP. If its running elsewhere, it can be set here @@ -61,7 +65,10 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} # # *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* -export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 + +# Set the pointer to our CA certificate chain. 
Harmless if TLS is not used. +export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. diff --git a/stack.sh b/stack.sh index b8e59bc0..69c983cc 100755 --- a/stack.sh +++ b/stack.sh @@ -288,6 +288,7 @@ fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Configure services to use syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` @@ -305,6 +306,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Get project function libraries +source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -847,6 +849,12 @@ if [[ $TRACK_DEPENDS = True ]] ; then exit 0 fi +if is_service_enabled tls-proxy; then + configure_CA + init_CA + # Add name to /etc/hosts + # don't be naive and add to existing line! 
+fi # Syslog # ------ @@ -923,12 +931,17 @@ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" if is_service_enabled key; then echo_summary "Starting Keystone" - configure_keystone init_keystone start_keystone # Set up a temporary admin URI for Keystone - SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + + if is_service_enabled tls-proxy; then + export OS_CACERT=$INT_CA_DIR/ca-chain.pem + # Until the client support is fixed, just use the internal endpoint + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 + fi # Do the keystone-specific bits from keystone_data.sh export OS_SERVICE_TOKEN=$SERVICE_TOKEN diff --git a/stackrc b/stackrc index 8ac6ec59..41627808 100644 --- a/stackrc +++ b/stackrc @@ -6,6 +6,9 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # Destination path for installation DEST=/opt/stack +# Destination for working data +DATA_DIR=${DEST}/data + # Select the default database DATABASE_TYPE=mysql diff --git a/unstack.sh b/unstack.sh index 34195c21..09e0de6b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -62,6 +62,11 @@ if is_service_enabled horizon; then stop_horizon fi +# Kill TLS proxies +if is_service_enabled tls-proxy; then + killall stud +fi + SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From a0dce264d93909af3052e1fa59210032bb9a994d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Dec 2012 16:52:37 -0600 Subject: [PATCH 078/207] Move Nova account creation out of keystone_data.sh Supports the coming HA/proxy configuration for Nova Change-Id: I2baf1f51486537a1489f1376d38f5710bd96c314 --- files/keystone_data.sh | 26 ++------------------------ lib/nova | 40 ++++++++++++++++++++++++++++++++++++++++ stack.sh | 1 + 3 files changed, 43 insertions(+), 24 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index c8e68dd6..32d4e1a0 
100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,7 +5,6 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled # service swift admin # if enabled # service cinder admin # if enabled @@ -53,29 +52,8 @@ RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- -# Nova -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - NOVA_USER=$(get_id keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=nova@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(get_id keystone service-create \ - --name=nova \ - --type=compute \ - --description="Nova Compute Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" - fi +if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. 
keystone user-role-add \ diff --git a/lib/nova b/lib/nova index 3a4d34d8..095c65ef 100644 --- a/lib/nova +++ b/lib/nova @@ -277,6 +277,46 @@ EOF' fi } +# create_nova_accounts() - Set up common required nova accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service nova admin, [ResellerAdmin (swift only)] + +# Migrated from keystone_data.sh +create_nova_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Nova + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + NOVA_USER=$(keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + NOVA_SERVICE=$(keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" + fi + fi +} + # create_nova_conf() - Create a new nova.conf file function create_nova_conf() { # Remove legacy ``nova.conf`` diff --git a/stack.sh b/stack.sh index b8e59bc0..99ed91f8 100755 --- a/stack.sh +++ b/stack.sh @@ -934,6 +934,7 @@ if is_service_enabled key; then export OS_SERVICE_TOKEN=$SERVICE_TOKEN export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts + create_nova_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. 
ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ From 5c1bedd1edcd04c749721d55710c629bc3d91d12 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 12 Dec 2012 12:03:19 +0000 Subject: [PATCH 079/207] Update aggregates test for aggregates bp Instead of implementing availability zones in the service table, availability zones will be implemented using general aggregate metadata. So when an aggregate is created it will already have metadata. Part of blueprint aggregate-based-availability-zones Change-Id: I0fd22399b99a14087fef63fc91d0baef746efbed --- exercises/aggregates.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index adc3393b..deb1a038 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -99,8 +99,8 @@ META_DATA_1_KEY=asdf META_DATA_2_KEY=foo META_DATA_3_KEY=bar -#ensure no metadata is set -nova aggregate-details $AGGREGATE_ID | grep {} +#ensure no additional metadata is set +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +117,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep {} +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" # Test aggregate-add/remove-host From f2a18c065e2447083e874eeae59bf6988491ae3f Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 4 Dec 2012 18:34:25 +0100 Subject: [PATCH 080/207] Two small fixes for openSUSE support - Use right package files for openSUSE in tools/info.sh - Use a2enmod to enable the wsgi apache module 
Change-Id: I51e3019be32dc0938674c9c8d285a55f5b023707 --- lib/horizon | 7 +++---- tools/info.sh | 2 ++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/horizon b/lib/horizon index 68337ab8..5d479d5d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -79,7 +79,7 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch /etc/$APACHE_NAME/$APACHE_CONF sudo a2ensite horizon - # WSGI doesn't enable by default, enable it + # WSGI isn't enabled by default, enable it sudo a2enmod wsgi elif is_fedora; then APACHE_NAME=httpd @@ -88,9 +88,8 @@ function init_horizon() { elif is_suse; then APACHE_NAME=apache2 APACHE_CONF=vhosts.d/horizon.conf - # Append wsgi to the list of modules to load - grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || - sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi else exit_distro_not_supported "apache configuration" fi diff --git a/tools/info.sh b/tools/info.sh index f01dbea0..ef1f3380 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -92,6 +92,8 @@ if is_ubuntu; then PKG_DIR=$FILES/apts elif is_fedora; then PKG_DIR=$FILES/rpms +elif is_suse; then + PKG_DIR=$FILES/rpms-suse else exit_distro_not_supported "list of packages" fi From b1b04d066d56162013fe08fd893e51060365653e Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 6 Dec 2012 11:59:29 +0100 Subject: [PATCH 081/207] Fix postgresql setup on openSUSE The initdb call is part of starting the service for the first time, so we need to do that. Also, restart postgresql after sed'ing its configuration files: if it was already running for some reason, it needs to be restarted, not started. 
Change-Id: Ib7d3ff5217d06a7764a62a36084090514a1825ea --- lib/databases/postgresql | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 20ade857..e1463c5a 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -20,14 +20,21 @@ function recreate_database_postgresql { function configure_database_postgresql { echo_summary "Configuring and starting PostgreSQL" - if is_fedora || is_suse; then + if is_fedora; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb - else + elif is_ubuntu; then PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf PG_CONF=$PG_DIR/postgresql.conf + elif is_suse; then + PG_HBA=/var/lib/pgsql/data/pg_hba.conf + PG_CONF=/var/lib/pgsql/data/postgresql.conf + # initdb is called when postgresql is first started + sudo [ -e $PG_HBA ] || start_service postgresql + else + exit_distro_not_supported "postgresql configuration" fi # Listen on all addresses sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF @@ -35,7 +42,7 @@ function configure_database_postgresql { sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA # Do password auth for all IPv6 clients sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA - start_service postgresql + restart_service postgresql # If creating the role fails, chances are it already existed. Try to alter it. 
sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ From f1c094cbcd7917593a2f92b82a5d29931a5698a7 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 17:59:04 +0100 Subject: [PATCH 082/207] Fix polkit configuration to allow usage of libvirt on openSUSE There is a buggy limitation with pkla files on openSUSE, that blocks using 'unix-group:libvirtd' from working. A pkla with such a matching identity will be overruled by the pkla generated by polkit-default-privs containing 'unix-group:*' (which will match the other groups the user belongs to, likely after matching libvirtd). To work around this, explicitly allow the user instead. Also, move the creation of the libvirtd group a bit later, to clarify the code. Change-Id: Ia3e4ae982accfc247a744eaa6d6aa4935e4f404c --- lib/nova | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 86db5611..9530df46 100644 --- a/lib/nova +++ b/lib/nova @@ -231,10 +231,13 @@ EOF if is_ubuntu; then LIBVIRT_DAEMON=libvirt-bin else - # http://wiki.libvirt.org/page/SSHPolicyKitSetup - if ! getent group libvirtd >/dev/null; then - sudo groupadd libvirtd - fi + LIBVIRT_DAEMON=libvirtd + fi + + # For distributions using polkit to authorize access to libvirt, + # configure polkit accordingly. + # Based on http://wiki.libvirt.org/page/SSHPolicyKitSetup + if is_fedora; then sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd @@ -243,11 +246,24 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' - LIBVIRT_DAEMON=libvirtd + elif is_suse; then + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-user:$USER +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" fi # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. + if ! getent group libvirtd >/dev/null; then + sudo groupadd libvirtd + fi add_user_to_group `whoami` libvirtd # libvirt detects various settings on startup, as we potentially changed From 1bd2a1b18601fa4f896160c348f2f5988d8b0106 Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Wed, 12 Dec 2012 12:57:16 -0800 Subject: [PATCH 083/207] Fixes lib/heat to use DATABASE_PASSWORD instead of MYSQL_PASSWORD The latter is not used anywhere else, but the former is used in all of the other projects sharing the mysql database for devstack. Change-Id: I2a0cd7e5b908eb144468caf410a6c41e0e5e3fd0 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index feaadec2..43115cb8 100644 --- a/lib/heat +++ b/lib/heat @@ -175,7 +175,7 @@ function init_heat() { # (re)create heat database recreate_database heat utf8 - $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD + $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD $HEAT_DIR/tools/nova_create_flavors.sh } From 33cb43034e13ceb9b55d26ac95e28eeaf47a9cec Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 10 Dec 2012 16:47:36 -0600 Subject: [PATCH 084/207] Write selected env variables to .stackenv There are some environment variables that are derived in stack.sh and cumbersome to re-create later, so save them at the end of stack.sh for use by other supporting scripts, such as openrc. 
Change-Id: I1bbf717b970f8ceac0ff7da74aeaf19474997e07 --- openrc | 5 +++++ stack.sh | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/openrc b/openrc index 2553b4aa..3ef44fd1 100644 --- a/openrc +++ b/openrc @@ -26,6 +26,11 @@ source $RC_DIR/functions # Load local configuration source $RC_DIR/stackrc +# Load the last env variables if available +if [[ -r $TOP_DIR/.stackenv ]]; then + source $TOP_DIR/.stackenv +fi + # Get some necessary configuration source $RC_DIR/lib/tls diff --git a/stack.sh b/stack.sh index f2fd68cc..5002f8b9 100755 --- a/stack.sh +++ b/stack.sh @@ -90,6 +90,11 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ +# Clean up last environment var cache +if [[ -r $TOP_DIR/.stackenv ]]; then + rm $TOP_DIR/.stackenv +fi + # Import database configuration source $TOP_DIR/lib/database @@ -537,9 +542,9 @@ function echo_nolog() { # Set ``LOGFILE`` to turn on logging # Append '.xxxxxxxx' to the given name to maintain history # where 'xxxxxxxx' is a representation of the date the file was created +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then LOGDAYS=${LOGDAYS:-7} - TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") fi @@ -1705,6 +1710,14 @@ if is_service_enabled tempest; then echo '**************************************************' fi +# Save some values we generated for later use +CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") +echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv +for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ + SERVICE_HOST SERVICE_PROTOCOL TLS_IP; do + echo $i=${!i} >>$TOP_DIR/.stackenv +done + # Run local script # ================ From 90e10888547dd1b256e851c6e5fd2488ddda8b5b Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:47:06 +0100 Subject: [PATCH 085/207] Always chown $KEYSTONE_CONF_DIR If the directory exists but is owned by another user, then this will cause failures. 
Note that we already do this for other components (glance, for instance). Change-Id: Ic7d2a2dd179f721636afc9ea9c3fe6bb314c9b33 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..2185f6fb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -81,8 +81,8 @@ function configure_keystone() { if [[ ! -d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR - sudo chown `whoami` $KEYSTONE_CONF_DIR fi + sudo chown `whoami` $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF From 7e86dbe16695808f8206b26d73c1dcfddd173d13 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:50:37 +0100 Subject: [PATCH 086/207] Do not use sudo when sed'ing $KEYSTONE_CATALOG We already edited this file earlier without sudo. Change-Id: I366053edd1a2ad729cfd983ea7491c6252cad905 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..3add0359 100644 --- a/lib/keystone +++ b/lib/keystone @@ -133,7 +133,7 @@ function configure_keystone() { echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi - sudo sed -e " + sed -e " s,%SERVICE_HOST%,$SERVICE_HOST,g; s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; " -i $KEYSTONE_CATALOG From 90dd96d4785bf12d66199c3fc8e2ea6a83090602 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:59:57 +0100 Subject: [PATCH 087/207] Use the right service name for tgt/tgtd in stop_cinder Change-Id: I58cf8cdf88a3edebed729f4460e8ce222db3664d --- lib/cinder | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 16cbaf36..586cfcbb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -265,7 +265,11 @@ function stop_cinder() { done if is_service_enabled c-vol; then - stop_service tgt + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi fi } From 
b0d8a8288be6ad23114563c4bf62338c79766501 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 13 Dec 2012 16:08:48 +0000 Subject: [PATCH 088/207] make volume size parametric in boot_from_volume Fixes bug #1090007 Change-Id: Ifa13b0b7b62be75805db2730cb7154406f0c1b94 --- exerciserc | 4 ++++ exercises/boot_from_volume.sh | 2 +- exercises/volumes.sh | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/exerciserc b/exerciserc index 82c74b7f..c26ec2ce 100644 --- a/exerciserc +++ b/exerciserc @@ -26,3 +26,7 @@ export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} # Max time to wait for a euca-delete command to propogate export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} + +# The size of the volume we want to boot from; some storage back-ends +# do not allow a disk resize, so it's important that this can be tuned +export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 5ebdecc7..5ada2370 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -117,7 +117,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $ fi # Create the bootable volume -cinder create --display_name=$VOL_NAME --image-id $IMAGE 1 +cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE # Wait for volume to activate if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 42f9cb4e..48a976ed 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -142,7 +142,7 @@ if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then fi # Create a new volume -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE if [[ $? 
!= 0 ]]; then echo "Failure creating volume $VOL_NAME" exit 1 From 671c16e63aad003e12151bc94ee2a82365141507 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Dec 2012 16:22:38 -0600 Subject: [PATCH 089/207] Move cinder account creation out of keystone_data.sh Supports the coming HA/proxy configuration for Cinder Change-Id: If2e08e45430dce895ed6bb1070612517a38ca4bc --- files/keystone_data.sh | 25 ++--------------------- lib/cinder | 45 ++++++++++++++++++++++++++++++++++++++++-- stack.sh | 1 + 3 files changed, 46 insertions(+), 25 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 17e8c59e..71a8e5ef 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -7,7 +7,6 @@ # service glance admin # service quantum admin # if enabled # service swift admin # if enabled -# service cinder admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled # Tempest Only: @@ -38,6 +37,7 @@ function get_id () { # Lookups SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") +MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # Roles @@ -49,6 +49,7 @@ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") # role is also configurable in swift-proxy.conf RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # Services # -------- @@ -243,25 +244,3 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --user_id $ALT_DEMO_USER \ --role_id $MEMBER_ROLE fi - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(get_id keystone user-create --name=cinder \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=cinder@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CINDER_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(get_id keystone service-create \ - --name=cinder \ - 
--type=volume \ - --description="Cinder Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $CINDER_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi diff --git a/lib/cinder b/lib/cinder index 16cbaf36..17005af4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,9 +31,11 @@ CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_DIR=$DEST/cinder CINDERCLIENT_DIR=$DEST/python-cinderclient CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} + CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} +CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then @@ -97,7 +99,6 @@ function configure_cinder() { sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap - CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT @@ -159,6 +160,46 @@ function configure_cinder() { fi } +# create_cinder_accounts() - Set up common required cinder accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service cinder admin # if enabled + +# Migrated from keystone_data.sh +create_cinder_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Cinder + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + CINDER_USER=$(keystone user-create \ + --name=cinder \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id 
$SERVICE_TENANT \ + --email=cinder@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $CINDER_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CINDER_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volume \ + --description="Cinder Volume Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_SERVICE \ + --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + fi + fi +} + # init_cinder() - Initialize database and volume group function init_cinder() { # Force nova volumes off diff --git a/stack.sh b/stack.sh index f2fd68cc..94403fcc 100755 --- a/stack.sh +++ b/stack.sh @@ -956,6 +956,7 @@ if is_service_enabled key; then export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts + create_cinder_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ From a5c774ea62ef00b36ffc30b314c6ca08895c75e3 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Mon, 10 Dec 2012 10:40:01 +0000 Subject: [PATCH 090/207] Add tempest config support for quantum. 
* Supports fix for 1043980 Change-Id: I047989dacc263b30992a90181fb07a5ac47787d4 --- lib/tempest | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/tempest b/lib/tempest index 7fa15df0..18599219 100644 --- a/lib/tempest +++ b/lib/tempest @@ -63,6 +63,8 @@ function configure_tempest() { local flavors local flavors_ref local flavor_lines + local public_network_id + local tenant_networks_reachable #TODO(afazekas): # sudo python setup.py deploy @@ -153,6 +155,17 @@ function configure_tempest() { flavor_ref_alt=${flavors[1]} fi + if [ "$Q_USE_NAMESPACE" != "False" ]; then + tenant_networks_reachable=false + else + tenant_networks_reachable=true + fi + + if is_service_enabled q-l3; then + public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \ + awk '{print $2}') + fi + # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT @@ -205,8 +218,14 @@ function configure_tempest() { # compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" + # network admin + iniset $TEMPEST_CONF "network-admin" password "$password" + # network iniset $TEMPEST_CONF network api_version 2.0 + iniset $TEMPEST_CONF network password "$password" + iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONF network public_network_id "$public_network_id" #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From eb4ae630bc706736d9ad0e7fce201ac0a29a037e Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sun, 16 Dec 2012 19:00:26 -0800 Subject: [PATCH 091/207] Stud is only in >=precise. 
Change-Id: Ieb302e80af69a783736f2ebbdc9077e2cafe6a35 --- files/apts/tls-proxy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy index dce9c07d..0a440159 100644 --- a/files/apts/tls-proxy +++ b/files/apts/tls-proxy @@ -1 +1 @@ -stud +stud # only available in dist:precise,quantal From 251d3b5fbcf445c41e127c6afd6350af47b3e011 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 16 Dec 2012 15:05:44 +0100 Subject: [PATCH 092/207] Remove unnecessary returns * remove duplicated xtrace * remove some unnecessary return Change-Id: If9e0a979e0bd5a334e82d42572ac0b149de341d7 --- functions | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/functions b/functions index 1b7d1308..1d0a6445 100644 --- a/functions +++ b/functions @@ -73,7 +73,6 @@ function die_if_not_set() { set +o xtrace local evar=$1; shift if ! is_set $evar || [ $exitcode != 0 ]; then - set +o xtrace echo $@ exit -1 fi @@ -650,10 +649,8 @@ function is_package_installed() { if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null - return $? elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" - return $? else exit_distro_not_supported "finding if a package is installed" fi @@ -664,10 +661,7 @@ function is_package_installed() { # is_set env-var function is_set() { local var=\$"$1" - if eval "[ -z \"$var\" ]"; then - return 1 - fi - return 0 + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this } @@ -973,11 +967,9 @@ function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # The backends haven't initialized yet, just save the selection for now DATABASE_TYPE=$1 - return + else + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 fi - use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 - ret=$? 
- return $ret } # Toggle enable/disable_service for services that must run exclusive of each other @@ -1133,7 +1125,6 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) - return $? } # Restore xtrace From 23431f352618372b6204c3a591653784d77effa1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 12 Dec 2012 15:57:33 -0800 Subject: [PATCH 093/207] Clear out the keystone pki data for each run This prevents old invalid tokens from working after a rerun of stack.sh and potentially providing users and tenants that don't exist. Fixes bug 1089700 Change-Id: Icfc22978e41e459d51b50bc7ad2e6d98d766e402 --- lib/cinder | 1 + lib/glance | 2 ++ lib/keystone | 2 ++ lib/nova | 1 + lib/quantum | 1 + 5 files changed, 7 insertions(+) diff --git a/lib/cinder b/lib/cinder index 16cbaf36..0ab3fd4e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -212,6 +212,7 @@ function init_cinder() { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR sudo chown `whoami` $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 4f631b2c..8ba04b3a 100644 --- a/lib/glance +++ b/lib/glance @@ -162,8 +162,10 @@ function init_glance() { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* } # install_glanceclient() - Collect source and prepare diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..acef8ce3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -259,11 +259,13 @@ function init_keystone() { if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then # Set up certificates + rm -rf $KEYSTONE_CONF_DIR/ssl $KEYSTONE_DIR/bin/keystone-manage pki_setup 
# Create cache dir sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } diff --git a/lib/nova b/lib/nova index 840965ee..e07e61c2 100644 --- a/lib/nova +++ b/lib/nova @@ -435,6 +435,7 @@ function init_nova() { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index 288a3279..480aaa17 100644 --- a/lib/quantum +++ b/lib/quantum @@ -127,6 +127,7 @@ function quantum_setup_keystone() { # Create cache dir sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + rm -f $QUANTUM_AUTH_CACHE_DIR/* } function quantum_setup_ovs_bridge() { From 9efcf6042c9eddd84f8abd70ca2cdf9d20258264 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 19 Dec 2012 10:23:06 +0000 Subject: [PATCH 094/207] xenapi: Enhance devstack progress monitoring Fixes bug 1091299 XenServer - devstack install monitors a log file by tailing it, to see, if the devstack installation is finished. In some cases this script does not detect, that the startup script is finished, and just waiting, causing build failures with timeouts. With this change, the install_os_domU script monitors, if the run.sh script is still running, thus guaranteed to exit as soon as run.sh is done. 
Change-Id: I24a7a46e93ce26be024096828c7954bc694af2b2 --- tools/xen/install_os_domU.sh | 37 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index c78c6f2e..e270e59b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -376,35 +376,22 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = sleep 10 done - # output the run.sh.log - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' & - TAIL_PID=$! - - function kill_tail() { - kill -9 $TAIL_PID - exit 1 - } - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - # ensure we kill off the tail if we exit the script early - # for other reasons - add_on_exit "kill -9 $TAIL_PID || true" - - # wait silently until stack.sh has finished - set +o xtrace - while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do + set +x + echo -n "Waiting for startup script to finish" + while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ] + do sleep 10 + echo -n "." done - set -o xtrace + echo "done!" + set -x - # kill the tail process now stack.sh has finished - kill -9 $TAIL_PID + # output the run.sh.log + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' + + # Fail if the expected text is not found + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' - # check for a failure - if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then - exit 1 - fi echo "################################################################################" echo "" echo "All Finished!" From c439b5dfc2da1b8582f2cafcae20bfe55207372b Mon Sep 17 00:00:00 2001 From: Bob Melander Date: Wed, 19 Dec 2012 14:49:34 +0100 Subject: [PATCH 095/207] Changed so that nova databases are only created once in a multi-host setup. 
Change-Id: Idb17bcb8b6d558f03c188d14509bb5e732079585 Fixes: bug #1091730 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index e07e61c2..d5cefc92 100644 --- a/lib/nova +++ b/lib/nova @@ -421,7 +421,7 @@ function init_nova() { # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. - if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova database # Explicitly use latin1: to avoid lp#829209, nova expects the database to # use latin1 by default, and then upgrades the database to utf8 (see the From ac25b0359ae1beb348b7a47ca3e9984773e88168 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Dec 2012 11:15:46 -0500 Subject: [PATCH 096/207] turn debug on for euca-upload-bundle this spuriously fails in devstack gate, and need to turn this on to get to the bottom of the fails. 
Change-Id: I7d5c1b4d3230efacdd8a3b89e5e40e98ac894a1d --- exercises/bundle.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/bundle.sh b/exercises/bundle.sh index daff5f9c..12f27323 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -51,7 +51,7 @@ IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" -euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" +euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" From 7be0b047dc668a0095d0c7e51948edf83b66d5b8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 12:50:38 -0600 Subject: [PATCH 097/207] Create tools/install_prereqs.sh * Factor system package prereq installs out to tools/install_prereqs.sh * Set minimum time between runs with PREREQ_RERUN_HOURS default = 2 hours * Create re_export_proxy_variables Change-Id: I4a182b1da685f403d6abdd8540d2114796c01682 --- functions | 21 +++++++++++ stack.sh | 20 +---------- tools/install_prereqs.sh | 78 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 19 deletions(-) create mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 3ee43d3d..7de5a44f 100644 --- a/functions +++ b/functions @@ -710,6 +710,27 @@ function restart_service() { } +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function re_export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { diff --git a/stack.sh b/stack.sh index da0faed0..c8b8db44 100755 --- a/stack.sh +++ b/stack.sh @@ -644,25 +644,7 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ $SYSLOG != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi +$TOP_DIR/tools/install_prereqs.sh if is_service_enabled rabbit; then # Install rabbitmq-server diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh new file mode 100755 index 00000000..0bf217b3 --- /dev/null +++ b/tools/install_prereqs.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +# **install_prereqs.sh** + +# Install system package prerequisites +# +# install_prereqs.sh [-f] +# +# -f Force an install run now + + +if [[ -n "$1" && "$1" = "-f" ]]; then + FORCE=1 +fi + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Determine what system we are running on. 
This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Needed to get ``ENABLED_SERVICES`` +source $TOP_DIR/stackrc + +# Prereq dirs are here +FILES=$TOP_DIR/files + +# Minimum wait time +PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} +PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} +PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) + +NOW=$(date "+%s") +LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." + exit 0 +fi + +# Make sure the proxy config is visible to sub-processes +re_export_proxy_variables + +# Install Packages +# ================ + +# Install package requirements +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER From 560346b506616a505718dd18c5053b4bf4360a5b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Dec 2012 17:05:24 -0600 Subject: [PATCH 098/207] Set up Cinder for TLS * Start c-api proxy if 'tls-proxy' is enabled * Configure Cinder service catalog for TLS Change-Id: Ic692a0a16ffa51bfd4bfb67f827cd941ac0e72a4 --- lib/cinder | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 5477e26f..d69790c9 100644 --- a/lib/cinder +++ 
b/lib/cinder @@ -37,6 +37,12 @@ CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini +# Public facing bits +CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} +CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} +CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} +CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin @@ -122,6 +128,11 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + fi + if [ "$SYSLOG" != "False" ]; then iniset $CINDER_CONF DEFAULT use_syslog True fi @@ -193,9 +204,9 @@ create_cinder_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $CINDER_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" fi fi } @@ -297,6 +308,11 @@ function start_cinder() { screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + + # Start proxies if 
enabled + if is_service_enabled c-api && is_service_enabled tls-proxy; then + start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & + fi } # stop_cinder() - Stop running processes From a3b0255313996ec13153d83e898e59b98bbbe973 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 19 Dec 2012 16:27:12 -0600 Subject: [PATCH 099/207] Fix script matching in exercise.sh This prevents false matches in exercise skip test Change-Id: I5656a20bcf11b2ccaf55e280655d0600124adedc --- exercise.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercise.sh b/exercise.sh index a0349ce4..5b3c56e2 100755 --- a/exercise.sh +++ b/exercise.sh @@ -28,7 +28,7 @@ skips="" # Loop over each possible script (by basename) for script in $basenames; do - if [[ "$SKIP_EXERCISES" =~ $script ]] ; then + if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then skips="$skips $script" else echo "=====================================================================" From 588eb4129d34ea58fd40438eb1c6edd1a9f9a2d0 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 20 Dec 2012 10:57:16 +0100 Subject: [PATCH 100/207] Fix iniset and his friends * In python the white spaces are part of the section name * Handle options with empty value * Support paths with white spaces Change-Id: I69a584608853cfdb8b7dce1e24d929216ef2fc41 --- functions | 28 +++++++++++++++++++--------- tests/functions.sh | 43 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 1b7d1308..3bf06552 100644 --- a/functions +++ b/functions @@ -460,7 +460,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" } # Uncomment an option in an INI file @@ -469,7 +469,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed 
-i -e "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" } @@ -480,10 +480,20 @@ function iniget() { local section=$2 local option=$3 local line - line=$(sed -ne "/^\[ *$section *\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} } +# Determinate is the given option present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} # Set an option in an INI file # iniset config-file section option value @@ -492,18 +502,18 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! grep -q "^\[ *$section *\]" $file; then + if ! grep -q "^\[$section\]" "$file"; then # Add section at the end - echo -e "\n[$section]" >>$file + echo -e "\n[$section]" >>"$file" fi - if [[ -z "$(iniget $file $section $option)" ]]; then + if ! 
ini_has_option "$file" "$section" "$option"; then # Add it - sed -i -e "/^\[ *$section *\]/ a\\ + sed -i -e "/^\[$section\]/ a\\ $option = $value -" $file +" "$file" else # Replace it - sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" fi } diff --git a/tests/functions.sh b/tests/functions.sh index be48729f..4fe64436 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -57,6 +57,9 @@ handlers=ee,ff [ ccc ] spaces = yes + +[ddd] +empty = EOF # Test with spaces @@ -79,13 +82,22 @@ fi # Test with spaces in section header -VAL=$(iniget test.ini ccc spaces) +VAL=$(iniget test.ini " ccc " spaces) if [[ "$VAL" == "yes" ]]; then echo "OK: $VAL" else echo "iniget failed: $VAL" fi +iniset test.ini "b b" opt_ion 42 + +VAL=$(iniget test.ini "b b" opt_ion) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + # Test without spaces, end of file VAL=$(iniget test.ini bbb handlers) @@ -104,6 +116,29 @@ else echo "iniget failed: $VAL" fi +# test empty option +if ini_has_option test.ini ddd empty; then + echo "OK: ddd.empty present" +else + echo "ini_has_option failed: ddd.empty not found" +fi + +# test non-empty option +if ini_has_option test.ini bbb handlers; then + echo "OK: bbb.handlers present" +else + echo "ini_has_option failed: bbb.handlers not found" +fi + +# test changing empty option +iniset test.ini ddd empty "42" + +VAL=$(iniget test.ini ddd empty) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test section not exist @@ -132,6 +167,12 @@ else echo "iniget failed: $VAL" fi +if ! 
ini_has_option test.ini aaa debug; then + echo "OK aaa.debug not present" +else + echo "ini_has_option failed: aaa.debug" +fi + iniset test.ini aaa debug "999" VAL=$(iniget test.ini aaa debug) From e7bca2f86446e773472603b18728ce38c33acde9 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 20 Dec 2012 15:09:20 +0100 Subject: [PATCH 101/207] Make opensuse-12.2 a supported distribution This has been working well in my tests, and has been confirmed to work by others. I'll of course step up to fix things for openSUSE when needed. Change-Id: I1ecd345adf975b082aff3a473ab94291b39c8c93 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index cf638e83..43511f47 100755 --- a/stack.sh +++ b/stack.sh @@ -110,7 +110,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From 22ef57317222b3e64eb5d2dcb3ae0588738062e3 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 16 Dec 2012 14:03:06 +0100 Subject: [PATCH 102/207] Add generic account rc creater * Creates account rc files for all tenant user * Able to create new accounts * The rc files contains certificates for image bundle * euca related steps can be simpler in the future Change-Id: I917bffb64e09a5d85c84cde45777c49eaca65e64 --- .gitignore | 1 + stack.sh | 11 ++ tools/create_userrc.sh | 254 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 266 insertions(+) create mode 100755 tools/create_userrc.sh diff --git a/.gitignore b/.gitignore index 17cb38c8..5e770c80 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ files/*.gz files/images stack-screenrc *.pem +accrc diff --git a/stack.sh b/stack.sh index cf638e83..26376b63 100755 --- a/stack.sh +++ b/stack.sh @@ -1672,6 +1672,17 @@ if is_service_enabled heat; then start_heat fi +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. + +if is_service_enabled nova && is_service_enabled key; then + $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc +fi + # Install Images # ============== diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh new file mode 100755 index 00000000..e39c1570 --- /dev/null +++ b/tools/create_userrc.sh @@ -0,0 +1,254 @@ +#!/usr/bin/env bash + +#Warning: This script just for development purposes + +ACCOUNT_DIR=./accrc + +display_help() +{ +cat < + +This script creates certificates and sourcable rc files per tenant/user. 
+ +Target account directory hierarchy: +target_dir-| + |-cacert.pem + |-tenant1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-tenant2-name.. + .. + +Optional Arguments +-P include password to the rc files; with -A it assume all users password is the same +-A try with all user +-u create files just for the specified user +-C create user and tenant, the specifid tenant will be the user's tenant +-r when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member) +-p password for the user +--os-username +--os-password +--os-tenant-name +--os-tenant-id +--os-auth-url +--target-dir +--skip-tenant +--debug + +Example: +$0 -AP +$0 -P -C mytenant -u myuser -p mypass +EOF +} + +if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@") +then + #parse error + display_help + exit 1 +fi +eval set -- $options +ADDPASS="" + +# The services users usually in the service tenant. +# rc files for service users, is out of scope. +# Supporting different tanent for services is out of scope. 
+SKIP_TENANT=",service," # tenant names are between commas(,) +MODE="" +ROLE=Member +USER_NAME="" +USER_PASS="" +while [ $# -gt 0 ] +do + case "$1" in + -h|--help) display_help; exit 0 ;; + --os-username) export OS_USERNAME=$2; shift ;; + --os-password) export OS_PASSWORD=$2; shift ;; + --os-tenant-name) export OS_TENANT_NAME=$2; shift ;; + --os-tenant-id) export OS_TENANT_ID=$2; shift ;; + --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; + --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --target-dir) ACCOUNT_DIR=$2; shift ;; + --debug) set -o xtrace ;; + -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; + -p) USER_PASS=$2; shift ;; + -A) MODE=all; ;; + -P) ADDPASS="yes" ;; + -C) MODE=create; TENANT=$2; shift ;; + -r) ROLE=$2; shift ;; + (--) shift; break ;; + (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; + (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;; + esac + shift +done + +if [ -z "$OS_PASSWORD" ]; then + if [ -z "$ADMIN_PASSWORD" ];then + echo "The admin password is required option!" >&2 + exit 2 + else + OS_PASSWORD=$ADMIN_PASSWORD + fi +fi + +if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then + export OS_TENANT_NAME=admin +fi + +if [ -z "$OS_USERNAME" ]; then + export OS_USERNAME=admin +fi + +if [ -z "$OS_AUTH_URL" ]; then + export OS_AUTH_URL=http://localhost:5000/v2.0/ +fi + +USER_PASS=${USER_PASS:-$OS_PASSWORD} +USER_NAME=${USER_NAME:-$OS_USERNAME} + +if [ -z "$MODE" ]; then + echo "You must specify at least -A or -u parameter!" 
>&2 + echo + display_help + exit 3 +fi + +export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT + +EC2_URL=http://localhost:8773/service/Cloud +S3_URL=http://localhost:3333 + +ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'` +[ -n "$ec2" ] && EC2_URL=$ec2 + +s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'` +[ -n "$s3" ] && S3_URL=$s3 + + +mkdir -p "$ACCOUNT_DIR" +ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` +EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem +mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null +if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then + echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 + mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null +fi + + +function add_entry(){ + local user_id=$1 + local user_name=$2 + local tenant_id=$3 + local tenant_name=$4 + local user_passwd=$5 + + # The admin user can see all user's secret AWS keys, it does not looks good + local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + if [ -z "$line" ]; then + keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2 + line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + fi + local ec2_access_key ec2_secret_key + read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'` + mkdir -p "$ACCOUNT_DIR/$tenant_name" + local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name" + # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN + # Generally the subject DN parts should be in reverse order like the Issuer + # The Serial does not seams correctly marked either + local ec2_cert="$rcfile-cert.pem" + local ec2_private_key="$rcfile-pk.pem" + # Try to preserve the original file on fail (best effort) + 
mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null + mv "$ec2_cert" "$ec2_cert.old" &>/dev/null + # It will not create certs when the password is incorrect + if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then + mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null + mv "$ec2_cert.old" "$ec2_cert" &>/dev/null + fi + cat >"$rcfile" <>"$rcfile" + fi +} + +#admin users expected +function create_or_get_tenant(){ + local tenant_name=$1 + local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` + if [ -n "$tenant_id" ]; then + echo $tenant_id + else + keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +function create_or_get_role(){ + local role_name=$1 + local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` + if [ -n "$role_id" ]; then + echo $role_id + else + keystone tenant-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +# Provides empty string when the user does not exists +function get_user_id(){ + local user_name=$1 + keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' +} + +if [ $MODE != "create" ]; then +# looks like I can't ask for all tenant related to a specified user + for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do + read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'` + if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then + continue; + fi + for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do + read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` + if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then + 
continue; + fi + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + done + done +else + tenant_name=$TENANT + tenant_id=`create_or_get_tenant "$TENANT"` + user_name=$USER_NAME + user_id=`get_user_id $user_name` + if [ -z "$user_id" ]; then + #new user + user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'` + #The password is in the cmd line. It is not a good thing + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + else + #new role + role_id=`create_or_get_role "$ROLE"` + keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id" + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + fi +fi From 3a3a2bac674041f5bb92bc1ef59c7fc55a9946bd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Dec 2012 15:26:24 -0600 Subject: [PATCH 103/207] Set up Nova for TLS * Start n-api proxy if 'tls-proxy' is enabled * Configure nova service catalog for TLS Change-Id: If031eb315f76c5c441a25fe3582b626bbee73c6e --- functions | 8 ++++++++ lib/nova | 41 ++++++++++++++++++++++++++++++++++++++--- stack.sh | 7 +------ 3 files changed, 47 insertions(+), 9 deletions(-) diff --git a/functions b/functions index 1b7d1308..9565e10d 100644 --- a/functions +++ b/functions @@ -996,6 +996,14 @@ function use_exclusive_service { return 0 } +# Wait for an HTTP server to start answering requests +# wait_for_service timeout url +function wait_for_service() { + local timeout=$1 + local url=$2 + timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" +} + # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy` # yum_install package [package ...] 
diff --git a/lib/nova b/lib/nova index 840965ee..04a869e7 100644 --- a/lib/nova +++ b/lib/nova @@ -39,6 +39,12 @@ NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} +# Public facing bits +NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} +NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} +NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} +NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $NOVA_DIR/bin ]]; then NOVA_BIN_DIR=$NOVA_DIR/bin @@ -170,6 +176,10 @@ function configure_nova() { s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; " -i $NOVA_API_PASTE_INI + iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL + fi fi iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR @@ -324,9 +334,9 @@ create_nova_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" + --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" fi fi } @@ -361,6 +371,10 @@ function create_nova_conf() { if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT" + fi fi if 
is_service_enabled cinder; then add_nova_opt "volume_api_class=nova.volume.cinder.API" @@ -472,6 +486,27 @@ function install_nova() { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH } +# start_nova_api() - Start the API process ahead of other things +function start_nova_api() { + # Get right service port for testing + local service_port=$NOVA_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$NOVA_SERVICE_PORT_INT + fi + + screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" + echo "Waiting for nova-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then + echo "nova-api did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & + fi +} + # start_nova() - Start running processes, including screen function start_nova() { # The group **libvirtd** is added to the current user in this script. diff --git a/stack.sh b/stack.sh index f2fd68cc..a3772177 100755 --- a/stack.sh +++ b/stack.sh @@ -1568,12 +1568,7 @@ screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" - screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" - echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then - echo "nova-api did not start" - exit 1 - fi + start_nova_api fi if is_service_enabled q-svc; then From 252f2f533ba8cb6607ddbbcdd1c4aff01dbfb5c3 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 20 Dec 2012 16:41:57 -0500 Subject: [PATCH 104/207] clean up cinder on an unstack cinder currently has issues that leave volumes around after tempest tests. Make sure that cinder gets cleaned up to a zero state on an unstack.sh so that we can reset the environment. 
Change-Id: I448340899bf0fae7d4d16fa26da17feafcef888f --- lib/cinder | 36 ++++++++++++++++++++++++++++++++++-- unstack.sh | 31 +------------------------------ 2 files changed, 35 insertions(+), 32 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2b2f8f1b..dadc8f14 100644 --- a/lib/cinder +++ b/lib/cinder @@ -51,8 +51,40 @@ VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder() { - # This function intentionally left blank - : + # ensure the volume group is cleared up because fails might + # leave dead volumes in the group + TARGETS=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." + if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + TARGETS=$(sudo tgtadm --op show --mode target) + fi + + if [[ -n "$TARGETS" ]]; then + iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + + if is_service_enabled cinder; then + sudo rm -rf $CINDER_STATE_PATH/volumes/* + fi + + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi + + sudo vgremove -f $VOLUME_GROUP } # configure_cinder() - Set config files, create data dirs, etc diff --git a/unstack.sh b/unstack.sh index 09e0de6b..949745e5 100755 --- a/unstack.sh +++ b/unstack.sh @@ -71,36 +71,7 @@ SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes if is_service_enabled cinder; then - TARGETS=$(sudo tgtadm --op show --mode target) - if [ $? -ne 0 ]; then - # If tgt driver isn't running this won't work obviously - # So check the response and restart if need be - echo "tgtd seems to be in a bad state, restarting..." 
- if is_ubuntu; then - restart_service tgt - else - restart_service tgtd - fi - TARGETS=$(sudo tgtadm --op show --mode target) - fi - - if [[ -n "$TARGETS" ]]; then - iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) - for i in "${iqn_list[@]}"; do - echo removing iSCSI target: $i - sudo tgt-admin --delete $i - done - fi - - if is_service_enabled cinder; then - sudo rm -rf $CINDER_STATE_PATH/volumes/* - fi - - if is_ubuntu; then - stop_service tgt - else - stop_service tgtd - fi + cleanup_cinder fi if [[ -n "$UNSTACK_ALL" ]]; then From c24e23b43e42aeec636d58a5a66787541b5b4488 Mon Sep 17 00:00:00 2001 From: "Cody A.W. Somerville" Date: Fri, 21 Dec 2012 02:10:45 -0500 Subject: [PATCH 105/207] Fix selection of image(s) tested by tempest. The variable DEFAULT_IMAGE_NAME is set to 'cirros-0.3.0-x86_64-uec' by default. This will cause configure_tempest to 'exit 1' and abort stack.sh if an image with that name is not uploaded to glance. According to the relevant code comment, this behaviour is incorrect. Updated code to match behaviour described in comment: If image with name matching DEFAULT_IMAGE_NAME exists, use it for both primary and secondary test image otherwise select first image and, if available, second image listed by glance. Will still 'exit 1' if no images are available at all (though it probably shouldn't). Change-Id: I92773d4afd52cf533d16772ae2a087e23e206f8c Fixes: bug #1092713 --- lib/tempest | 50 +++++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/lib/tempest b/lib/tempest index 18599219..c28af860 100644 --- a/lib/tempest +++ b/lib/tempest @@ -85,30 +85,34 @@ function configure_tempest() { # first image returned and set ``image_uuid_alt`` to the second, # if there is more than one returned... # ... 
Also ensure we only take active images, so we don't get snapshots in process - image_lines=`glance image-list` - IFS=$'\n\r' - images="" - for line in $image_lines; do - if [ -z $DEFAULT_IMAGE_NAME ]; then - images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" - else - images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + declare -a images + + while read -r IMAGE_NAME IMAGE_UUID; do + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + image_uuid="$IMAGE_UUID" + image_uuid_alt="$IMAGE_UUID" fi - done - # Create array of image UUIDs... - IFS=" " - images=($images) - num_images=${#images[*]} - echo "Found $num_images images" - if [[ $num_images -eq 0 ]]; then - echo "Found no valid images to use!" - exit 1 - fi - image_uuid=${images[0]} - image_uuid_alt=$image_uuid - if [[ $num_images -gt 1 ]]; then - image_uuid_alt=${images[1]} - fi + images+=($IMAGE_UUID) + done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + + case "${#images[*]}" in + 0) + echo "Found no valid images to use!" + exit 1 + ;; + 1) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + image_uuid_alt=${images[0]} + fi + ;; + *) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + image_uuid_alt=${images[1]} + fi + ;; + esac # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change From 8e36cbe8c8f06576f634452cdb16c9876840572e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 21 Dec 2012 15:39:28 +0100 Subject: [PATCH 106/207] Use new RPC notifier Stop using the deprecated rabbit_notifier. 
Change-Id: I84574c555031b23fb5f256d248af1cdafc8979ce Signed-off-by: Julien Danjou --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5cefc92..46b00696 100644 --- a/lib/nova +++ b/lib/nova @@ -395,7 +395,7 @@ function create_nova_conf() { if is_service_enabled ceilometer; then add_nova_opt "instance_usage_audit=True" add_nova_opt "instance_usage_audit_period=hour" - add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier" + add_nova_opt "notification_driver=nova.openstack.common.notifier.rpc_notifier" add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" fi From 66afb47cb9b470bfa40f11f23ca4f80483cb7aad Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Fri, 21 Dec 2012 15:34:13 +0900 Subject: [PATCH 107/207] Refactor quantum installation * Move quantum installation to lib/quantum * Refactor quantum configuration * Move Quantum service account creation from keystone_data.sh to lib/quantum * Define generic functions to install third party programs * Minor cleanups related to Quantum * Kill dnsmasq which watches an interface 'ns-XXXXXX' in unstack.sh * Set default_floating_pool in nova.conf to make default flaoting pool work when PUBLIC_NETWORK_NAME is other than 'nova' * Make tempest work even when PRIVATE_NETWORK_NAME is other than 'private' Change-Id: I4a6e7fcebfb11556968f53ab6a0e862ce16bb139 --- AUTHORS | 1 + files/keystone_data.sh | 25 -- lib/nova | 11 + lib/quantum | 742 ++++++++++++++++++++++++++++++++++++++--- lib/ryu | 63 ++++ lib/tempest | 4 +- stack.sh | 517 ++-------------------------- unstack.sh | 11 +- 8 files changed, 803 insertions(+), 571 deletions(-) create mode 100644 lib/ryu diff --git a/AUTHORS b/AUTHORS index cd0acac1..ba68e329 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,6 +1,7 @@ Aaron Lee Aaron Rosen Adam Gandelman +Akihiro MOTOKI Andrew Laski Andy Smith Anthony Young diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 71a8e5ef..4c76c9b5 
100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,7 +5,6 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service quantum admin # if enabled # service swift admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled @@ -148,30 +147,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - QUANTUM_USER=$(get_id keystone user-create \ - --name=quantum \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $QUANTUM_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - QUANTUM_SERVICE=$(get_id keystone service-create \ - --name=quantum \ - --type=network \ - --description="Quantum Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $QUANTUM_SERVICE \ - --publicurl "http://$SERVICE_HOST:9696/" \ - --adminurl "http://$SERVICE_HOST:9696/" \ - --internalurl "http://$SERVICE_HOST:9696/" - fi -fi - if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ diff --git a/lib/nova b/lib/nova index d5cefc92..26c5d3c6 100644 --- a/lib/nova +++ b/lib/nova @@ -348,6 +348,7 @@ function create_nova_conf() { add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" add_nova_opt "force_dhcp_release=True" add_nova_opt "fixed_range=$FIXED_RANGE" + add_nova_opt "default_floating_pool=$PUBLIC_NETWORK_NAME" add_nova_opt "s3_host=$SERVICE_HOST" add_nova_opt "s3_port=$S3_SERVICE_PORT" add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" @@ -413,6 +414,16 @@ function create_nova_conf() { done } +function create_nova_conf_nova_network() { + add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt 
"public_interface=$PUBLIC_INTERFACE" + add_nova_opt "vlan_interface=$VLAN_INTERFACE" + add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + add_nova_opt "flat_interface=$FLAT_INTERFACE" + fi +} + # init_nova() - Initialize databases, etc. function init_nova() { # Nova Database diff --git a/lib/quantum b/lib/quantum index 480aaa17..ea0e311c 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,6 +5,36 @@ # ``functions`` file # ``DEST`` must be defined +# ``stack.sh`` calls the entry points in this order: +# +# install_quantum +# install_quantumclient +# install_quantum_agent_packages +# install_quantum_third_party +# setup_quantum +# setup_quantumclient +# configure_quantum +# init_quantum +# configure_quantum_third_party +# init_quantum_third_party +# start_quantum_third_party +# create_nova_conf_quantum +# start_quantum_service_and_check +# create_quantum_initial_network +# setup_quantum_debug +# start_quantum_agents +# +# ``unstack.sh`` calls the entry points in this order: +# +# stop_quantum + +# Functions in lib/quantum are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - quantum exercises +# - 3rd party programs + # Quantum Networking # ------------------ @@ -31,8 +61,8 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -# Defaults -# -------- +# Quantum Network Configuration +# ----------------------------- # Set up default directories QUANTUM_DIR=$DEST/quantum @@ -49,7 +79,6 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} Q_PORT=${Q_PORT:-9696} # Default Quantum Host Q_HOST=${Q_HOST:-$HOST_IP} -# Which Quantum API nova should use # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} # Default auth strategy @@ -59,6 +88,8 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Allow Overlapping IP among subnets 
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False} # Use quantum-debug command Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} @@ -70,14 +101,587 @@ if is_service_enabled quantum; then QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE" fi -fi + # Provider Network Configurations + # -------------------------------- + + # The following variables control the Quantum openvswitch and + # linuxbridge plugins' allocation of tenant networks and + # availability of provider networks. If these are not configured + # in localrc, tenant networks will be local to the host (with no + # remote connectivity), and no physical resources will be + # available for the allocation of provider networks. + + # To use GRE tunnels for tenant networks, set to True in + # localrc. GRE tunnels are only supported by the openvswitch + # plugin, and currently only on Ubuntu. + ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + + # If using GRE tunnels for tenant networks, specify the range of + # tunnel IDs from which tenant networks are allocated. Can be + # overriden in localrc in necesssary. + TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + + # To use VLANs for tenant networks, set to True in localrc. VLANs + # are supported by the openvswitch and linuxbridge plugins, each + # requiring additional configuration described below. + ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + + # If using VLANs for tenant networks, set in localrc to specify + # the range of VLAN VIDs from which tenant networks are + # allocated. An external network switch must be configured to + # trunk these VLANs between hosts for multi-host connectivity. 
+ # + # Example: ``TENANT_VLAN_RANGE=1000:1999`` + TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + + # If using VLANs for tenant networks, or if using flat or VLAN + # provider networks, set in localrc to the name of the physical + # network, and also configure OVS_PHYSICAL_BRIDGE for the + # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge + # agent, as described below. + # + # Example: ``PHYSICAL_NETWORK=default`` + PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + + # With the openvswitch plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the OVS bridge to use for the physical network. The + # bridge will be created if it does not already exist, but a + # physical interface must be manually added to the bridge as a + # port for external connectivity. + # + # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` + OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + + # With the linuxbridge plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the network interface to use for the physical + # network. + # + # Example: ``LB_PHYSICAL_INTERFACE=eth1`` + LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + + # With the openvswitch plugin, set to True in localrc to enable + # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. + # + # Example: ``OVS_ENABLE_TUNNELING=True`` + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +fi # Entry Points # ------------ -# configure_quantum_rootwrap() - configure Quantum's rootwrap -function configure_quantum_rootwrap() { +# configure_quantum() +# Set common config for all quantum server and agents. 
+function configure_quantum() { + _configure_quantum_common + _configure_quantum_rpc + + if is_service_enabled q-svc; then + _configure_quantum_service + fi + if is_service_enabled q-agt; then + _configure_quantum_plugin_agent + fi + if is_service_enabled q-dhcp; then + _configure_quantum_dhcp_agent + fi + if is_service_enabled q-l3; then + _configure_quantum_l3_agent + fi + if is_service_enabled q-meta; then + _configure_quantum_metadata_agent + fi + + _configure_quantum_debug_command + + _cleanup_quantum +} + +function create_nova_conf_quantum() { + add_nova_opt "network_api_class=nova.network.quantumv2.api.API" + add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" + add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" + add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" + add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" + add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} + add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" + add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + fi + add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" + add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + add_nova_opt "service_quantum_metadata_proxy=True" + fi +} + +# create_quantum_accounts() - Set up common required quantum accounts + +# Tenant User Roles +# 
------------------------------------------------------------------ +# service quantum admin # if enabled + +# Migrated from keystone_data.sh +function create_quantum_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + QUANTUM_USER=$(keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=quantum@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + QUANTUM_SERVICE=$(keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $QUANTUM_SERVICE \ + --publicurl "http://$SERVICE_HOST:9696/" \ + --adminurl "http://$SERVICE_HOST:9696/" \ + --internalurl "http://$SERVICE_HOST:9696/" + fi + fi +} + +function create_quantum_initial_network() { + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + + # Create a small network + # Since quantum command is executed in admin context at this point, + # ``--tenant_id`` needs to be specified. + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + + if is_service_enabled q-l3; then + # Create a router, and add the private subnet as one of its interfaces + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) + quantum router-interface-add $ROUTER_ID $SUBNET_ID + # Create an external network, and a subnet. 
Configure the external network as router gw + EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + + if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + CIDR_LEN=${FLOATING_RANGE#*/} + sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE + sudo ip link set $PUBLIC_BRIDGE up + ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + fi + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + # Explicitly set router id in l3 agent configuration + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi + fi +} + +# init_quantum() - Initialize databases, etc. +function init_quantum() { + : +} + +# install_quantum() - Collect source and prepare +function install_quantum() { + git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +} + +# install_quantumclient() - Collect source and prepare +function install_quantumclient() { + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH +} + +# install_quantum_agent_packages() - Collect source and prepare +function install_quantum_agent_packages() { + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + # Install deps + # FIXME add to ``files/apts/quantum``, but don't install if not needed! 
+ if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + # Fedora does not started OVS by default + restart_service openvswitch + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + install_package bridge-utils + fi +} + +function is_quantum_ovs_base_plugin() { + local plugin=$1 + if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then + return 0 + fi + return 1 +} + +function setup_quantum() { + setup_develop $QUANTUM_DIR +} + +function setup_quantumclient() { + setup_develop $QUANTUMCLIENT_DIR +} + +# Start running processes, including screen +function start_quantum_service_and_check() { + # Start the Quantum service + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + echo "Waiting for Quantum to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then + echo "Quantum did not start" + exit 1 + fi +} + +# Start running processes, including screen +function start_quantum_agents() { + # Start up the quantum agents if enabled + screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" + screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" +} + +# stop_quantum() - Stop running processes (non-screen) +function stop_quantum() { + if is_service_enabled q-dhcp; then + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! 
-z "$pid" ] && sudo kill -9 $pid + fi +} + +# _cleanup_quantum() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function _cleanup_quantum() { + : +} + +# _configure_quantum_common() +# Set common config for all quantum server and agents. +# This MUST be called before other _configure_quantum_* functions. +function _configure_quantum_common() { + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown `whoami` $QUANTUM_CONF_DIR + + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge + Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini + Q_DB_NAME="quantum_linux_bridge" + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" + fi + + if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + echo "Quantum plugin not set.. 
exiting" + exit 1 + fi + + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + + database_connection_url dburl $Q_DB_NAME + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl + unset dburl + + _quantum_setup_rootwrap +} + +function _configure_quantum_debug_command() { + if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then + return + fi + + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE + + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' + fi + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_dhcp_agent() { + AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini + + cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver 
$Q_DHCP_CONF_FILE + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_l3_agent() { + AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" + PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini + + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT verbose True + iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_L3_CONF_FILE + + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + _quantum_setup_external_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' + fi + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_metadata_agent() { + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT verbose True + iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +} + +# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent +# It is called when q-agt is enabled. 
+function _configure_quantum_plugin_agent() { + # Configure agent for plugin + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + _configure_quantum_plugin_agent_openvswitch + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + _configure_quantum_plugin_agent_linuxbridge + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + _configure_quantum_plugin_agent_ryu + fi + + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" +} + +function _configure_quantum_plugin_agent_linuxbridge() { + # Setup physical network interface mappings. Override + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function _configure_quantum_plugin_agent_openvswitch() { + # Setup integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_setup_ovs_bridge $OVS_BRIDGE + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 + fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. 
+ if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" +} + +function _configure_quantum_plugin_agent_ryu() { + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_setup_ovs_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" +} + +# Quantum RPC support - must be updated prior to starting any of the services +function _configure_quantum_rpc() { + iniset $QUANTUM_CONF DEFAULT control_exchange quantum + if is_service_enabled qpid ; then + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + elif is_service_enabled zeromq; then + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + fi +} + +# _configure_quantum_service() - Set config files for quantum service +# It is called when q-svc is enabled. 
+function _configure_quantum_service() { + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json + + cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + + if is_service_enabled $DATABASE_BACKENDS; then + recreate_database $Q_DB_NAME utf8 + else + echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + + # Update either configuration file with plugin + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $QUANTUM_CONF DEFAULT verbose True + iniset $QUANTUM_CONF DEFAULT debug True + iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken + + # Configure plugin + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. 
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + fi + + # Enable tunnel networks if selected + if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + else + echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. + if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + LB_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$LB_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + fi + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + fi +} + +# Utility Functions +#------------------ + +# _quantum_setup_rootwrap() - configure Quantum's rootwrap +function _quantum_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi @@ -109,7 +713,7 @@ function configure_quantum_rootwrap() { } # Configures keystone integration for quantum service and agents -function quantum_setup_keystone() { +function _quantum_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -130,39 +734,54 @@ function quantum_setup_keystone() { rm -f $QUANTUM_AUTH_CACHE_DIR/* } 
-function quantum_setup_ovs_bridge() { +function _quantum_setup_ovs_bridge() { local bridge=$1 - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi - sudo ovs-vsctl --no-wait del-port $bridge $PORT - done - sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge - sudo ovs-vsctl --no-wait add-br $bridge + quantum-ovs-cleanup --ovs_integration_bridge $bridge + sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function quantum_setup_external_bridge() { +function _quantum_setup_interface_driver() { + local conf_file=$1 + if [[ "$Q_PLUGIN" == "openvswitch" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + fi +} + +function _quantum_setup_external_bridge() { local bridge=$1 - # Create it if it does not exist + quantum-ovs-cleanup --external_network_bridge $bridge sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge - # remove internal ports - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - TYPE=$(sudo ovs-vsctl get interface $PORT type) - if [[ "$TYPE" == "internal" ]]; then - echo `sudo ip link delete $PORT` > /dev/null - sudo ovs-vsctl --no-wait del-port $bridge $PORT - fi - done # ensure no IP is configured on the public bridge sudo ip addr flush dev $bridge } -function is_quantum_ovs_base_plugin() { - local plugin=$1 - if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then - return 0 +# Functions for Quantum Exercises +#-------------------------------- + +function delete_probe() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug 
--os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function setup_quantum_debug() { + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id fi - return 1 +} + +function teardown_quantum_debug() { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME } function _get_net_id() { @@ -176,13 +795,6 @@ function _get_probe_cmd_prefix() { echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function delete_probe() { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - function _ping_check_quantum() { local from_net=$1 local ip=$2 @@ -220,17 +832,59 @@ function _ssh_check_quantum() { fi } -function setup_quantum() { - public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id - private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id +# Quantum 3rd party programs +#--------------------------- +# A comma-separated list of 3rd party programs +QUANTUM_THIRD_PARTIES="ryu" +for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + source lib/$third_party +done + +# configure_quantum_third_party() - 
Set config files, create data dirs, etc +function configure_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + configure_${third_party} + fi + done } -function teardown_quantum() { - delete_probe $PUBLIC_NETWORK_NAME - delete_probe $PRIVATE_NETWORK_NAME +# init_quantum_third_party() - Initialize databases, etc. +function init_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + init_${third_party} + fi + done +} + +# install_quantum_third_party() - Collect source and prepare +function install_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + install_${third_party} + fi + done +} + +# start_quantum_third_party() - Start running processes, including screen +function start_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + start_${third_party} + fi + done } +# stop_quantum_third_party - Stop running processes (non-screen) +function stop_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + stop_${third_party} + fi + done +} + + # Restore xtrace $XTRACE diff --git a/lib/ryu b/lib/ryu new file mode 100644 index 00000000..ac3462bb --- /dev/null +++ b/lib/ryu @@ -0,0 +1,63 @@ +# Ryu OpenFlow Controller +# ----------------------- + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +RYU_DIR=$DEST/ryu +# Ryu API Host +RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} +# Ryu API Port +RYU_API_PORT=${RYU_API_PORT:-8080} +# Ryu OFP Host +RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} +# Ryu OFP Port +RYU_OFP_PORT=${RYU_OFP_PORT:-6633} +# Ryu Applications +RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} + +function configure_ryu() { + setup_develop $RYU_DIR +} + +function init_ryu() { + 
RYU_CONF_DIR=/etc/ryu + if [[ ! -d $RYU_CONF_DIR ]]; then + sudo mkdir -p $RYU_CONF_DIR + fi + sudo chown `whoami` $RYU_CONF_DIR + RYU_CONF=$RYU_CONF_DIR/ryu.conf + sudo rm -rf $RYU_CONF + + cat < $RYU_CONF +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +EOF +} + +function install_ryu() { + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH +} + +function is_ryu_required() { + if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then + return 0 + fi + return 1 +} + +function start_ryu() { + screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" +} + +function stop_ryu() { + : +} + +# Restore xtrace +$XTRACE diff --git a/lib/tempest b/lib/tempest index 18599219..337be75b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -190,7 +190,7 @@ function configure_tempest() { #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} - iniset $TEMPEST_CONF compute network_for_ssh private + iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout 4 iniset $TEMPEST_CONF compute image_ref $image_uuid @@ -199,7 +199,7 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} - iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} # Inherited behavior, might be wrong iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # TODO(jaypipes): Create the key file here... 
right now, no whitebox diff --git a/stack.sh b/stack.sh index cf638e83..10a86206 100755 --- a/stack.sh +++ b/stack.sh @@ -329,18 +329,6 @@ OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -RYU_DIR=$DEST/ryu -# Ryu API Host -RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} -# Ryu API Port -RYU_API_PORT=${RYU_API_PORT:-8080} -# Ryu OFP Host -RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} -# Ryu OFP Port -RYU_OFP_PORT=${RYU_OFP_PORT:-6633} -# Ryu Applications -RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} - # Should cinder perform secure deletion of volumes? # Defaults to true, can be set to False to avoid this bug when testing: # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 @@ -703,21 +691,7 @@ if is_service_enabled $DATABASE_BACKENDS; then fi if is_service_enabled q-agt; then - if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then - # Install deps - # FIXME add to ``files/apts/quantum``, but don't install if not needed! - if is_ubuntu; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - else - ### FIXME(dtroyer): Find RPMs for OpenVSwitch - echo "OpenVSwitch packages need to be located" - # Fedora does not started OVS by default - restart_service openvswitch - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - install_package bridge-utils - fi + install_quantum_agent_packages fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} @@ -778,11 +752,9 @@ if is_service_enabled horizon; then install_horizon fi if is_service_enabled quantum; then - git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH -fi -if is_service_enabled quantum; then - # quantum - git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH + install_quantum + install_quantumclient + install_quantum_third_party fi if is_service_enabled heat; then install_heat @@ -797,9 +769,6 @@ fi if is_service_enabled tempest; then 
install_tempest fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH -fi # Initialization @@ -837,8 +806,8 @@ if is_service_enabled horizon; then configure_horizon fi if is_service_enabled quantum; then - setup_develop $QUANTUMCLIENT_DIR - setup_develop $QUANTUM_DIR + setup_quantumclient + setup_quantum fi if is_service_enabled heat; then configure_heat @@ -847,9 +816,6 @@ fi if is_service_enabled cinder; then configure_cinder fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - setup_develop $RYU_DIR -fi if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip @@ -962,6 +928,7 @@ if is_service_enabled key; then create_keystone_accounts create_nova_accounts create_cinder_accounts + create_quantum_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ @@ -1011,392 +978,22 @@ if is_service_enabled g-reg; then fi -# Ryu -# --- - -# Ryu is not a part of OpenStack project. Please ignore following block if -# you are not interested in Ryu. -# launch ryu manager -if is_service_enabled ryu; then - RYU_CONF_DIR=/etc/ryu - if [[ ! 
-d $RYU_CONF_DIR ]]; then - sudo mkdir -p $RYU_CONF_DIR - fi - sudo chown `whoami` $RYU_CONF_DIR - RYU_CONF=$RYU_CONF_DIR/ryu.conf - sudo rm -rf $RYU_CONF - - cat < $RYU_CONF ---app_lists=$RYU_APPS ---wsapi_host=$RYU_API_HOST ---wsapi_port=$RYU_API_PORT ---ofp_listen_host=$RYU_OFP_HOST ---ofp_tcp_listen_port=$RYU_OFP_PORT -EOF - screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" -fi - - # Quantum # ------- -# Quantum Network Configuration if is_service_enabled quantum; then echo_summary "Configuring Quantum" - # The following variables control the Quantum openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in localrc, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # localrc. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in localrc in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in localrc to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. 
- # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in localrc to the name of the physical - # network, and also configure OVS_PHYSICAL_BRIDGE for the - # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge - # agent, as described below. - # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - - # With the openvswitch plugin, set to True in localrc to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. - # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - - # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find - if [[ ! 
-d $QUANTUM_CONF_DIR ]]; then - sudo mkdir -p $QUANTUM_CONF_DIR - fi - sudo chown `whoami` $QUANTUM_CONF_DIR - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch - Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge - Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini - Q_DB_NAME="quantum_linux_bridge" - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu - Q_PLUGIN_CONF_FILENAME=ryu.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" - fi - - if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then - echo "Quantum plugin not set.. exiting" - exit 1 - fi - - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - - database_connection_url dburl $Q_DB_NAME - iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl - unset dburl - - cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF - configure_quantum_rootwrap -fi - -# Quantum service (for controller node) -if is_service_enabled q-svc; then - Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json - - cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE - - if is_service_enabled $DATABASE_BACKENDS; then - recreate_database $Q_DB_NAME utf8 - else - echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
- exit 1 - fi - - # Update either configuration file with plugin - iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken - - # Configure plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre - iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES - elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan - else - echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - OVS_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$OVS_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES - fi - - # Enable tunnel networks if selected - if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan - else - echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. 
- if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - LB_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$LB_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES - fi - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum agent (for compute nodes) -if is_service_enabled q-agt; then - # Configure agent for plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Setup integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - - # Setup agent for tunneling - if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - echo "You are running OVS version $OVS_VERSION." - echo "OVS 1.4+ is required for tunneling between multiple hosts." - exit 1 - fi - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP - fi - - # Setup physical network bridge mappings. Override - # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE - - # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE - fi - if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - if [ -n "$RYU_INTERNAL_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE - fi - AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" - fi - # Update config w/rootwrap - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + configure_quantum + init_quantum fi -# Quantum DHCP -if is_service_enabled q-dhcp; then - AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - - Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini - - cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE - - # Set verbose - iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_DHCP_CONF_FILE DEFAULT debug True - iniset 
$Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url - - # Update config w/rootwrap - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum L3 -if is_service_enabled q-l3; then - AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" - PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} - Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini - - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE - - # Set verbose - iniset $Q_L3_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_L3_CONF_FILE DEFAULT debug True - - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - - iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url - if [[ "$Q_PLUGIN" == "openvswitch" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_L3_CONF_FILE 
DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - fi -fi - -#Quantum Metadata -if is_service_enabled q-meta; then - AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" - Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini - - cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE - - # Set verbose - iniset $Q_META_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_META_CONF_FILE DEFAULT debug True - - iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - - iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url -fi - -# Quantum RPC support - must be updated prior to starting any of the services +# Some Quantum plugins require network controllers which are not +# a part of the OpenStack project. Configure and start them. 
if is_service_enabled quantum; then - iniset $QUANTUM_CONF DEFAULT control_exchange quantum - if is_service_enabled qpid ; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url - if [[ "$Q_PLUGIN" == "openvswitch" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - fi - fi + configure_quantum_third_party + init_quantum_third_party + start_quantum_third_party fi @@ -1445,37 +1042,9 @@ if is_service_enabled nova; then # Additional Nova configuration that is dependent on other services if is_service_enabled quantum; then 
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" - if is_service_enabled q-meta; then - add_nova_opt "service_quantum_metadata_proxy=True" - fi + create_nova_conf_quantum elif is_service_enabled n-net; then - add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" - fi + create_nova_conf_nova_network fi # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled @@ -1584,64 +1153,24 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Quantum" - # Start the Quantum service - screen_it q-svc "cd 
$QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" - echo "Waiting for Quantum to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then - echo "Quantum did not start" - exit 1 - fi - # Configure Quantum elements - # Configure internal network & subnet - - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) - - # Create a small network - # Since quantum command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. - NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - if is_service_enabled q-l3; then - # Create a router, and add the private subnet as one of its interfaces - ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) - quantum router-interface-add $ROUTER_ID $SUBNET_ID - # Create an external network, and a subnet. 
Configure the external network as router gw - EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) - quantum router-gateway-set $ROUTER_ID $EXT_NET_ID - if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - CIDR_LEN=${FLOATING_RANGE#*/} - sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE - sudo ip link set $PUBLIC_BRIDGE up - ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP - fi - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then - # Explicitly set router id in l3 agent configuration - iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID - fi - fi - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - setup_quantum - fi + start_quantum_service_and_check + create_quantum_initial_network + setup_quantum_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME # Create a second pool $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi -# Start up the quantum agents if enabled -screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" -screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" -screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" -screen_it q-l3 "python 
$AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" - +if is_service_enabled quantum; then + start_quantum_agents +fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova diff --git a/unstack.sh b/unstack.sh index 09e0de6b..975a0793 100755 --- a/unstack.sh +++ b/unstack.sh @@ -28,6 +28,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift +source $TOP_DIR/lib/quantum # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -39,8 +40,7 @@ fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then source $TOP_DIR/openrc - source $TOP_DIR/lib/quantum - teardown_quantum + teardown_quantum_debug fi # Shut down devstack's screen to get the bulk of OpenStack services in one shot @@ -119,8 +119,7 @@ if [[ -n "$UNSTACK_ALL" ]]; then fi fi -# Quantum dhcp agent runs dnsmasq -if is_service_enabled q-dhcp; then - pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid +if is_service_enabled quantum; then + stop_quantum + stop_quantum_third_party fi From 796342c06e8ca3dcfd2e8f1ba4e0300a703b8de1 Mon Sep 17 00:00:00 2001 From: Michael Still Date: Fri, 28 Dec 2012 11:08:20 +1100 Subject: [PATCH 108/207] Handle the new behaviour for invalid instances. The behaviour of this case changed with bug/836978. Requesting the status of an invalid instance will now return an error message including the instance id, so we need to filter that out. Resolves the devstack elements of bug 836978. 
Change-Id: I385eb6f04cff90e1ddc0b79d835fbbdf92e4e9ff --- exercises/euca.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 982653ef..76df254b 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -165,8 +165,11 @@ fi euca-terminate-instances $INSTANCE || \ die "Failure terminating instance $INSTANCE" -# Assure it has terminated within a reasonable time -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then +# Assure it has terminated within a reasonable time. The behaviour of this +# case changed with bug/836978. Requesting the status of an invalid instance +# will now return an error message including the instance id, so we need to +# filter that out. +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From f35cf91a1d4f13cfa77f9411a1eef38953abebbc Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Mon, 12 Nov 2012 17:58:38 -0800 Subject: [PATCH 109/207] adding support for baremetal hypervisor New files for baremetal driver: - lib/baremetal - files/apts/baremetal Adds two dependencies: - google shell-in-a-box - diskimage-builder Enable by setting both: VIRT_DRIVER=baremetal ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" Change-Id: Ibf6fe1671a759a449c9eb0df47751d1b31ade591 --- files/apts/baremetal | 9 + lib/baremetal | 403 +++++++++++++++++++++++++++++++++++++++++++ lib/nova | 20 +++ stack.sh | 97 +++++++++-- stackrc | 4 + 5 files changed, 522 insertions(+), 11 deletions(-) create mode 100644 files/apts/baremetal create mode 100644 lib/baremetal diff --git a/files/apts/baremetal b/files/apts/baremetal new file mode 100644 index 00000000..54e76e00 --- /dev/null +++ b/files/apts/baremetal @@ -0,0 +1,9 @@ +busybox +dnsmasq +gcc +ipmitool +make 
+open-iscsi +qemu-kvm +syslinux +tgt diff --git a/lib/baremetal b/lib/baremetal new file mode 100644 index 00000000..f82633a4 --- /dev/null +++ b/lib/baremetal @@ -0,0 +1,403 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# This file provides devstack with the environment and utilities to +# control nova-compute's baremetal driver. +# It sets reasonable defaults to run within a single host, +# using virtual machines in place of physical hardware. +# However, by changing just a few options, devstack+baremetal can in fact +# control physical hardware resources on the same network, if you know +# the MAC address(es) and IPMI credentials. +# +# At a minimum, to enable the baremetal driver, you must set these in loclarc: +# VIRT_DRIVER=baremetal +# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" +# +# +# We utilize diskimage-builder to create a ramdisk, and then +# baremetal driver uses that to push a disk image onto the node(s). +# +# Below we define various defaults which control the behavior of the +# baremetal compute service, and inform it of the hardware it will contorl. 
+# +# Below that, various functions are defined, which are called by devstack +# in the following order: +# +# before nova-cpu starts: +# - prepare_baremetal_toolchain +# - configure_baremetal_nova_dirs +# +# after nova and glance have started: +# - build_and_upload_baremetal_deploy_k_and_r $token +# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID +# - upload_baremetal_image $url $token +# - add_baremetal_node + + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Sub-driver settings +# ------------------- + +# sub-driver to use for kernel deployment +# - nova.virt.baremetal.pxe.PXE +# - nova.virt.baremetal.tilera.TILERA +BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} + +# sub-driver to use for remote power management +# - nova.virt.baremetal.fake.FakePowerManager, for manual power control +# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI +# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware +BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} + + +# These should be customized to your environment and hardware +# ----------------------------------------------------------- + +# BM_DNSMASQ_* options must be changed to suit your network environment +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-$PUBLIC_INTERFACE} +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} +BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} + +# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. +# This is passed to dnsmasq along with the kernel/ramdisk to +# deploy via PXE. +BM_FIRST_MAC=${BM_FIRST_MAC:-} + +# BM_SECOND_MAC is only important if the host has >1 NIC. 
+BM_SECOND_MAC=${BM_SECOND_MAC:-} + +# Hostname for the baremetal nova-compute node, if not run on this host +BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)} + +# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI +BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} +BM_PM_USER=${BM_PM_USER:-user} +BM_PM_PASS=${BM_PM_PASS:-pass} + +# BM_FLAVOR_* options are arbitrary and not necessarily related to physical +# hardware capacity. These can be changed if you are testing +# BaremetalHostManager with multiple nodes and different flavors. +BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} +BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} +BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} +BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10} +BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0} +BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1} +BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small} +BM_FLAVOR_ID=${BM_FLAVOR_ID:-11} +BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} + + +# Below this, we set some path and filenames. +# Defaults are probably sufficient. + +BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} + +BM_HOST_CURRENT_KERNEL=$(uname -r) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} + +# If you need to add any extra flavors to the deploy ramdisk image +# eg, specific network drivers, specify them here +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} + +# set URL and version for google shell-in-a-box +BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} + + +# Functions +# --------- + +# Check if baremetal is properly enabled +# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES +# does not contain "baremetal" +function is_baremetal() { + if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then + return 0 + fi + return 1 +} + +# Install diskimage-builder and shell-in-a-box +# so that we can build the deployment kernel & 
ramdisk +function prepare_baremetal_toolchain() { + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + + local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) + if [[ ! -e $DEST/$shellinabox_basename ]]; then + cd $DEST + wget $BM_SHELL_IN_A_BOX + fi + if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then + cd $DEST + tar xzf $shellinabox_basename + fi + if [[ ! $(which shellinaboxd) ]]; then + cd $DEST/${shellinabox_basename%%.tar.gz} + ./configure + make + sudo make install + fi +} + +# prepare various directories needed by baremetal hypervisor +function configure_baremetal_nova_dirs() { + # ensure /tftpboot is prepared + sudo mkdir -p /tftpboot + sudo mkdir -p /tftpboot/pxelinux.cfg + sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ + sudo chown -R `whoami`:libvirtd /tftpboot + + # ensure $NOVA_STATE_PATH/baremetal is prepared + sudo mkdir -p $NOVA_STATE_PATH/baremetal + sudo mkdir -p $NOVA_STATE_PATH/baremetal/console + sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq + sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host + sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal + + # ensure dnsmasq is installed but not running + # because baremetal driver will reconfigure and restart this as needed + if [ ! is_package_installed dnsmasq ]; then + install_package dnsmasq + fi + stop_service dnsmasq +} + +# build deploy kernel+ramdisk, then upload them to glance +# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID +function upload_baremetal_deploy() { + token=$1 + + if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then + sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL + sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL + fi + if [ ! 
-e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL + fi + + # load them into glance + BM_DEPLOY_KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --public --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + BM_DEPLOY_RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --public --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) +} + +# create a basic baremetal flavor, associated with deploy kernel & ramdisk +# +# Usage: create_baremetal_flavor +function create_baremetal_flavor() { + aki=$1 + ari=$2 + nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari +} + +# pull run-time kernel/ramdisk out of disk image and load into glance +# note that $file is currently expected to be in qcow2 format +# Sets KERNEL_ID and RAMDISK_ID +# +# Usage: extract_and_upload_k_and_r_from_image $token $file +function extract_and_upload_k_and_r_from_image() { + token=$1 + file=$2 + image_name=$(basename "$file" ".qcow2") + + # this call returns the file names as "$kernel,$ramdisk" + out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \ + -x -d $TOP_DIR/files -o bm-deploy -i $file) + if [ $? 
-ne 0 ]; then + die "Failed to get kernel and ramdisk from $file" + fi + XTRACE=$(set +o | grep xtrace) + set +o xtrace + out=$(echo "$out" | tail -1) + $XTRACE + OUT_KERNEL=${out%%,*} + OUT_RAMDISK=${out##*,} + + # load them into glance + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --public --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --public --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) +} + + +# Re-implementation of devstack's "upload_image" function +# +# Takes the same parameters, but has some peculiarities which made it +# easier to create a separate method, rather than complicate the logic +# of the existing function. +function upload_baremetal_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || \ + "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + + local KERNEL="" + local RAMDISK="" + local DISK_FORMAT="" + local CONTAINER_FORMAT="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi + DISK_FORMAT=ami + CONTAINER_FORMAT=ami + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac + + if [ "$CONTAINER_FORMAT" = "bare" ]; then + extract_and_upload_k_and_r_from_image $token $IMAGE + elif [ "$CONTAINER_FORMAT" = "ami" ]; then + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-kernel" --public \ + --container-format aki \ + --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-ramdisk" --public \ + --container-format ari \ + --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + else + # TODO(deva): add support for other image types + return + fi + + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --public \ + --container-format 
$CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + + # override DEFAULT_IMAGE_NAME so that tempest can find the image + # that we just uploaded in glance + DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" +} + +function clear_baremetal_of_all_nodes() { + list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' ) + for node in $list + do + nova-baremetal-manage node delete $node + done + list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' ) + for iface in $list + do + nova-baremetal-manage interface delete $iface + done +} + +# inform nova-baremetal about nodes, MACs, etc +# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified +# +# Usage: add_baremetal_node +function add_baremetal_node() { + mac_1=${1:-$BM_FIRST_MAC} + mac_2=${2:-$BM_SECOND_MAC} + + id=$(nova-baremetal-manage node create \ + --host=$BM_HOSTNAME --prov_mac=$mac_1 \ + --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \ + --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \ + --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \ + ) + [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" + id2=$(nova-baremetal-manage interface create \ + --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \ + ) + [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" +} + + +# Restore xtrace +$XTRACE diff --git a/lib/nova b/lib/nova index 26c5d3c6..80741533 100644 --- a/lib/nova +++ b/lib/nova @@ -214,6 +214,11 @@ function configure_nova() { fi fi + # Prepare directories and packages for baremetal driver + if is_baremetal; then + configure_baremetal_nova_dirs + fi + if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat < Date: Wed, 12 Dec 2012 16:52:55 -0800 Subject: [PATCH 110/207] update baremetal option names update power_manager and instance_type_extra_specs config opts to match the new values in nova, introduced by https://review.openstack.org/#/c/17994/ Change-Id: Ic624362df17c217406e142ef4c2e65a4c0c2765d --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 1ee669d6..53b892f5 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ if is_service_enabled nova; then add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" add_nova_opt "baremetal_driver=$BM_DRIVER" add_nova_opt "baremetal_tftp_root=/tftpboot" - add_nova_opt "instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" - add_nova_opt "power_manager=$BM_POWER_MANAGER" + add_nova_opt "baremetal_instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" + add_nova_opt "baremetal_power_manager=$BM_POWER_MANAGER" add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager" add_nova_opt "scheduler_default_filters=AllHostsFilter" From 7611c894b598c876912ab967642f0e8c8ad9171b Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 23 Nov 2012 10:54:54 -0800 Subject: [PATCH 111/207] Add fake env support to baremetal Use bm_poseur to create VM and network bridge so that, in the absence of physical hardware, baremetal driver still has something to manipulate. 
Change-Id: Id80ede13a35e4380f358b47f08d41ff98ea9d70f --- lib/baremetal | 46 +++++++++++++++++++++++++++++++++++++++++----- stack.sh | 3 +++ stackrc | 7 +++++++ unstack.sh | 6 ++++++ 4 files changed, 57 insertions(+), 5 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index f82633a4..62605fb8 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -71,10 +71,24 @@ BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} # These should be customized to your environment and hardware # ----------------------------------------------------------- -# BM_DNSMASQ_* options must be changed to suit your network environment -BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-$PUBLIC_INTERFACE} -BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} -BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} +# whether to create a fake environment, eg. for devstack-gate +BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` + +# Extra options to pass to bm_poseur +# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 +# change the virtualization type: --engine qemu +BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} + +# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE +if [ "$BM_USE_FAKE_ENV" ]; then + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} +else + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} + # if testing on a physical network, + # BM_DNSMASQ_RANGE must be changed to suit your network + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} +fi # BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. # This is passed to dnsmasq along with the kernel/ramdisk to @@ -108,8 +122,8 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} # Below this, we set some path and filenames. # Defaults are probably sufficient. 
- BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} +BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} BM_HOST_CURRENT_KERNEL=$(uname -r) BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} @@ -140,6 +154,7 @@ function is_baremetal() { # so that we can build the deployment kernel & ramdisk function prepare_baremetal_toolchain() { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) if [[ ! -e $DEST/$shellinabox_basename ]]; then @@ -158,6 +173,27 @@ function prepare_baremetal_toolchain() { fi } +# set up virtualized environment for devstack-gate testing +function create_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + # TODO(deva): add support for >1 VM + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm + BM_FIRST_MAC=$(sudo $bm_poseur get-macs) + + # NOTE: there is currently a limitation in baremetal driver + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. + # TODO(deva): remove this after driver issue is fixed. 
+ BM_SECOND_MAC='12:34:56:78:90:12' +} + +function cleanup_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge +} + # prepare various directories needed by baremetal hypervisor function configure_baremetal_nova_dirs() { # ensure /tftpboot is prepared diff --git a/stack.sh b/stack.sh index 53b892f5..87c193a7 100755 --- a/stack.sh +++ b/stack.sh @@ -1145,6 +1145,9 @@ if is_service_enabled nova && is_baremetal; then echo_summary "Preparing for nova baremetal" prepare_baremetal_toolchain configure_baremetal_nova_dirs + if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + create_fake_baremetal_env + fi fi # Launch Services diff --git a/stackrc b/stackrc index 49ccaa82..0e84db80 100644 --- a/stackrc +++ b/stackrc @@ -115,6 +115,13 @@ RYU_BRANCH=master BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git BM_IMAGE_BUILD_BRANCH=master +# bm_poseur +# Used to simulate a hardware environment for baremetal +# Only used if BM_USE_FAKE_ENV is set +BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git +BM_POSEUR_BRANCH=master + + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. 
diff --git a/unstack.sh b/unstack.sh index 2a0a40a8..fd70916d 100755 --- a/unstack.sh +++ b/unstack.sh @@ -25,6 +25,7 @@ source $TOP_DIR/stackrc DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries +source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift @@ -67,6 +68,11 @@ if is_service_enabled tls-proxy; then killall stud fi +# baremetal might have created a fake environment +if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + cleanup_fake_baremetal_env +fi + SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From 64ab774313a5791cfbd8798c68a93068a11229ac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Dec 2012 15:38:28 -0600 Subject: [PATCH 112/207] Set up swift's auth cache dir Swift backing glance doesn't work due to auth_token failing without a signing_dir configured and set up. Create the dir and configure in proxy-server.conf Bug 1092783 Change-Id: If9ac46592bb7fc09e6cfd0a802a4fa61304fc369 --- lib/swift | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/swift b/lib/swift index 140e5e9b..713b38c7 100644 --- a/lib/swift +++ b/lib/swift @@ -29,6 +29,7 @@ set +o xtrace SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient +SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} # TODO: add logging to different location. 
@@ -212,6 +213,7 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles @@ -325,6 +327,10 @@ function init_swift() { swift-ring-builder account.builder rebalance } && popd >/dev/null + # Create cache dir + sudo mkdir -p $SWIFT_AUTH_CACHE_DIR + sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR + rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift() { From 6d04fd7ba59450c4d9c6c7317eef05c7812056b1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 21 Dec 2012 11:03:37 -0600 Subject: [PATCH 113/207] Holiday docs and comment formatting cleanup Change-Id: Ia4ca88c67d3b94e306a79a669805a2fa1b0dc069 --- HACKING.rst | 20 +++++++++- exercises/quantum-adv-test.sh | 69 +++++++++++------------------------ lib/ceilometer | 25 +++++++------ lib/cinder | 2 +- lib/databases/mysql | 4 +- lib/databases/postgresql | 4 +- lib/glance | 5 +-- lib/heat | 23 +++++++----- lib/keystone | 3 -- lib/nova | 2 +- lib/swift | 7 +--- lib/tempest | 18 ++++----- 12 files changed, 81 insertions(+), 101 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index e8f90c78..c4641fa0 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -15,6 +15,16 @@ https://github.com/openstack-dev/devstack.git. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). +Contributing code to DevStack follows the usual OpenStack process as described +in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__ +contains the usual links for blueprints, bugs, tec. 
+ +__ contribute_ +.. _contribute: http://wiki.openstack.org/HowToContribute. + +__ lp_ +.. _lp: https://launchpad.net/~devstack + The primary script in DevStack is ``stack.sh``, which performs the bulk of the work for DevStack's use cases. There is a subscript ``functions`` that contains generally useful shell functions and is used by a number of the scripts in @@ -53,8 +63,8 @@ configuration of the user environment:: source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning -to end. The process of breaking it down into project-level sub-scripts has begun -with the introduction of ``lib/cinder`` and ``lib/ceilometer``. +to end. The process of breaking it down into project-level sub-scripts is nearly +complete and should make ``stack.sh`` easier to read and manage. These library sub-scripts have a number of fixed entry points, some of which may just be stubs. These entry points will be called by ``stack.sh`` in the @@ -71,6 +81,12 @@ There is a sub-script template in ``lib/templates`` to be used in creating new service sub-scripts. The comments in ``<>`` are meta comments describing how to use the template and should be removed. +In order to show the dependencies and conditions under which project functions +are executed the top-level conditional testing for things like ``is_service_enabled`` +should be done in ``stack.sh``. There may be nested conditionals that need +to be in the sub-script, such as testing for keystone being enabled in +``configure_swift()``. + Documentation ------------- diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 493e2239..bc33fe82 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -1,10 +1,9 @@ #!/usr/bin/env bash # -# **quantum.sh** +# **quantum-adv-test.sh** -# We will use this test to perform integration testing of nova and -# other components with Quantum. 
+# Perform integration testing of Nova and other components with Quantum. echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -14,6 +13,7 @@ echo "*********************************************************************" # only the first error that occured. set -o errtrace + trap failed ERR failed() { local r=$? @@ -30,17 +30,8 @@ failed() { # an error. It is also useful for following allowing as the install occurs. set -o xtrace -#------------------------------------------------------------------------------ -# Quantum config check -#------------------------------------------------------------------------------ -# Warn if quantum is not enabled -if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "WARNING: Running quantum test without enabling quantum" -fi - -#------------------------------------------------------------------------------ # Environment -#------------------------------------------------------------------------------ +# ----------- # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) @@ -62,9 +53,8 @@ source $TOP_DIR/lib/quantum # Import exercise configuration source $TOP_DIR/exerciserc -#------------------------------------------------------------------------------ -# Test settings for quantum -#------------------------------------------------------------------------------ +# Quantum Settings +# ---------------- TENANTS="DEMO1" # TODO (nati)_Test public network @@ -106,24 +96,17 @@ PUBLIC_ROUTER1_NET="admin-net1" DEMO1_ROUTER1_NET="demo1-net1" DEMO2_ROUTER1_NET="demo2-net1" -#------------------------------------------------------------------------------ -# Keystone settings. 
-#------------------------------------------------------------------------------ KEYSTONE="keystone" -#------------------------------------------------------------------------------ -# Get a token for clients that don't support service catalog -#------------------------------------------------------------------------------ - -# manually create a token by querying keystone (sending JSON data). Keystone +# Manually create a token by querying keystone (sending JSON data). Keystone # returns a token and catalog of endpoints. We use python to parse the token # and save it. TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` -#------------------------------------------------------------------------------ -# Various functions. -#------------------------------------------------------------------------------ +# Various functions +# ----------------- + function foreach_tenant { COMMAND=$1 for TENANT in ${TENANTS//,/ };do @@ -192,10 +175,9 @@ function get_flavor_id { function confirm_server_active { local VM_UUID=$1 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!" - false -fi - + echo "server '$VM_UUID' did not become active!" + false + fi } function add_tenant { @@ -214,23 +196,15 @@ function add_tenant { function remove_tenant { local TENANT=$1 local TENANT_ID=$(get_tenant_id $TENANT) - $KEYSTONE tenant-delete $TENANT_ID } function remove_user { local USER=$1 local USER_ID=$(get_user_id $USER) - $KEYSTONE user-delete $USER_ID } - - -#------------------------------------------------------------------------------ -# "Create" functions -#------------------------------------------------------------------------------ - function create_tenants { source $TOP_DIR/openrc admin admin add_tenant demo1 demo1 demo1 @@ -383,9 +357,9 @@ function all { delete_all } -#------------------------------------------------------------------------------ -# Test functions. 
-#------------------------------------------------------------------------------ +# Test functions +# -------------- + function test_functions { IMAGE=$(get_image_id) echo $IMAGE @@ -400,9 +374,9 @@ function test_functions { echo $NETWORK_ID } -#------------------------------------------------------------------------------ -# Usage and main. -#------------------------------------------------------------------------------ +# Usage and main +# -------------- + usage() { echo "$0: [-h]" echo " -h, --help Display help message" @@ -473,10 +447,9 @@ main() { fi } +# Kick off script +# --------------- -#------------------------------------------------------------------------------- -# Kick off script. -#------------------------------------------------------------------------------- echo $* main $* diff --git a/lib/ceilometer b/lib/ceilometer index aa1b3960..76ab254d 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,9 +1,9 @@ # lib/ceilometer -# Install and start Ceilometer service +# Install and start **Ceilometer** service + # To enable, add the following to localrc # ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api - # Dependencies: # - functions # - OS_AUTH_URL for auth in api @@ -12,12 +12,12 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_ceilometer +# configure_ceilometer +# init_ceilometer +# start_ceilometer +# stop_ceilometer +# cleanup_ceilometer # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -27,17 +27,18 @@ set +o xtrace # Defaults # -------- -# set up default directories +# Set up default directories CEILOMETER_DIR=$DEST/ceilometer +CEILOMETER_CONF_DIR=/etc/ceilometer +CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf +CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api + # Support potential entry-points console scripts if [ -d $CEILOMETER_DIR/bin ] ; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else CEILOMETER_BIN_DIR=/usr/local/bin 
fi -CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf -CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/lib/cinder b/lib/cinder index dadc8f14..701effd3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -1,5 +1,5 @@ # lib/cinder -# Install and start Cinder volume service +# Install and start **Cinder** volume service # Dependencies: # - functions diff --git a/lib/databases/mysql b/lib/databases/mysql index 68e9adc5..1c0f5ebf 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -1,5 +1,5 @@ -# lib/mysql -# Functions to control the configuration and operation of the MySQL database backend +# lib/databases/mysql +# Functions to control the configuration and operation of the **MySQL** database backend # Dependencies: # DATABASE_{HOST,USER,PASSWORD} must be defined diff --git a/lib/databases/postgresql b/lib/databases/postgresql index e1463c5a..04db714a 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -1,5 +1,5 @@ -# lib/postgresql -# Functions to control the configuration and operation of the PostgreSQL database backend +# lib/databases/postgresql +# Functions to control the configuration and operation of the **PostgreSQL** database backend # Dependencies: # DATABASE_{HOST,USER,PASSWORD} must be defined diff --git a/lib/glance b/lib/glance index 8ba04b3a..dff247a5 100644 --- a/lib/glance +++ b/lib/glance @@ -1,5 +1,5 @@ # lib/glance -# Functions to control the configuration and operation of the Glance service +# Functions to control the configuration and operation of the **Glance** service # Dependencies: # ``functions`` file @@ -25,8 +25,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories GLANCE_DIR=$DEST/glance GLANCECLIENT_DIR=$DEST/python-glanceclient @@ -141,7 +139,6 @@ function configure_glance() { iniset $GLANCE_CACHE_CONF DEFAULT 
admin_password $SERVICE_PASSWORD cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON - } # init_glance() - Initialize databases, etc. diff --git a/lib/heat b/lib/heat index 43115cb8..a6f72862 100644 --- a/lib/heat +++ b/lib/heat @@ -1,5 +1,6 @@ # lib/heat -# Install and start Heat service +# Install and start **Heat** service + # To enable, add the following to localrc # ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng @@ -8,12 +9,14 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_heatclient +# install_heat +# configure_heatclient +# configure_heat +# init_heat +# start_heat +# stop_heat +# cleanup_heat # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -57,7 +60,7 @@ function configure_heat() { HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} HEAT_API_PORT=${HEAT_API_PORT:-8004} - # cloudformation api + # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF iniset $HEAT_API_CFN_CONF DEFAULT debug True @@ -86,7 +89,7 @@ function configure_heat() { iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # openstack api + # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF iniset $HEAT_API_CONF DEFAULT debug True @@ -139,7 +142,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - # cloudwatch api + # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF iniset $HEAT_API_CW_CONF DEFAULT debug True diff --git a/lib/keystone b/lib/keystone index 4dddedb1..34f33723 100644 --- 
a/lib/keystone +++ b/lib/keystone @@ -8,7 +8,6 @@ # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) - # ``stack.sh`` calls the entry points in this order: # # install_keystone @@ -27,8 +26,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories KEYSTONE_DIR=$DEST/keystone KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} diff --git a/lib/nova b/lib/nova index 5224d4de..594195ea 100644 --- a/lib/nova +++ b/lib/nova @@ -1,5 +1,5 @@ # lib/nova -# Functions to control the configuration and operation of the XXXX service +# Functions to control the configuration and operation of the **Nova** service # Dependencies: # ``functions`` file diff --git a/lib/swift b/lib/swift index 713b38c7..89342644 100644 --- a/lib/swift +++ b/lib/swift @@ -1,5 +1,5 @@ # lib/swift -# Functions to control the configuration and operation of the swift service +# Functions to control the configuration and operation of the **Swift** service # Dependencies: # ``functions`` file @@ -23,10 +23,7 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories - SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} @@ -71,6 +68,7 @@ OBJECT_PORT_BASE=6010 CONTAINER_PORT_BASE=6011 ACCOUNT_PORT_BASE=6012 + # Entry Points # ------------ @@ -293,7 +291,6 @@ EOF sudo chown -R $USER:adm ${swift_log_dir} sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf - } # configure_swiftclient() - Set config files, create data dirs, etc diff --git a/lib/tempest b/lib/tempest index 337be75b..190d77f1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -1,4 +1,5 @@ # lib/tempest +# Install and configure Tempest # Dependencies: # ``functions`` file @@ -23,33 +24,29 @@ # # install_tempest # configure_tempest -# init_tempest -## start_tempest -## stop_tempest -## cleanup_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + # Defaults 
# -------- -# - # Set up default directories -NOVA_SOURCE_DIR=$DEST/nova TEMPEST_DIR=$DEST/tempest TEMPEST_CONF_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +NOVA_SOURCE_DIR=$DEST/nova + BUILD_INTERVAL=3 BUILD_TIMEOUT=400 + # Entry Points # ------------ - # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { local image_lines @@ -66,7 +63,7 @@ function configure_tempest() { local public_network_id local tenant_networks_reachable - #TODO(afazekas): + # TODO(afazekas): # sudo python setup.py deploy # This function exits on an error so that errors don't compound and you see @@ -74,7 +71,7 @@ function configure_tempest() { errexit=$(set +o | grep errexit) set -o errexit - #Save IFS + # Save IFS ifs=$IFS # Glance should already contain images to be used in tempest @@ -240,7 +237,6 @@ function configure_tempest() { $errexit } - # install_tempest() - Collect source and prepare function install_tempest() { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH From 75eaaf43c7c0798c4d92726e448d4c0e9372ccf2 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 28 Dec 2012 15:40:21 -0800 Subject: [PATCH 114/207] Minor fixes for lib/baremetal A recent patch moved the flavor management out of nova-manage and into python-novaclient. This corrects the behaviour of lib/baremetal so that it calls "nova flavor-key" instead of "nova-manage". This also fixes a logical error in testing whether dnsmasq is installed. Change-Id: I3fa821c22ae45a49e283d091b6e5ed51c9757e88 --- lib/baremetal | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 62605fb8..112fd6d9 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -211,9 +211,7 @@ function configure_baremetal_nova_dirs() { # ensure dnsmasq is installed but not running # because baremetal driver will reconfigure and restart this as needed - if [ ! 
is_package_installed dnsmasq ]; then - install_package dnsmasq - fi + is_package_installed dnsmasq || install_package dnsmasq stop_service dnsmasq } @@ -256,12 +254,10 @@ function create_baremetal_flavor() { ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari + nova flavor-key $BM_FLAVOR_NAME set \ + cpu_arch=$BM_FLAVOR_ARCH \ + deploy_kernel_id=$aki \ + deploy_ramdisk_id=$ari } # pull run-time kernel/ramdisk out of disk image and load into glance From eac9370a12c1491643457e576613c8368b8037c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A1draig=20Brady?= Date: Wed, 2 Jan 2013 16:02:54 +0000 Subject: [PATCH 115/207] Adjust CINDER_SECURE_DELETE flag to cinder changes Cater for the pending more general cinder support for configurable volume wiping method at: https://review.openstack.org/#/c/12521 This change is done here first so as to not trigger a CI lockup when the referenced patch above lands. When that's in place, we can remove the older secure_delete config adjustment in a subsequent patch. 
Change-Id: I73fe2e0d1cf2815ab6025121584951cb5ff56fa3 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 701effd3..e3df98a4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -169,6 +169,7 @@ function configure_cinder() { if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False + iniset $CINDER_CONF DEFAULT volume_clear none fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then From b73e247ad45eee9ef45c32eff9bfa0daa3b1d733 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 2 Jan 2013 13:59:47 -0500 Subject: [PATCH 116/207] Revert "Create tools/install_prereqs.sh" This reverts commit 7be0b04 This work breaks the ability to do multi database installs, revert until there is a working solution here, as this is going to make fixing postgresql in tempest impossible. Change-Id: I39a2b78542fe60233806d1005186ce1b31d4be17 --- functions | 21 ----------- stack.sh | 20 ++++++++++- tools/install_prereqs.sh | 78 ---------------------------------------- 3 files changed, 19 insertions(+), 100 deletions(-) delete mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 23aee935..9565e10d 100644 --- a/functions +++ b/functions @@ -710,27 +710,6 @@ function restart_service() { } -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` or on the command line if necessary:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -function re_export_proxy_variables() { - if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy - fi - if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy - fi - if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy - fi -} - - # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { diff --git a/stack.sh b/stack.sh index 7306b588..9f734b9e 100755 --- a/stack.sh +++ b/stack.sh @@ -648,7 +648,25 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -$TOP_DIR/tools/install_prereqs.sh +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ $SYSLOG != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi if is_service_enabled rabbit; then # Install rabbitmq-server diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh deleted file mode 100755 index 0bf217b3..00000000 --- a/tools/install_prereqs.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -# **install_prereqs.sh** - -# Install system package prerequisites -# -# install_prereqs.sh [-f] -# -# -f Force an install run now - - -if [[ -n "$1" && "$1" = "-f" ]]; then - FORCE=1 -fi - -# Keep track of the devstack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Determine what system we are running on. 
This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` -# and ``DISTRO`` -GetDistro - -# Needed to get ``ENABLED_SERVICES`` -source $TOP_DIR/stackrc - -# Prereq dirs are here -FILES=$TOP_DIR/files - -# Minimum wait time -PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} -PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} -PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) - -NOW=$(date "+%s") -LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") -DELTA=$(($NOW - $LAST_RUN)) -if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE" ]]; then - echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." - exit 0 -fi - -# Make sure the proxy config is visible to sub-processes -re_export_proxy_variables - -# Install Packages -# ================ - -# Install package requirements -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi - - -# Mark end of run -# --------------- - -date "+%s" >$PREREQ_RERUN_MARKER -date >>$PREREQ_RERUN_MARKER From c99853ca7187d20a8ba6b59c6e44f089c2d7d74f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 3 Jan 2013 17:39:16 -0800 Subject: [PATCH 117/207] Enable millisecond logging for nova and cinder Change-Id: Ic28867ae9a436e81c7f2fcf79f40a1ecc251072c --- lib/cinder | 6 +++--- lib/nova | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 701effd3..385a5a2a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -173,10 +173,10 @@ function 
configure_cinder() { if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" fi if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then diff --git a/lib/nova b/lib/nova index 594195ea..4fef5527 100644 --- a/lib/nova +++ b/lib/nova @@ -408,13 +408,13 @@ function create_nova_conf() { fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt 
"logging_default_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s" + add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs) %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then add_nova_opt "instance_usage_audit=True" From 35138ed6732fd2d4d06b9ffa92a391626acddfb1 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Thu, 3 Jan 2013 17:49:58 -0800 Subject: [PATCH 118/207] Use 'which pip' rather than /usr/bin/pip Bug 1095472: Bugfix to use 'which pip' rather than assume pip always lives in /usr/bin Change-Id: I0cc8a5e35306372653c3c27da9504c64e39d56dd --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 23aee935..bb03c558 100644 --- a/functions +++ b/functions @@ -1146,9 +1146,9 @@ function get_rootwrap_location() { # get_pip_command function get_pip_command() { if is_fedora; then - echo "/usr/bin/pip-python" + which pip-python else - echo "/usr/bin/pip" + which pip fi } From b38d61bc3becb7e82429a130729e1721b1827f0d Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 4 Jan 2013 13:38:23 -0500 Subject: [PATCH 119/207] Add libjs-jquery-tablesorter to package list. Coverage html reports have a symlink to jquery.tablesorter.min.js which is provided by libjs-jquery-tablesorter. 
If the package is not installed coverage html reports will have a broken symlink to that file. Change-Id: I7496a22f36d4f2fda2f030962b10d5afbc8f448f --- files/apts/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/nova b/files/apts/nova index c16a7087..b7d1e928 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -16,6 +16,7 @@ sqlite3 sudo kvm libvirt-bin # NOPRIME +libjs-jquery-tablesorter # Needed for coverage html reports vlan curl rabbitmq-server # NOPRIME From 756c842a7743a84a084b4cc211998e3fdd171592 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 4 Jan 2013 13:37:22 -0800 Subject: [PATCH 120/207] Properly wait until volumes are gone in volumes.sh A logic error in volume exercise's wait for volume deletion causes the test to do the opposite, and continue on even though the volume is in the 'deleting' state. If using a volume backend that can quickly delete volumes (ceph), and the volume is gone before entering the wait, the loop will spin, timeout and fail the test. Change-Id: I8e3d2aaa04e6a165e0dee32bedac97d35e13d5eb --- exercises/volumes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 48a976ed..5c5e0e44 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -192,7 +192,7 @@ echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume start_time=`date +%s` cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" -if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi From 06fac37d064f93f06948534517cffdaa8fdf504e Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Wed, 26 Dec 2012 14:09:43 -0800 Subject: [PATCH 121/207] Turn off tenant_isolation in tempest for quantum Current tempest didn't create networks for each tenant, so let tempest use demo tenant for testing if we enable quantum. Change-Id: I5f139b5f1bdf5c176130b8db9e896e2cb48b4420 --- lib/tempest | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/tempest b/lib/tempest index 18599219..a9a05eeb 100644 --- a/lib/tempest +++ b/lib/tempest @@ -187,6 +187,12 @@ function configure_tempest() { iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR + # Note(nati) current tempest don't create network for each tenant + # so reuse same tenant for now + if is_service_enabled quantum; then + TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + fi + iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} From 22853c1974ca1ce50b946290bc7bf9b2dd34b64d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 Jan 2013 15:18:12 -0600 Subject: [PATCH 122/207] Clean up cinder volume group rather than remove it Removing the cinder volume group breaks devstack installations that share that volume group with other logical volumes. It also was leaking loopback devices. 
Change-Id: Ice4470e06e08ce49a0e1f82af70abcc015c91c20 --- lib/cinder | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 385a5a2a..8949cfcc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -48,6 +48,20 @@ fi VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# _clean_volume_group removes all cinder volumes from the specified volume group +# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX +function _clean_volume_group() { + local vg=$1 + local vg_prefix=$2 + # Clean out existing volumes + for lv in `sudo lvs --noheadings -o lv_name $vg`; do + # vg_prefix prefixes the LVs we want + if [[ "${lv#$vg_prefix}" != "$lv" ]]; then + sudo lvremove -f $vg/$lv + fi + done +} + # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder() { @@ -84,7 +98,8 @@ function cleanup_cinder() { stop_service tgtd fi - sudo vgremove -f $VOLUME_GROUP + # Campsite rule: leave behind a volume group at least as clean as we found it + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX } # configure_cinder() - Set config files, create data dirs, etc @@ -272,13 +287,8 @@ function init_cinder() { # Remove iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # VOLUME_NAME_PREFIX prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done + # Start with a clean volume group + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX fi fi From 5b813bc489eff682025d530557e2beda50db2eac Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 8 Jan 2013 16:51:05 +0100 Subject: [PATCH 123/207] Fix role creation in tools/create_userrc.sh * 
use role-create instead of tenant-create * add some missing quote Change-Id: I3e263bfbfe63a35c5a95248f05d78bd6a1c2e593 --- tools/create_userrc.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e39c1570..55cb8fac 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -173,10 +173,10 @@ function add_entry(){ fi cat >"$rcfile" < Date: Tue, 8 Jan 2013 11:54:43 -0800 Subject: [PATCH 124/207] Use apt git package instead of git-core. The git-core package is deprecated in favor of the git package. Use the git package instead. Change-Id: Ib136e34c7a0d4f87b02e32996420b3f507ca0bf4 --- files/apts/general | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/general b/files/apts/general index 12a92e0c..0264066a 100644 --- a/files/apts/general +++ b/files/apts/general @@ -6,7 +6,7 @@ screen unzip wget psmisc -git-core +git lsof # useful when debugging openssh-server vim-nox From e583d9b8f9bc8f3367df96027a83996ac1303b43 Mon Sep 17 00:00:00 2001 From: "Yunhong, Jiang" Date: Wed, 9 Jan 2013 09:33:07 +0800 Subject: [PATCH 125/207] Add ceilometer client in devstack Ceilometer client CLI is helpful to develop ceilometer related code. Add it to devstack also involve more developer to use it. 
Change-Id: I4147e50c00cb520ec15d63a0c34524ba8cb6654f Signed-off-by: Yunhong, Jiang --- lib/ceilometer | 11 +++++++++++ stack.sh | 2 ++ stackrc | 4 ++++ 3 files changed, 17 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 76ab254d..c31fcb92 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -29,6 +29,7 @@ set +o xtrace # Set up default directories CEILOMETER_DIR=$DEST/ceilometer +CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api @@ -46,6 +47,11 @@ function cleanup_ceilometer() { mongo ceilometer --eval "db.dropDatabase();" } +# configure_ceilometerclient() - Set config files, create data dirs, etc +function configure_ceilometerclient() { + setup_develop $CEILOMETERCLIENT_DIR +} + # configure_ceilometer() - Set config files, create data dirs, etc function configure_ceilometer() { setup_develop $CEILOMETER_DIR @@ -87,6 +93,11 @@ function install_ceilometer() { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } +# install_ceilometerclient() - Collect source and prepare +function install_ceilometerclient() { + git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH +} + # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" diff --git a/stack.sh b/stack.sh index 9f734b9e..247b860d 100755 --- a/stack.sh +++ b/stack.sh @@ -773,6 +773,7 @@ if is_service_enabled cinder; then install_cinder fi if is_service_enabled ceilometer; then + install_ceilometerclient install_ceilometer fi if is_service_enabled tempest; then @@ -1218,6 +1219,7 @@ fi if is_service_enabled ceilometer; then echo_summary "Configuring Ceilometer" configure_ceilometer + configure_ceilometerclient echo_summary "Starting Ceilometer" 
start_ceilometer fi diff --git a/stackrc b/stackrc index 0e84db80..4e03a2f4 100644 --- a/stackrc +++ b/stackrc @@ -33,6 +33,10 @@ GIT_BASE=https://github.com CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git CEILOMETER_BRANCH=master +# ceilometer client library +CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient +CEILOMETERCLIENT_BRANCH=master + # volume service CINDER_REPO=${GIT_BASE}/openstack/cinder CINDER_BRANCH=master From 4ce35c46cc80d07bb9f5ea4f1d4c6961c5f50d3e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 9 Jan 2013 08:13:39 -0500 Subject: [PATCH 126/207] fix msec format string in the else case, which was triggered for devstack gate the msec format string was incorrect, thus largely scrambling the usefulness of logs. Fix this to make devstack readable. Change-Id: I59d0e73932daf27dc1d718dfcf217fe2edf4d491 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 21157dc4..a43c0918 100644 --- a/lib/nova +++ b/lib/nova @@ -414,7 +414,7 @@ function create_nova_conf() { add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs) %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then add_nova_opt "instance_usage_audit=True" From 8e5d2f0c7a29a8002c3be1c94f1abca65ddaea08 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 20 Dec 2012 13:11:43 +0000 Subject: [PATCH 127/207] Set recon_cache_path to ${SWIFT_DATA_DIR}/cache. - Fixes bug 1092538. 
Change-Id: Id9eb9446b32a800b1c7e0ef72882747424c65b6e --- lib/swift | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 89342644..c433387d 100644 --- a/lib/swift +++ b/lib/swift @@ -99,7 +99,7 @@ function configure_swift() { # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/drives + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache} sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. @@ -273,16 +273,22 @@ EOF swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] + iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache + # Using a sed and not iniset/iniuncomment because we want to a global + # modification and make sure it works for new sections. 
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] iniuncomment ${swift_node_config} app:container-server allow_versions iniset ${swift_node_config} app:container-server allow_versions "true" + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} done swift_log_dir=${SWIFT_DATA_DIR}/logs From 9bc47db29c3767cb4aac492e1fd6f1c74e85ca5c Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 12 Dec 2012 16:52:55 -0800 Subject: [PATCH 128/207] convert add_nova_opt to iniset Convert all calls to add_nova_opt to use iniset $NOVA_CONF DEFAULT Convert baremetal options to use iniset $NOVA_CONF baremetal Change-Id: I03ce2149e1f3abc2feb40c156c50de7dabaf47a2 --- lib/nova | 88 ++++++++++++++++++++++++++--------------------------- lib/quantum | 26 ++++++++-------- stack.sh | 64 +++++++++++++++++++------------------- 3 files changed, 89 insertions(+), 89 deletions(-) diff --git a/lib/nova b/lib/nova index a43c0918..781cc097 100644 --- a/lib/nova +++ b/lib/nova @@ -354,73 +354,73 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" - add_nova_opt "verbose=True" - add_nova_opt "auth_strategy=keystone" - add_nova_opt "allow_resize_to_same_host=True" - add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI" - add_nova_opt 
"rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" - add_nova_opt "compute_scheduler_driver=$SCHEDULER" - add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" - add_nova_opt "force_dhcp_release=True" - add_nova_opt "fixed_range=$FIXED_RANGE" - add_nova_opt "default_floating_pool=$PUBLIC_NETWORK_NAME" - add_nova_opt "s3_host=$SERVICE_HOST" - add_nova_opt "s3_port=$S3_SERVICE_PORT" - add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" - add_nova_opt "my_ip=$HOST_IP" + iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT auth_strategy "keystone" + iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" + iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" + iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" + iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF" + iniset $NOVA_CONF DEFAULT force_dhcp_release "True" + iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE" + iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" + iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" + iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" + iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" local dburl database_connection_url dburl nova - add_nova_opt "sql_connection=$dburl" + iniset $NOVA_CONF DEFAULT sql_connection "$dburl" if is_baremetal; then database_connection_url dburl nova_bm - add_nova_opt "baremetal_sql_connection=$dburl" + iniset $NOVA_CONF baremetal sql_connection $dburl fi - add_nova_opt "libvirt_type=$LIBVIRT_TYPE" - add_nova_opt "libvirt_cpu_mode=none" - add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT instance_name_template 
"${INSTANCE_NAME_PREFIX}%08x" if is_service_enabled n-api; then - add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" + iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original - add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT" + iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" fi fi if is_service_enabled cinder; then - add_nova_opt "volume_api_class=nova.volume.cinder.API" + iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi if [ -n "$NOVA_STATE_PATH" ]; then - add_nova_opt "state_path=$NOVA_STATE_PATH" - add_nova_opt "lock_path=$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH" fi if [ -n "$NOVA_INSTANCES_PATH" ]; then - add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" + iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" fi if [ "$MULTI_HOST" != "False" ]; then - add_nova_opt "multi_host=True" - add_nova_opt "send_arp_for_ha=True" + iniset $NOVA_CONF DEFAULT multi_host "True" + iniset $NOVA_CONF DEFAULT send_arp_for_ha "True" fi if [ "$SYSLOG" != "False" ]; then - add_nova_opt "use_syslog=True" + iniset $NOVA_CONF DEFAULT use_syslog "True" fi if [ "$API_RATE_LIMIT" != "True" ]; then - add_nova_opt "api_rate_limit=False" + iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt 
"logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then - add_nova_opt "instance_usage_audit=True" - add_nova_opt "instance_usage_audit_period=hour" - add_nova_opt "notification_driver=nova.openstack.common.notifier.rpc_notifier" - add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" + iniset $NOVA_CONF DEFAULT instance_usage_audit "True" + iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier" fi @@ -433,17 +433,17 @@ function create_nova_conf() { # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do # Attempt to convert flags to options - add_nova_opt ${I//--} + iniset $NOVA_CONF DEFAULT ${I//=/ } done } function create_nova_conf_nova_network() { - add_nova_opt 
"network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" + iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" + iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" fi } diff --git a/lib/quantum b/lib/quantum index ea0e311c..f74eead6 100644 --- a/lib/quantum +++ b/lib/quantum @@ -200,13 +200,13 @@ function configure_quantum() { } function create_nova_conf_quantum() { - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" + iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} @@ 
-214,14 +214,14 @@ function create_nova_conf_quantum() { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" + iniset $NOVA_CONF DEFAULT linuxnet_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" + iniset $NOVA_CONF DEFAULT libvirt_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then - add_nova_opt "service_quantum_metadata_proxy=True" + iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" fi } diff --git a/stack.sh b/stack.sh index 9f734b9e..5a02e076 100755 --- a/stack.sh +++ b/stack.sh @@ -1059,9 +1059,9 @@ if is_service_enabled nova; then # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} @@ -1071,18 +1071,18 @@ if is_service_enabled nova; then # Address on which instance 
vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" - add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" - add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" if is_service_enabled zeromq; then - add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" + iniset $NOVA_CONF DEFAULT rpc_backend "nova.openstack.common.rpc.impl_zmq" elif is_service_enabled qpid; then - add_nova_opt "rpc_backend=nova.rpc.impl_qpid" + iniset $NOVA_CONF DEFAULT rpc_backend "nova.rpc.impl_qpid" elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - add_nova_opt "rabbit_host=$RABBIT_HOST" - add_nova_opt "rabbit_password=$RABBIT_PASSWORD" + iniset $NOVA_CONF DEFAULT rabbit_host "$RABBIT_HOST" + iniset $NOVA_CONF DEFAULT rabbit_password "$RABBIT_PASSWORD" fi - add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" + iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" # XenServer @@ -1091,16 +1091,16 @@ if is_service_enabled nova; then if [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- add_nova_opt "compute_driver=xenapi.XenAPIDriver" + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} XENAPI_USER=${XENAPI_USER:-"root"} - add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_opt "xenapi_connection_username=$XENAPI_USER" - add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_opt "flat_injected=False" + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" # OpenVZ # ------ @@ -1109,34 +1109,34 @@ if is_service_enabled nova; then echo_summary "Using OpenVZ virtualization driver" # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. # Replace connection_type when this is fixed. 
- # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" - add_nova_opt "connection_type=openvz" + # iniset $NOVA_CONF DEFAULT compute_driver "openvz.connection.OpenVzConnection" + iniset $NOVA_CONF DEFAULT connection_type "openvz" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" # Bare Metal # ---------- elif [ "$VIRT_DRIVER" = 'baremetal' ]; then echo_summary "Using BareMetal driver" - add_nova_opt "compute_driver=nova.virt.baremetal.driver.BareMetalDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" - add_nova_opt "baremetal_driver=$BM_DRIVER" - add_nova_opt "baremetal_tftp_root=/tftpboot" - add_nova_opt "baremetal_instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" - add_nova_opt "baremetal_power_manager=$BM_POWER_MANAGER" - add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager" - add_nova_opt "scheduler_default_filters=AllHostsFilter" + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + iniset $NOVA_CONF DEFAULT scheduler_default_filters AllHostsFilter + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot # Default # ------- else echo_summary "Using libvirt virtualization driver" - add_nova_opt "compute_driver=libvirt.LibvirtDriver" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" 
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" fi fi @@ -1174,9 +1174,9 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - add_nova_opt "s3_access_key=$ACCESS_KEY" - add_nova_opt "s3_secret_key=$SECRET_KEY" - add_nova_opt "s3_affix_tenant=True" + iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" + iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY" + iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" From 1edba3318b5d0b52d00905edf0f1632c258225d4 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 9 Jan 2013 15:29:03 -0800 Subject: [PATCH 129/207] Add EXTRA_BAREMETAL_OPTS to stack.sh Allow the passing of extra options to the [baremetal] option group by specifying EXTRA_BAREMETAL_OPTS, using the same format as the existing flag EXTRA_OPTS. Change-Id: I209675786c6a33a68d83a371292a1e1749ecb14c --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index 5a02e076..ebb0bad4 100755 --- a/stack.sh +++ b/stack.sh @@ -1129,6 +1129,12 @@ if is_service_enabled nova; then iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER iniset $NOVA_CONF baremetal tftp_root /tftpboot + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. 
+ for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I//=/ } + done + # Default # ------- From 8c54849d79b8d78679898bd4fcc47b340ecc9bdb Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Wed, 9 Jan 2013 10:41:54 +0800 Subject: [PATCH 130/207] Create signing_dir for ceilometer. Create and initialize the signing_dir for ceilometer keystone authentication. This ensures the ceilometer to use its own PKI cache directory for authentication and avoids the authentication error due to the invalid cached certifications. Change-Id: I6fbc364695ae9be800245d14fd8945d531679550 --- lib/ceilometer | 10 ++++++++++ stack.sh | 1 + 2 files changed, 11 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 76ab254d..50b353f9 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -32,6 +32,7 @@ CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api +CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} # Support potential entry-points console scripts if [ -d $CEILOMETER_DIR/bin ] ; then @@ -78,10 +79,19 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR cleanup_ceilometer } +# init_ceilometer() - Initialize etc. 
+function init_ceilometer() { + # Create cache dir + sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR + sudo chown `whoami` $CEILOMETER_AUTH_CACHE_DIR + rm -f $CEILOMETER_AUTH_CACHE_DIR/* +} + # install_ceilometer() - Collect source and prepare function install_ceilometer() { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH diff --git a/stack.sh b/stack.sh index 9f734b9e..8b279c19 100755 --- a/stack.sh +++ b/stack.sh @@ -1219,6 +1219,7 @@ if is_service_enabled ceilometer; then echo_summary "Configuring Ceilometer" configure_ceilometer echo_summary "Starting Ceilometer" + init_ceilometer start_ceilometer fi From 768295e9f1b0ee74635f8b3002cf7e1971bbdddf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 9 Jan 2013 13:42:03 -0600 Subject: [PATCH 131/207] Add mechanism to automatically load additional projects This adds an extras.d directory to contain startup scripts that stack.sh runs automatically at the end. Similar to local.sh except the scripts are sourced into the stack.sh process rather than executed as a child process. This gives them complete access to the stack.sh environment. Convert Tempest to use this format as an example. Change-Id: Ibc95e6aaecf4211da948319eb452293ae4357780 --- extras.d/80-tempest.sh | 20 ++++++++++++++++++++ extras.d/README | 14 ++++++++++++++ stack.sh | 24 ++++++++++-------------- unstack.sh | 9 +++++++++ 4 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 extras.d/80-tempest.sh create mode 100644 extras.d/README diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh new file mode 100644 index 00000000..506ccef7 --- /dev/null +++ b/extras.d/80-tempest.sh @@ -0,0 +1,20 @@ +# tempest.sh - DevStack extras script + +source $TOP_DIR/lib/tempest + +if [[ "$1" == "stack" ]]; then + # Configure Tempest last to ensure that the runtime configuration of + # the various OpenStack services can be queried. 
+    if is_service_enabled tempest; then +        echo_summary "Configuring Tempest" +        install_tempest +        configure_tempest +    fi +fi + +if [[ "$1" == "unstack" ]]; then +    # no-op +    : +fi + + diff --git a/extras.d/README b/extras.d/README new file mode 100644 index 00000000..ffc6793a --- /dev/null +++ b/extras.d/README @@ -0,0 +1,14 @@ +The extras.d directory contains project initialization scripts to be +sourced by stack.sh at the end of its run. This is expected to be +used by external projects that want to be configured, started and +stopped with DevStack. + +Order is controlled by prefixing the script names with a two digit +sequence number. Script names must end with '.sh'. This provides a +convenient way to disable scripts by simply renaming them. + +DevStack reserves the sequence numbers 00 through 09 and 90 through 99 +for its own use. + +The scripts are called with an argument of 'stack' by stack.sh and +with an argument of 'unstack' by unstack.sh. diff --git a/stack.sh b/stack.sh index 9f734b9e..53300998 100755 --- a/stack.sh +++ b/stack.sh @@ -321,7 +321,6 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum -source $TOP_DIR/lib/tempest source $TOP_DIR/lib/baremetal # Set the destination directories for OpenStack projects @@ -775,9 +774,6 @@ fi if is_service_enabled ceilometer; then install_ceilometer fi -if is_service_enabled tempest; then - install_tempest -fi # Initialization @@ -1314,16 +1310,6 @@ if is_service_enabled nova && is_baremetal; then screen_it baremetal "nova-baremetal-deploy-helper" fi -# Configure Tempest last to ensure that the runtime configuration of -# the various OpenStack services can be queried. 
-if is_service_enabled tempest; then - echo_summary "Configuring Tempest" - configure_tempest - echo '**************************************************' - echo_summary "Finished Configuring Tempest" - echo '**************************************************' -fi - # Save some values we generated for later use CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv @@ -1333,6 +1319,16 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack + done +fi + + # Run local script # ================ diff --git a/unstack.sh b/unstack.sh index fd70916d..1d4bfd56 100755 --- a/unstack.sh +++ b/unstack.sh @@ -39,6 +39,15 @@ if [[ "$1" == "--all" ]]; then UNSTACK_ALL=${UNSTACK_ALL:-1} fi +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i unstack + done +fi + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then source $TOP_DIR/openrc teardown_quantum_debug From ca8021712325dd4d4ac7185a287cb81cb10fd23d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 9 Jan 2013 19:08:02 -0600 Subject: [PATCH 132/207] Add tools/make_cert.sh This allows use of either the DevStack CA or creating another CA independent of stack.sh. 
Change-Id: I055679b5fd06e830c8e6d7d7331c52dd8782d0b6 --- lib/tls | 6 ++++- stack.sh | 1 + tools/make_cert.sh | 55 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 1 deletion(-) create mode 100755 tools/make_cert.sh diff --git a/lib/tls b/lib/tls index 1e2a8993..202edeff 100644 --- a/lib/tls +++ b/lib/tls @@ -189,7 +189,7 @@ subjectAltName = \$ENV::SUBJECT_ALT_NAME " >$ca_dir/signing.conf } -# Create root and intermediate CAs and an initial server cert +# Create root and intermediate CAs # init_CA function init_CA { # Ensure CAs are built @@ -198,7 +198,11 @@ function init_CA { # Create the CA bundle cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem +} +# Create an initial server cert +# init_cert +function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then # Lie to let incomplete match routines work diff --git a/stack.sh b/stack.sh index 9f734b9e..d43e81c0 100755 --- a/stack.sh +++ b/stack.sh @@ -838,6 +838,7 @@ fi if is_service_enabled tls-proxy; then configure_CA init_CA + init_cert # Add name to /etc/hosts # don't be naive and add to existing line! 
fi diff --git a/tools/make_cert.sh b/tools/make_cert.sh new file mode 100755 index 00000000..cb93e57c --- /dev/null +++ b/tools/make_cert.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# **make_cert.sh** + +# Create a CA hierarchy (if necessary) and server certificate +# +# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled +# but in the curent directory unless ``DATA_DIR`` is set + +ENABLE_TLS=True +DATA_DIR=${DATA_DIR:-`pwd`/ca-data} + +ROOT_CA_DIR=$DATA_DIR/root +INT_CA_DIR=$DATA_DIR/int + +# Import common functions +source $TOP_DIR/functions + +# Import TLS functions +source lib/tls + +function usage { + echo "$0 - Create CA and/or certs" + echo "" + echo "Usage: $0 commonName [orgUnit]" + exit 1 +} + +CN=$1 +if [ -z "$CN" ]]; then + usage +fi +ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} + +# Useful on OS/X +if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then + # set up for brew-installed modern OpenSSL + OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf + OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl +fi + +DEVSTACK_CERT_NAME=$CN +DEVSTACK_HOSTNAME=$CN +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# Make sure the CA is set up +configure_CA +init_CA + +# Create the server cert +make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME + +# Create a cert bundle +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + From ceaa38b3299d56adc1e65e7128bb67cb7364acd1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 17:09:57 -0600 Subject: [PATCH 133/207] Fix tempest flavors and DEFAULT_INSTANCE_TYPE The flavor selection was broken if DEFAULT_INSTANCE_TYPE is defined but not yet created, for example when it is created in local.sh. This also has the side effect of setting flavor_ref_alt to the first flavor where it was unset in the previous code. 
Change-Id: I1fa48b3f90af45144c92298b6b07a4f7ee3b499f --- lib/tempest | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index b408b113..fa637c12 100644 --- a/lib/tempest +++ b/lib/tempest @@ -134,12 +134,14 @@ function configure_tempest() { flavor_lines=`nova flavor-list` IFS=$'\r\n' flavors="" - for line in $flavor_lines; do - if [ -z $DEFAULT_INSTANCE_TYPE ]; then - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - else - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + if [[ -n "$DEFAULT_INSTANCE_TYPE" ]]; then + for line in $flavor_lines; do + f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") + flavors="$flavors $f" + done fi + for line in $flavor_lines; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" done IFS=" " From d66c965b0c00209905747754cd580fc2f887af0a Mon Sep 17 00:00:00 2001 From: Kevin Lyda Date: Wed, 9 Jan 2013 13:39:57 +0000 Subject: [PATCH 134/207] Correct comment about SWIFT_LOOPBACK_DISK_SIZE. The comment regarding SWIFT_LOOPBACK_DISK_SIZE in lib/swift used the incorrect unit (bytes instead of kilobytes). Change-Id: I86117e36141c0a028a6fa6878a4d540f624e759d --- lib/swift | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index c433387d..b418eda8 100644 --- a/lib/swift +++ b/lib/swift @@ -39,7 +39,8 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} # DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. +# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in +# kilobytes. # Default is 1 gigabyte. 
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} From 9a28c86b4c43eb8e311c60dc7400b9989a6745bb Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Jan 2013 15:42:49 +0100 Subject: [PATCH 135/207] Add .stackenv to gitignore. Change-Id: I39e91aaf5e9ff29b025fd5a1aa74bad01c5e5bdd --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 5e770c80..f9e26445 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ files/images stack-screenrc *.pem accrc +.stackenv From df1cf94cf0f6191842dd48b5e9a640510c33b3c0 Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Thu, 10 Jan 2013 11:12:45 -0800 Subject: [PATCH 136/207] Fixing python-heatclient's git repo path. heatclient was moved to the official openstack repo. Thanks Simon Pasquier for the patch. Change-Id: I561bf1ea11f1c74b5e75ab93170bd367ba36f90f Fixes: bug #1096922 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 4e03a2f4..89d4f090 100644 --- a/stackrc +++ b/stackrc @@ -108,7 +108,7 @@ HEAT_REPO=${GIT_BASE}/openstack/heat.git HEAT_BRANCH=master # python heat client library -HEATCLIENT_REPO=${GIT_BASE}/heat-api/python-heatclient.git +HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git HEATCLIENT_BRANCH=master # ryu service From 2b7ce5a8f37232c8cc28f86c6d659a0ec3f3f00b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 10 Jan 2013 13:22:45 -0600 Subject: [PATCH 137/207] Add stackrc comments to HACKING Change-Id: I46ff885184a2b5b71caca905c27f28d8b1304011 --- HACKING.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/HACKING.rst b/HACKING.rst index c4641fa0..6ad8c7e6 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -88,6 +88,30 @@ to be in the sub-script, such as testing for keystone being enabled in ``configure_swift()``. +stackrc +------- + +``stackrc`` is the global configuration file for DevStack. 
It is responsible for +calling ``localrc`` if it exists so configuration can be overridden by the user. + +The criteria for what belongs in ``stackrc`` can be vaguely summarized as +follows: + +* All project repositories and branches (for historical reasons) +* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* Global service configuration like ``ENABLED_SERVICES`` +* Variables used by multiple services that do not have a clear owner, i.e. + ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` + (nova-network and quantum) +* Variables that can not be cleanly declared in a project file due to + dependency ordering, i.e. the order of sourcing the project files can + not be changed for other reasons but the earlier file needs to dereference a + variable set in the later file. This should be rare. + +Also, variable declarations in ``stackrc`` do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` +and can stay in the project file. + Documentation ------------- From dff95122f79c83e7e3b108b12e6b8a48aa62c01d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 10 Jan 2013 20:51:28 -0600 Subject: [PATCH 138/207] Fix EXTRA_OPTS handling In the conversion away from add_nova_opt the EXTRA_OPTS handling inadvertently replaced all '=' chars in the value rather than just the first. Additional '=' is legal for an option value. FWIW here is the setting that tripped it: EXTRA_OPTS=default_log_levels=sqlalchemy=WARN,boto=WARN,eventlet.wsgi.server=WARN Change-Id: I2deb139171250eb0ef5028bb924569cec31e1a4e --- lib/nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 781cc097..a987008b 100644 --- a/lib/nova +++ b/lib/nova @@ -432,8 +432,8 @@ function create_nova_conf() { # Define extra nova conf flags by defining the array ``EXTRA_OPTS``. 
# For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF DEFAULT ${I//=/ } + # Replace the first '=' with ' ' for iniset syntax + iniset $NOVA_CONF DEFAULT ${I/=/ } done } From 91b8d13edad4d21bfd5b67219347f934728ee462 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 6 Jan 2013 22:40:09 +0100 Subject: [PATCH 139/207] Fix "sudo: sorry, you must have a tty to run sudo" On many systems the requiretty sudoers option is turned on by default. With "requiretty" option the sudo ensures the user have real tty access. Just several "su" variant has an option for skipping the new session creation step. Only one session can posses a tty, so after a "su -c" the sudo will not work. We will use sudo instead of su, when we create the stack account. This change adds new variable the STACK_USER for service username. Change-Id: I1b3fbd903686884e74a5a22d82c0c0890e1be03c --- lib/baremetal | 4 ++-- lib/ceilometer | 3 ++- lib/cinder | 6 ++--- lib/glance | 8 +++---- lib/heat | 2 +- lib/keystone | 5 ++-- lib/nova | 12 +++++----- lib/quantum | 4 ++-- lib/ryu | 2 +- lib/swift | 3 ++- stack.sh | 35 +++++++++++++++------------- stackrc | 3 +++ tools/build_ramdisk.sh | 12 +++++----- tools/build_uec.sh | 8 +++---- tools/copy_dev_environment_to_uec.sh | 11 +++++---- tools/xen/build_xva.sh | 4 ++-- tools/xen/prepare_guest.sh | 9 +++---- 17 files changed, 72 insertions(+), 59 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 112fd6d9..3cc24291 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -200,14 +200,14 @@ function configure_baremetal_nova_dirs() { sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ - sudo chown -R `whoami`:libvirtd /tftpboot + sudo chown -R $STACK_USER:libvirtd /tftpboot # ensure $NOVA_STATE_PATH/baremetal is prepared sudo mkdir -p $NOVA_STATE_PATH/baremetal sudo mkdir -p $NOVA_STATE_PATH/baremetal/console sudo mkdir -p 
$NOVA_STATE_PATH/baremetal/dnsmasq sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host - sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal + sudo chown -R $STACK_USER $NOVA_STATE_PATH/baremetal # ensure dnsmasq is installed but not running # because baremetal driver will reconfigure and restart this as needed diff --git a/lib/ceilometer b/lib/ceilometer index 749e785c..0fae3973 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -9,6 +9,7 @@ # - OS_AUTH_URL for auth in api # - DEST set to the destination directory # - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api +# - STACK_USER service user # stack.sh # --------- @@ -94,7 +95,7 @@ function configure_ceilometer() { function init_ceilometer() { # Create cache dir sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR - sudo chown `whoami` $CEILOMETER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* } diff --git a/lib/cinder b/lib/cinder index 4aaea5d0..cbeb1d7a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -3,7 +3,7 @@ # Dependencies: # - functions -# - DEST, DATA_DIR must be defined +# - DEST, DATA_DIR, STACK_USER must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined # ``KEYSTONE_TOKEN_FORMAT`` must be defined @@ -110,7 +110,7 @@ function configure_cinder() { if [[ ! 
-d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi - sudo chown `whoami` $CINDER_CONF_DIR + sudo chown $STACK_USER $CINDER_CONF_DIR cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR @@ -295,7 +295,7 @@ function init_cinder() { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown `whoami` $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } diff --git a/lib/glance b/lib/glance index dff247a5..1c56a675 100644 --- a/lib/glance +++ b/lib/glance @@ -3,7 +3,7 @@ # Dependencies: # ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``SERVICE_HOST`` # ``KEYSTONE_TOKEN_FORMAT`` must be defined @@ -75,7 +75,7 @@ function configure_glance() { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi - sudo chown `whoami` $GLANCE_CONF_DIR + sudo chown $STACK_USER $GLANCE_CONF_DIR # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF @@ -158,10 +158,10 @@ function init_glance() { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api rm -f $GLANCE_AUTH_CACHE_DIR/api/* sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry rm -f $GLANCE_AUTH_CACHE_DIR/registry/* } diff --git a/lib/heat b/lib/heat index a6f72862..89bd44f0 100644 --- a/lib/heat +++ b/lib/heat @@ -49,7 +49,7 @@ function configure_heat() { if [[ ! 
-d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR fi - sudo chown `whoami` $HEAT_CONF_DIR + sudo chown $STACK_USER $HEAT_CONF_DIR HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} diff --git a/lib/keystone b/lib/keystone index 34f33723..7a70cc41 100644 --- a/lib/keystone +++ b/lib/keystone @@ -7,6 +7,7 @@ # ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) +# ``STACK_USER`` # ``stack.sh`` calls the entry points in this order: # @@ -79,7 +80,7 @@ function configure_keystone() { if [[ ! -d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi - sudo chown `whoami` $KEYSTONE_CONF_DIR + sudo chown $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF @@ -261,7 +262,7 @@ function init_keystone() { # Create cache dir sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR - sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } diff --git a/lib/nova b/lib/nova index 781cc097..9803acbf 100644 --- a/lib/nova +++ b/lib/nova @@ -3,7 +3,7 @@ # Dependencies: # ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``LIBVIRT_TYPE`` must be defined # ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined @@ -149,7 +149,7 @@ function configure_nova() { if [[ ! -d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR fi - sudo chown `whoami` $NOVA_CONF_DIR + sudo chown $STACK_USER $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR @@ -277,7 +277,7 @@ EOF" if ! 
getent group libvirtd >/dev/null; then sudo groupadd libvirtd fi - add_user_to_group `whoami` libvirtd + add_user_to_group $STACK_USER libvirtd # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart @@ -297,7 +297,7 @@ EOF" if [ -L /dev/disk/by-label/nova-instances ]; then if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then sudo mount -L nova-instances $NOVA_INSTANCES_PATH - sudo chown -R `whoami` $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi @@ -474,13 +474,13 @@ function init_nova() { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* # Create the keys folder sudo mkdir -p ${NOVA_STATE_PATH}/keys # make sure we own NOVA_STATE_PATH and all subdirs - sudo chown -R `whoami` ${NOVA_STATE_PATH} + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index f74eead6..f081d9b6 100644 --- a/lib/quantum +++ b/lib/quantum @@ -388,7 +388,7 @@ function _configure_quantum_common() { if [[ ! -d $QUANTUM_CONF_DIR ]]; then sudo mkdir -p $QUANTUM_CONF_DIR fi - sudo chown `whoami` $QUANTUM_CONF_DIR + sudo chown $STACK_USER $QUANTUM_CONF_DIR cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF @@ -730,7 +730,7 @@ function _quantum_setup_keystone() { iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR # Create cache dir sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR - sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + sudo chown $STACK_USER $QUANTUM_AUTH_CACHE_DIR rm -f $QUANTUM_AUTH_CACHE_DIR/* } diff --git a/lib/ryu b/lib/ryu index ac3462bb..1292313e 100644 --- a/lib/ryu +++ b/lib/ryu @@ -27,7 +27,7 @@ function init_ryu() { if [[ ! 
-d $RYU_CONF_DIR ]]; then sudo mkdir -p $RYU_CONF_DIR fi - sudo chown `whoami` $RYU_CONF_DIR + sudo chown $STACK_USER $RYU_CONF_DIR RYU_CONF=$RYU_CONF_DIR/ryu.conf sudo rm -rf $RYU_CONF diff --git a/lib/swift b/lib/swift index b418eda8..46c6eb20 100644 --- a/lib/swift +++ b/lib/swift @@ -4,6 +4,7 @@ # Dependencies: # ``functions`` file # ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# ``STACK_USER`` must be defined # ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined # ``lib/keystone`` file # ``stack.sh`` calls the entry points in this order: @@ -333,7 +334,7 @@ function init_swift() { # Create cache dir sudo mkdir -p $SWIFT_AUTH_CACHE_DIR - sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR + sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR rm -f $SWIFT_AUTH_CACHE_DIR/* } diff --git a/stack.sh b/stack.sh index da623531..9b084bee 100755 --- a/stack.sh +++ b/stack.sh @@ -177,40 +177,43 @@ VERBOSE=$(trueorfalse True $VERBOSE) # sudo privileges and runs as that user. if [[ $EUID -eq 0 ]]; then + STACK_USER=$DEFAULT_STACK_USER ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user" + echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" sleep $ROOTSLEEP # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo - if ! getent group stack >/dev/null; then - echo "Creating a group called stack" - groupadd stack + if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER fi - if ! getent passwd stack >/dev/null; then - echo "Creating a user called stack" - useradd -g stack -s /bin/bash -d $DEST -m stack + if ! 
getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER fi echo "Giving stack user passwordless sudo privileges" # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to stack user" + echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" cp -r -f -T "$TOP_DIR" "$STACK_DIR" - chown -R stack "$STACK_DIR" + chown -R $STACK_USER "$STACK_DIR" + cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack + exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" else - exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack + exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" fi exit 1 else + STACK_USER=`whoami` # We're not **root**, make sure ``sudo`` is available is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." @@ -220,10 +223,10 @@ else # Set up devstack sudoers TEMPFILE=`mktemp` - echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # see them by forcing PATH - echo "Defaults:`whoami` secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE + echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh @@ -235,7 +238,7 @@ fi # Create the destination directory and ensure it is writable by the user sudo mkdir -p $DEST if [ ! 
-w $DEST ]; then - sudo chown `whoami` $DEST + sudo chown $STACK_USER $DEST fi # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without @@ -251,7 +254,7 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR -sudo chown `whoami` $DATA_DIR +sudo chown $STACK_USER $DATA_DIR # Common Configuration diff --git a/stackrc b/stackrc index 4e03a2f4..96f0ee58 100644 --- a/stackrc +++ b/stackrc @@ -12,6 +12,9 @@ DATA_DIR=${DEST}/data # Select the default database DATABASE_TYPE=mysql +# Default stack user +DEFAULT_STACK_USER=stack + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 5ff05b08..cfcca51f 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -125,17 +125,17 @@ if [ ! -r $DEV_FILE ]; then # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd stack -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $DEFAULT_STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown stack $DEST + chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST # A simple password - pass - echo stack:pass | chroot $MNTDIR chpasswd + echo $DEFAULT_STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "stack ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$DEFAULT_STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -187,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R stack $DEST/devstack +chroot $MNTDIR chown -R $DEFAULT_STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -225,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown stack $DEST/run.sh +chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 58c54258..5748b390 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -207,11 +207,11 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF fi -useradd -U -G sudo -s /bin/bash -d /opt/stack -m stack -echo stack:pass | chpasswd +useradd -U -G sudo -s /bin/bash -d /opt/stack -m $DEFAULT_STACK_USER +echo $DEFAULT_STACK_USER:pass | chpasswd mkdir -p /opt/stack/.ssh echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys -chown -R stack /opt/stack +chown -R $DEFAULT_STACK_USER /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys @@ -224,7 +224,7 @@ fi # Run stack.sh cat >> $vm_dir/uec/user-data< $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Copy over 
your ssh keys and env if desired @@ -64,7 +67,7 @@ rm -rf $STAGING_DIR/$DEST/devstack cp_it . $STAGING_DIR/$DEST/devstack # Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R stack $DEST +chroot $STAGING_DIR chown -R $DEFAULT_STACK_USER $DEST # Unmount umount $STAGING_DIR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index c359c558..f3f166fe 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -65,8 +65,8 @@ cd $TOP_DIR cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -chown -R stack /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack +chown -R $DEFAULT_STACK_USER /opt/stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $DEFAULT_STACK_USER exit 0 EOF diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 4aa4554f..fe524454 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -19,6 +19,7 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} STAGING_DIR=${STAGING_DIR:-stage} DO_TGZ=${DO_TGZ:-1} XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} +STACK_USER=${STACK_USER:-stack} # Install basics chroot $STAGING_DIR apt-get update @@ -46,12 +47,12 @@ rm -f $STAGING_DIR/etc/localtime # Add stack user chroot $STAGING_DIR groupadd libvirtd -chroot $STAGING_DIR useradd stack -s /bin/bash -d /opt/stack -G libvirtd -echo stack:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd -echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers +chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd +echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd +echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers # Give ownership of /opt/stack to stack user -chroot $STAGING_DIR chown -R stack /opt/stack +chroot $STAGING_DIR chown -R $STACK_USER /opt/stack # Make our ip address hostnames look nice at the command prompt echo "export 
PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc From 15bda3e4630618135b26bd5a41f48e8c2fb0112b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 11 Jan 2013 15:07:53 -0600 Subject: [PATCH 140/207] Handle existing security group rules in volume exercise Change-Id: I0aa3bc0c6179f92a12c1e9bbace61597778ffa1f --- exercises/volumes.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 5c5e0e44..45b8645b 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -86,8 +86,12 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi # Configure Security Group Rules -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi # determinine instance type # ------------------------- From 4b26d3191ee82e96fedaffa68362919deb8a3813 Mon Sep 17 00:00:00 2001 From: Sumit Naiksatam Date: Fri, 4 Jan 2013 10:32:54 -0800 Subject: [PATCH 141/207] Quantum FloodLight/BigSwitch Plugin Support The patch introduces devstack support for the Quantum FloodLight/BigSwitch RESTProxy Plugin. 
Change-Id: I8c032fd16723ed6055821de0860fae508df371b7 Implements: blueprint quantum-floodlight-bigswitch-plugin-support --- AUTHORS | 1 + lib/bigswitch_floodlight | 50 ++++++++++++++++++++++++++++++++++++++++ lib/quantum | 18 +++++++++++++-- 3 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 lib/bigswitch_floodlight diff --git a/AUTHORS b/AUTHORS index ba68e329..7ec1f663 100644 --- a/AUTHORS +++ b/AUTHORS @@ -35,6 +35,7 @@ Matt Joyce Osamu Habuka Russell Bryant Scott Moser +Sumit Naiksatam Thierry Carrez Todd Willey Tres Henry diff --git a/lib/bigswitch_floodlight b/lib/bigswitch_floodlight new file mode 100644 index 00000000..77aeb61d --- /dev/null +++ b/lib/bigswitch_floodlight @@ -0,0 +1,50 @@ +# Big Switch/FloodLight OpenFlow Controller +# ------------------------------------------ + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} +BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} +OVS_BRIDGE=${OVS_BRIDGE:-br-int} + +function configure_bigswitch_floodlight() { + : +} + +function init_bigswitch_floodlight() { + install_quantum_agent_packages + + echo -n "Installing OVS managed by the openflow controllers:" + echo ${BS_FL_CONTROLLERS_PORT} + + # Create local OVS bridge and configure it + sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} + + ctrls= + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` + do + ctrl=${ctrl%:*} + ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" + done + echo "Adding Network conttrollers: " ${ctrls} + sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} +} + +function install_bigswitch_floodlight() { + : +} + +function start_bigswitch_floodlight() { + : +} + +function stop_bigswitch_floodlight() { + : +} + +# Restore xtrace +$XTRACE diff --git a/lib/quantum b/lib/quantum index 
f74eead6..b8b115af 100644 --- a/lib/quantum +++ b/lib/quantum @@ -217,6 +217,8 @@ function create_nova_conf_quantum() { iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" iniset $NOVA_CONF DEFAULT linuxnet_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" iniset $NOVA_CONF DEFAULT libvirt_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} fi iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" @@ -332,7 +334,7 @@ function install_quantum_agent_packages() { function is_quantum_ovs_base_plugin() { local plugin=$1 - if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then + if [[ ",openvswitch,ryu,bigswitch_floodlight," =~ ,${plugin}, ]]; then return 0 fi return 1 @@ -407,6 +409,13 @@ function _configure_quantum_common() { Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_quantum" Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/bigswitch + Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_DB_NAME="restproxy_quantum" + Q_PLUGIN_CLASS="quantum.plugins.bigswitch.plugin.QuantumRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} fi if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then @@ -674,6 +683,9 @@ function _configure_quantum_service() { elif [[ "$Q_PLUGIN" = "ryu" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY 
servertimeout $BS_FL_CONTROLLER_TIMEOUT fi } @@ -749,6 +761,8 @@ function _quantum_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver elif [[ "$Q_PLUGIN" = "ryu" ]]; then iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver fi } @@ -835,7 +849,7 @@ function _ssh_check_quantum() { # Quantum 3rd party programs #--------------------------- # A comma-separated list of 3rd party programs -QUANTUM_THIRD_PARTIES="ryu" +QUANTUM_THIRD_PARTIES="ryu,bigswitch_floodlight" for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do source lib/$third_party done From 43eb0b3159d8ad1eb14e0430124cc72cb50ae3c2 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 12 Jan 2013 20:10:34 +0000 Subject: [PATCH 142/207] Make sure to cleanup swift on unstack/relaunch. - Fixes bug 1049553. 
Change-Id: I9fef93d25512c014dfb882adf0e169487bf877d8 --- lib/swift | 15 ++++++++------- unstack.sh | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/swift b/lib/swift index b418eda8..aff45967 100644 --- a/lib/swift +++ b/lib/swift @@ -107,16 +107,17 @@ function configure_swift() { if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img fi - else - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} fi + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + # Make a fresh XFS filesystem mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img diff --git a/unstack.sh b/unstack.sh index 1d4bfd56..a086d5c6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,6 +65,7 @@ fi # Swift runs daemons if is_service_enabled swift; then stop_swift + cleanup_swift fi # Apache has the WSGI processes From 7bf1dd351fc859346ac8cdd3574b86f294e97def Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 12 Jan 2013 17:31:26 +0100 Subject: [PATCH 143/207] Tempest should create his own flavors * Decrease memory usage caused by tempest significantly Change-Id: I0ea59d9bb1fbeb93f04353bc6b4e148637edf945 --- lib/tempest | 54 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/lib/tempest b/lib/tempest index fa637c12..906ca6ab 100644 --- a/lib/tempest +++ b/lib/tempest @@ -129,33 +129,41 @@ function 
configure_tempest() { ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} - # Check Nova for existing flavors and, if set, look for the - # ``DEFAULT_INSTANCE_TYPE`` and use that. Otherwise, just use the first flavor. - flavor_lines=`nova flavor-list` - IFS=$'\r\n' - flavors="" - if [[ -n "$DEFAULT_INSTANCE_TYPE" ]]; then + # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior + # Tempest creates instane types for himself + if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then + nova flavor-create m1.pico 42 32 0 1 + flavor_ref=42 + nova flavor-create m1.nano 84 64 0 1 + flavor_ref_alt=84 + else + # Check Nova for existing flavors and, if set, look for the + # ``DEFAULT_INSTANCE_TYPE`` and use that. + flavor_lines=`nova flavor-list` + IFS=$'\r\n' + flavors="" for line in $flavor_lines; do f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") flavors="$flavors $f" done - fi - for line in $flavor_lines; do - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - done - - IFS=" " - flavors=($flavors) - num_flavors=${#flavors[*]} - echo "Found $num_flavors flavors" - if [[ $num_flavors -eq 0 ]]; then - echo "Found no valid flavors to use!" - exit 1 - fi - flavor_ref=${flavors[0]} - flavor_ref_alt=$flavor_ref - if [[ $num_flavors -gt 1 ]]; then - flavor_ref_alt=${flavors[1]} + + for line in $flavor_lines; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + done + + IFS=" " + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then + echo "Found no valid flavors to use!" 
+ exit 1 + fi + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + if [[ $num_flavors -gt 1 ]]; then + flavor_ref_alt=${flavors[1]} + fi fi if [ "$Q_USE_NAMESPACE" != "False" ]; then From af988fd1d20aff684cde07d8683ecf5e0d539dfe Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 13 Jan 2013 14:20:47 +0100 Subject: [PATCH 144/207] Support RHEL with lsb_release Consider all distributor as "Red Hat" which id matches to the Red.*Hat regexp. Example Distributor ID: "RedHatEnterpriseServer" Change-Id: I29cc2e83cccaafa3e1e056e506fda5c9771764a1 --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 80e17969..55a81c54 100644 --- a/functions +++ b/functions @@ -224,6 +224,7 @@ GetOSVersion() { os_VENDOR=$(lsb_release -i -s) os_RELEASE=$(lsb_release -r -s) os_UPDATE="" + os_PACKAGE="rpm" if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then os_PACKAGE="deb" elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then @@ -231,9 +232,8 @@ GetOSVersion() { if [[ $? -eq 0 ]]; then os_VENDOR="openSUSE" fi - os_PACKAGE="rpm" - else - os_PACKAGE="rpm" + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" fi os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then From b0f1c38bdcb02068e2e3d0daf2d65695d9d58478 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Sun, 13 Jan 2013 17:58:12 +0900 Subject: [PATCH 145/207] Refactor rpc backend configuration logic This commit also changes the following: - Fixes Nova QPID module path - Fixes a bug Cinder ZeroMQ RPC points to nova module - Adds ZeroMQ setting for Heat RPC qpid_is_supported is moved from functions to lib/rpc_backend. This work is based on the work by Isaku Yamahata in https://review.openstack.org/#/c/19074/. 
Change-Id: I45e21b1fb85e539213f5243764132a37906d7455 --- functions | 12 ----- lib/cinder | 9 +--- lib/heat | 32 ++----------- lib/quantum | 15 +----- lib/rpc_backend | 123 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 78 ++++-------------------------- 6 files changed, 137 insertions(+), 132 deletions(-) create mode 100644 lib/rpc_backend diff --git a/functions b/functions index 80e17969..47950420 100644 --- a/functions +++ b/functions @@ -1133,18 +1133,6 @@ function get_pip_command() { fi } -# Check if qpid can be used on the current distro. -# qpid_is_supported -function qpid_is_supported() { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is - # not in openSUSE either right now. - ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) -} - # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index d9f8d63f..8b1ccd71 100644 --- a/lib/cinder +++ b/lib/cinder @@ -184,14 +184,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT use_syslog True fi - if is_service_enabled qpid ; then - iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $CINDER_CONF DEFAULT rpc_backend nova.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi + iniset_rpc_backend cinder $CINDER_CONF DEFAULT if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False diff --git a/lib/heat b/lib/heat index 89bd44f0..5b8b360a 100644 --- a/lib/heat +++ b/lib/heat @@ -69,13 +69,7 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend 
heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI @@ -98,13 +92,7 @@ function configure_heat() { iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CONF DEFAULT HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI @@ -134,13 +122,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - if is_service_enabled rabbit; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_ENGINE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_ENGINE_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf @@ -151,13 +133,7 @@ function configure_heat() { iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST iniset $HEAT_API_CW_CONF 
DEFAULT bind_port $HEAT_API_CW_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CW_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CW_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI diff --git a/lib/quantum b/lib/quantum index 343e5a9b..19df4990 100644 --- a/lib/quantum +++ b/lib/quantum @@ -176,7 +176,7 @@ fi # Set common config for all quantum server and agents. function configure_quantum() { _configure_quantum_common - _configure_quantum_rpc + iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT if is_service_enabled q-svc; then _configure_quantum_service @@ -596,19 +596,6 @@ function _configure_quantum_plugin_agent_ryu() { AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" } -# Quantum RPC support - must be updated prior to starting any of the services -function _configure_quantum_rpc() { - iniset $QUANTUM_CONF DEFAULT control_exchange quantum - if is_service_enabled qpid ; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi -} - # _configure_quantum_service() - Set config files for quantum service # It is called when q-svc is enabled. 
function _configure_quantum_service() { diff --git a/lib/rpc_backend b/lib/rpc_backend new file mode 100644 index 00000000..4d7f8d2f --- /dev/null +++ b/lib/rpc_backend @@ -0,0 +1,123 @@ +# lib/rpc_backend +# Interface for interactig with different rpc backend +# rpc backend settings + +# Dependencies: +# ``functions`` file +# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used + +# ``stack.sh`` calls the entry points in this order: +# +# check_rpc_backend +# install_rpc_backend +# restart_rpc_backend +# iniset_rpc_backend + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# Make sure we only have one rpc backend enabled. +# Also check the specified rpc backend is available on your platform. +function check_rpc_backend() { + local rpc_backend_cnt=0 + for svc in qpid zeromq rabbit; do + is_service_enabled $svc && + ((rpc_backend_cnt++)) + done + if [ "$rpc_backend_cnt" -gt 1 ]; then + echo "ERROR: only one rpc backend may be enabled," + echo " set only one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." + elif [ "$rpc_backend_cnt" == 0 ]; then + echo "ERROR: at least one rpc backend must be enabled," + echo " set one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." + fi + + if is_service_enabled qpid && ! qpid_is_supported; then + echo "Qpid support is not available for this version of your distribution." 
+ exit 1 + fi +} + +# install rpc backend +function install_rpc_backend() { + if is_service_enabled rabbit; then + # Install rabbitmq-server + # the temp file is necessary due to LP: #878600 + tfile=$(mktemp) + install_package rabbitmq-server > "$tfile" 2>&1 + cat "$tfile" + rm -f "$tfile" + elif is_service_enabled qpid; then + if is_fedora; then + install_package qpid-cpp-server-daemon + elif is_ubuntu; then + install_package qpidd + else + exit_distro_not_supported "qpid installation" + fi + elif is_service_enabled zeromq; then + if is_fedora; then + install_package zeromq python-zmq + elif is_ubuntu; then + install_package libzmq1 python-zmq + elif is_suse; then + install_package libzmq1 python-pyzmq + else + exit_distro_not_supported "zeromq installation" + fi + fi +} + +# restart the rpc backend +function restart_rpc_backend() { + if is_service_enabled rabbit; then + # Start rabbitmq-server + echo_summary "Starting RabbitMQ" + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + elif is_service_enabled qpid; then + echo_summary "Starting qpid" + restart_service qpidd + fi +} + +# iniset cofiguration +function iniset_rpc_backend() { + local package=$1 + local file=$2 + local section=$3 + if is_service_enabled zeromq; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq + elif is_service_enabled qpid; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid + elif is_service_enabled rabbit; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu + iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_password $RABBIT_PASSWORD + fi +} + +# Check if qpid can be used on the current distro. 
+# qpid_is_supported +function qpid_is_supported() { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is + # not in openSUSE either right now. + ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 5c071fcf..bf473ca7 100755 --- a/stack.sh +++ b/stack.sh @@ -95,8 +95,9 @@ if [[ -r $TOP_DIR/.stackenv ]]; then rm $TOP_DIR/.stackenv fi -# Import database configuration +# Import common services (database, message queue) configuration source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend # Validate database selection # Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES @@ -118,10 +119,9 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) fi fi -if is_service_enabled qpid && ! qpid_is_supported; then - echo "Qpid support is not available for this version of your distribution." - exit 1 -fi +# Make sure we only have one rpc backend enabled, +# and the specified rpc backend is available on your platform. +check_rpc_backend # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present @@ -147,23 +147,6 @@ if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then exit 1 fi -# Make sure we only have one rpc backend enabled. -rpc_backend_cnt=0 -for svc in qpid zeromq rabbit; do - is_service_enabled $svc && - ((rpc_backend_cnt++)) -done -if [ "$rpc_backend_cnt" -gt 1 ]; then - echo "ERROR: only one rpc backend may be enabled," - echo " set only one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." -elif [ "$rpc_backend_cnt" == 0 ]; then - echo "ERROR: at least one rpc backend must be enabled," - echo " set one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." 
-fi -unset rpc_backend_cnt - # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) @@ -670,32 +653,7 @@ if [[ $SYSLOG != "False" ]]; then fi fi -if is_service_enabled rabbit; then - # Install rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - install_package rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" -elif is_service_enabled qpid; then - if is_fedora; then - install_package qpid-cpp-server-daemon - elif is_ubuntu; then - install_package qpidd - else - exit_distro_not_supported "qpid installation" - fi -elif is_service_enabled zeromq; then - if is_fedora; then - install_package zeromq python-zmq - elif is_ubuntu; then - install_package libzmq1 python-zmq - elif is_suse; then - install_package libzmq1 python-pyzmq - else - exit_distro_not_supported "zeromq installation" - fi -fi +install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database @@ -868,20 +826,7 @@ fi # Finalize queue installation # ---------------------------- - -if is_service_enabled rabbit; then - # Start rabbitmq-server - echo_summary "Starting RabbitMQ" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD -elif is_service_enabled qpid; then - echo_summary "Starting qpid" - restart_service qpidd -fi +restart_rpc_backend # Configure database @@ -1075,14 +1020,7 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" - if is_service_enabled zeromq; then - iniset $NOVA_CONF DEFAULT rpc_backend "nova.openstack.common.rpc.impl_zmq" - elif is_service_enabled qpid; then - iniset $NOVA_CONF DEFAULT rpc_backend "nova.rpc.impl_qpid" - elif [ -n "$RABBIT_HOST" ] && [ -n 
"$RABBIT_PASSWORD" ]; then - iniset $NOVA_CONF DEFAULT rabbit_host "$RABBIT_HOST" - iniset $NOVA_CONF DEFAULT rabbit_password "$RABBIT_PASSWORD" - fi + iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" From 3860a9252a8546326a7ac9f0c8b2c09afe655491 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 14 Jan 2013 13:14:34 +0100 Subject: [PATCH 146/207] Have ecua.sh to accept the correct error code ecua.sh will accept both the current and the correct error code Change-Id: I364e411986b9780fd5c5df29697753f04a9a4935 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 76df254b..46e40251 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -169,7 +169,7 @@ euca-terminate-instances $INSTANCE || \ # case changed with bug/836978. Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceId\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From 7c73e8dee705b4670cd051fad53e20e4a3cbe623 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 7 Jan 2013 08:17:01 +0000 Subject: [PATCH 147/207] Enable MySQL slow query log. 
Change-Id: I3db33839bea28abaff01f1d7b7d6698c5dd2c083 --- lib/databases/mysql | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index 1c0f5ebf..965df6ee 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -63,6 +63,21 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log any query taking longer than a second + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +long-query-time = 1" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-queries-not-using-indexes" $MY_CONF + restart_service $MYSQL } From 02c0bcc38c143e0a9f66b9c4080f3881f8b3ddfd Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 14 Jan 2013 19:10:17 +0100 Subject: [PATCH 148/207] Increase tempest memory 32 MB not enough for boot correctly Change-Id: I58ca4c7e8dd303450a9970136d6f60661ea70f67 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 906ca6ab..84afc099 100644 --- a/lib/tempest +++ b/lib/tempest @@ -132,9 +132,9 @@ function configure_tempest() { # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior # Tempest creates instane types for himself if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - nova flavor-create m1.pico 42 32 0 1 + nova flavor-create m1.nano 42 64 0 1 flavor_ref=42 - nova flavor-create m1.nano 84 64 0 1 + nova flavor-create m1.micro 84 128 0 1 flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the From 532908f6021f5e031cae01aa2374cd62da0200a9 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Mon, 14 Jan 2013 11:35:17 -0700 Subject: [PATCH 149/207] Remove error from httpd/apache on 
unstack.sh Fedora/RHEL use httpd for the package name of httpd. This is handled in other parts of the horizon startup code, but not in shutdown. Change-Id: I2732dad652d83a9cbe055f5f077678b7111ca782 Fixes: bug #1099538 --- lib/horizon | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 5d479d5d..9180370b 100644 --- a/lib/horizon +++ b/lib/horizon @@ -138,7 +138,15 @@ function start_horizon() { # stop_horizon() - Stop running processes (non-screen) function stop_horizon() { - stop_service apache2 + if is_ubuntu; then + stop_service apache2 + elif is_fedora; then + stop_service httpd + elif is_suse; then + stop_service apache2 + else + exit_distro_not_supported "apache configuration" + fi } # Restore xtrace From 31c94ab510a6896f3e87912443006ed43e61cf72 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 19 Dec 2012 03:59:20 +0000 Subject: [PATCH 150/207] Improve quantum l3 and tempest config. * Previously, configuration for the q-l3 agent was creating a tenant-owned router. This change maintains that behaviour if namespaces are enabled, but creates a public (not tenant-owned) router if namespaces are disabled. Since the L3 agent can only manage a single router if namespaces are disabled, the change ensures that the single router can be shared by multiple tenants. * Add tempest configuration for the public router. 
Change-Id: I2878a7eb9797bfd71082a55d4773519dc5198abc --- lib/quantum | 11 ++++++++++- lib/tempest | 11 +++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 343e5a9b..9c06f457 100644 --- a/lib/quantum +++ b/lib/quantum @@ -92,6 +92,8 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False} # Use quantum-debug command Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} +# The name of the default q-l3 router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} if is_service_enabled quantum; then Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf @@ -277,7 +279,14 @@ function create_quantum_initial_network() { if is_service_enabled q-l3; then # Create a router, and add the private subnet as one of its interfaces - ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) + if [[ "$Q_USE_NAMESPACE" == "True" ]]; then + # If namespaces are enabled, create a tenant-owned router. + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + else + # If namespaces are disabled, the L3 agent can only target + # a single router, which should not be tenant-owned. + ROUTER_ID=$(quantum router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + fi quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. 
Configure the external network as router gw EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) diff --git a/lib/tempest b/lib/tempest index 84afc099..0835234c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -11,6 +11,9 @@ # - ``S3_SERVICE_PORT`` # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares +# - ``PUBLIC_NETWORK_NAME`` +# - ``Q_USE_NAMESPACE`` +# - ``Q_ROUTER_NAME`` # Optional Dependencies: # IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) @@ -61,6 +64,7 @@ function configure_tempest() { local flavors_ref local flavor_lines local public_network_id + local public_router_id local tenant_networks_reachable # TODO(afazekas): @@ -175,6 +179,12 @@ function configure_tempest() { if is_service_enabled q-l3; then public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') + if [ "$Q_USE_NAMESPACE" == "False" ]; then + # If namespaces are disabled, devstack will create a single + # public router that tempest should be configured to use. + public_router_id=$(quantum router-list | awk "/ $Q_ROUTER_NAME / \ + { print \$2 }") + fi fi # Timeouts @@ -243,6 +253,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network password "$password" iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" + iniset $TEMPEST_CONF network public_router_id "$public_router_id" #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 2298ca4f705e28dcc4b2aa605b73470612f6bb61 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 25 Oct 2012 23:46:42 +0000 Subject: [PATCH 151/207] Add limited support for Quantum+OVS on XS/XCP. * Add priliminary support for running the OVS L2 and DHCP agents in domU: * Configure Nova to use the correct vif driver and integration bridge. 
* Configure the ovs agent to target the dom0 integration bridge. * Install a xapi plugin supporting dom0 execution of ovs agent commands. * Config doc: http://wiki.openstack.org/QuantumDevstackOvsXcp * Supports blueprint xenapi-ovs Change-Id: If5ab07daab1dc3918004eb4bfb6fed6cab0a71fd --- lib/quantum | 46 ++++++++++++++++++++++++++++++++++-- tools/xen/install_os_domU.sh | 13 ++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index 9c06f457..bfea2b55 100644 --- a/lib/quantum +++ b/lib/quantum @@ -212,6 +212,10 @@ function create_nova_conf_quantum() { if [[ "$Q_PLUGIN" = "openvswitch" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + add_nova_opt "xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIOpenVswitchDriver" + add_nova_opt "xenapi_ovs_integration_bridge=$FLAT_NETWORK_BRIDGE" + fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then @@ -536,6 +540,11 @@ function _configure_quantum_metadata_agent() { # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent # It is called when q-agt is enabled. function _configure_quantum_plugin_agent() { + + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default. 
+ iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + # Configure agent for plugin if [[ "$Q_PLUGIN" = "openvswitch" ]]; then _configure_quantum_plugin_agent_openvswitch @@ -544,8 +553,6 @@ function _configure_quantum_plugin_agent() { elif [[ "$Q_PLUGIN" = "ryu" ]]; then _configure_quantum_plugin_agent_ryu fi - - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" } function _configure_quantum_plugin_agent_linuxbridge() { @@ -593,6 +600,41 @@ function _configure_quantum_plugin_agent_openvswitch() { iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Nova will always be installed along with quantum for a domU + # devstack install, so it should be safe to rely on nova.conf + # for xenapi configuration. + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-dom0 $NOVA_CONF" + # Under XS/XCP, the ovs agent needs to target the dom0 + # integration bridge. This is enabled by using a root wrapper + # that executes commands on dom0 via a XenAPI plugin. + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + + # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To + # ensure the bridge lacks direct connectivity, set + # VM_VLAN=-1;VM_DEV=invalid in localrc + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE + + # The ovs agent needs to ensure that the ports associated with + # a given network share the same local vlan tag. On + # single-node XS/XCP, this requires monitoring both the dom0 + # bridge, where VM's are attached, and the domU bridge, where + # dhcp servers are attached. + if is_service_enabled q-dhcp; then + iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE + # DomU will use the regular rootwrap + iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" + # Plug the vm interface into the domU integration bridge. 
+ sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT + sudo ip link set $OVS_BRIDGE up + # Assign the VM IP only if it has been set explicitly + if [[ "$VM_IP" != "" ]]; then + sudo ip addr add $VM_IP dev $OVS_BRIDGE + fi + sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT + fi + fi } function _configure_quantum_plugin_agent_ryu() { diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index e270e59b..b4fbb699 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -68,6 +68,19 @@ if [ ! -d $XAPI_PLUGIN_DIR ]; then XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/ fi cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR + +# Install the netwrap xapi plugin to support agent control of dom0 networking +if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then + if [ -f ./quantum ]; then + rm -rf ./quantum + fi + # get quantum + QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")} + wget $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate + unzip -o quantum-zipball -d ./quantum + cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR +fi + chmod a+x ${XAPI_PLUGIN_DIR}* mkdir -p /boot/guest From 1d29d8bcf734cdd6db54da3c1458bfdb636e453c Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 7 Jan 2013 15:51:32 +0100 Subject: [PATCH 152/207] Add basic uec image preparation to tempest Add uec image preparation to lib/tempest. cirros as image is hard coded at the moment. If the images does not exists or the system is not able to use uec images the image prepare step will be skipped and tempest will skip the related tests as well. Setting ssh username correctly. Setting instance type for the boto test. 
Change-Id: I0d36ac7834e1eb677007e2c92dfc375d134a6023 --- extras.d/80-tempest.sh | 1 + lib/tempest | 41 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 506ccef7..f1599557 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -9,6 +9,7 @@ if [[ "$1" == "stack" ]]; then echo_summary "Configuring Tempest" install_tempest configure_tempest + init_tempest fi fi diff --git a/lib/tempest b/lib/tempest index 0835234c..c08a4306 100644 --- a/lib/tempest +++ b/lib/tempest @@ -14,10 +14,11 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` +# - ``VIRT_DRIVER`` +# - ``LIBVIRT_TYPE`` # Optional Dependencies: # IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# ``OS_USERNAME`` # ``IMAGE_PORT``, ``IMAGE_HOST`` # ``LIVE_MIGRATION_AVAILABLE`` # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` @@ -27,6 +28,7 @@ # # install_tempest # configure_tempest +# init_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -47,6 +49,8 @@ BUILD_INTERVAL=3 BUILD_TIMEOUT=400 +BOTO_MATERIALS_PATH="$DEST/devstack/files/images/s3-materials/cirros-0.3.0" + # Entry Points # ------------ @@ -66,6 +70,7 @@ function configure_tempest() { local public_network_id local public_router_id local tenant_networks_reachable + local boto_instance_type="m1.tiny" # TODO(afazekas): # sudo python setup.py deploy @@ -138,11 +143,13 @@ function configure_tempest() { if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then nova flavor-create m1.nano 42 64 0 1 flavor_ref=42 + boto_instance_type=m1.nano nova flavor-create m1.micro 84 128 0 1 flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the # ``DEFAULT_INSTANCE_TYPE`` and use that. 
+ boto_instance_type=$DEFAULT_INSTANCE_TYPE flavor_lines=`nova flavor-list` IFS=$'\r\n' flavors="" @@ -216,10 +223,10 @@ function configure_tempest() { iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 - iniset $TEMPEST_CONF compute ssh_timeout 4 + iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF compute image_ref $image_uuid iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt iniset $TEMPEST_CONF compute flavor_ref $flavor_ref @@ -258,6 +265,9 @@ function configure_tempest() { #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" + iniset $TEMPEST_CONF boto http_socket_timeout 30 echo "Created tempest configuration file:" cat $TEMPEST_CONF @@ -277,5 +287,30 @@ function install_tempest() { pip_install -r $TEMPEST_DIR/tools/pip-requires } +# init_tempest() - Initialize ec2 images +function init_tempest() { + local base_image_name=cirros-0.3.0-x86_64 + # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec + local devstack_dir="$DEST/devstack" + local image_dir="$devstack_dir/files/images/${base_image_name}-uec" + local kernel="$image_dir/${base_image_name}-vmlinuz" + local ramdisk="$image_dir/${base_image_name}-initrd" + local disk_image="$image_dir/${base_image_name}-blank.img" + # if the cirros uec downloaded and the system is uec capable + if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a 
"$VIRT_DRIVER" != "openvz" \ + -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then + echo "Prepare aki/ari/ami Images" + ( #new namespace + # tenant:demo ; user: demo + source $devstack_dir/accrc/demo/demo + euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" + euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" + euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" + ) 2>&1 Date: Wed, 16 Jan 2013 08:38:17 +0100 Subject: [PATCH 153/207] Case correct InvalidInstanceID.NotFound Change-Id: Iab067398205f51d640355ef91f0896afaecc4dea --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 46e40251..7b35f6fe 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -169,7 +169,7 @@ euca-terminate-instances $INSTANCE || \ # case changed with bug/836978. Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceId\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From af22a477d1326c345cc1c59049bf3b16e3510acd Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 17 Jan 2013 16:16:25 +0900 Subject: [PATCH 154/207] lib/cinder: add sheepdog support This enables us to use Sheepdog as a Cinder backend storage by setting the CINDER_DRIVER environment variable. 
Change-Id: I70cfb7f89ac3260d277fa160c457d220255de065 --- lib/cinder | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder b/lib/cinder index 8b1ccd71..a730cd62 100644 --- a/lib/cinder +++ b/lib/cinder @@ -209,6 +209,8 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) + elif [ "$CINDER_DRIVER" == "sheepdog" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" fi } From 3edddd108a4b5c785c7916b40aa153e055d54d8a Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 3 Jan 2013 12:06:47 +0000 Subject: [PATCH 155/207] Run nova-novncproxy binary in the nova GIT trree The nova-novncproxy binary was pulled into the nova GIT tree several months back, so devstack should run that version, rather than the legacy version from the noVNC GIT tree. Change-Id: I289989d5c0831c75117f059dd8f2decb1f9d3a90 Signed-off-by: Daniel P. Berrange --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8135bf15..dd684321 100644 --- a/lib/nova +++ b/lib/nova @@ -549,7 +549,7 @@ function start_nova() { screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" } From d10e12f1335492550d558d56c510fdf8ce55a9dc Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Thu, 3 Jan 2013 11:51:42 +0000 Subject: [PATCH 156/207] Add support for setting up nova-spicehtml5proxy Add a new service 'n-spice' which is off by default, but can be enabled to turn on SPICE support in the Nova libvirt driver. Also if neither n-novnc or n-xvnc are enabled, then disable VNC support. This allows running in a SPICE only environment. The spice-html5 repo will be checked out to support the Horizon client integration Change-Id: If74fad33a7b491450afd823758d35b06ebe72cb9 Signed-off-by: Daniel P. Berrange --- files/rpms/n-spice | 1 + lib/nova | 3 ++- stack.sh | 36 +++++++++++++++++++++++++++++++----- stackrc | 4 ++++ 4 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 files/rpms/n-spice diff --git a/files/rpms/n-spice b/files/rpms/n-spice new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-spice @@ -0,0 +1 @@ +numpy diff --git a/lib/nova b/lib/nova index dd684321..ccf93d9e 100644 --- a/lib/nova +++ b/lib/nova @@ -551,13 +551,14 @@ function start_nova() { screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" } # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/stack.sh b/stack.sh index bf473ca7..005d88e7 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,7 @@ source $TOP_DIR/lib/baremetal HORIZON_DIR=$DEST/horizon 
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC +SPICE_DIR=$DEST/spice-html5 SWIFT3_DIR=$DEST/swift3 # Should cinder perform secure deletion of volumes? @@ -716,6 +717,10 @@ if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi +if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH +fi if is_service_enabled horizon; then # dashboard install_horizon @@ -1008,17 +1013,38 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} else VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} fi - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + + if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF DEFAULT vnc_enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF spice enabled false + fi + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" diff --git a/stackrc b/stackrc index 8d194405..cfc4d1fb 100644 --- a/stackrc +++ b/stackrc @@ -78,6 +78,10 @@ KEYSTONE_BRANCH=master NOVNC_REPO=https://github.com/kanaka/noVNC.git NOVNC_BRANCH=master +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git +SPICE_BRANCH=master + # django powered web control panel for openstack HORIZON_REPO=${GIT_BASE}/openstack/horizon.git HORIZON_BRANCH=master From 029598ea74ab2adf08801e384b919cc2cd13398c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 Jan 2013 11:17:16 -0600 Subject: [PATCH 157/207] Fix secgroups exercise on postgres This should fix the failing (but non-voting) postgres gate tests. Why does postgresql change the value '0.0.0.0/00' to '0.0.0.0/0'? Clearly the correct value for the network CIDR bits is with only one zero but even an incorrect value shouldn't be changing. 
SQLalchemy is given this for the column: Column('cidr', String(length=255)), Change-Id: Ib19dad23789654664e90518087e5a462fa8b8034 --- exercises/sec_groups.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index f6810e3e..fbd9c8e1 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -48,7 +48,7 @@ nova secgroup-create $SEC_GROUP_NAME 'a test security group' RULES_TO_ADD=( 22 3389 5900 ) for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done # Check to make sure rules were added @@ -63,7 +63,7 @@ done # Delete rules and secgroup for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done nova secgroup-delete $SEC_GROUP_NAME From 8750b3d533df1174fe7d11290f97ef6a5779758c Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 17 Jan 2013 23:49:50 -0500 Subject: [PATCH 158/207] Enable nova Verbose logging With oslo-incubator commit 751c35b1c8ff0730883a8ccdda9b77a49fff2405, (Change-Id: Ic9e3cb5979b2d7283552ad3a461870373f45a239) Verbose does not enable debug level logging. Change-Id: I1741ec0ca61e4c7234ee4e29cbd52ded73995451 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 8135bf15..d323425d 100644 --- a/lib/nova +++ b/lib/nova @@ -355,6 +355,7 @@ function create_nova_conf() { rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT debug "True" iniset $NOVA_CONF DEFAULT auth_strategy "keystone" iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" From f2696c0d85de4070504699bce767a27f4dd5a297 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Thu, 17 Jan 2013 20:40:09 -0800 Subject: [PATCH 159/207] Add Nova fibre channel support required packages This patch is to support the nova patch that adds Fibre Channel support to nova. Fibre Channel requires sysfsutils, sg3-utils, multipath-tools Change-Id: I9e44ef9152f1916b245dba3be77076f0283fed44 --- files/apts/n-cpu | 2 ++ files/rpms-suse/n-cpu | 2 ++ files/rpms/n-cpu | 2 ++ 3 files changed, 6 insertions(+) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index a40b6590..ad2d6d71 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -3,3 +3,5 @@ lvm2 open-iscsi open-iscsi-utils genisoimage +sysfsutils +sg3-utils diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index 27d3254f..7040b843 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -2,3 +2,5 @@ genisoimage lvm2 open-iscsi +sysfsutils +sg3_utils diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index f7054e82..149672ac 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -2,3 +2,5 @@ iscsi-initiator-utils lvm2 genisoimage +sysfsutils +sg3_utils From 97d3d202ff6ad40d9201b43a37861f58a9503d14 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 19 Jan 2013 19:20:49 +0100 Subject: [PATCH 160/207] Support Tempest config file format transition * Identity server location just configure by an URI * Image service location resolved by service endpoint * Credentials will be defined only in the identity section * Whitebox gets it's own section * ssh username is per image option Change-Id: I3b0d51a323560451c5636363896cadb39e0ea2d6 --- lib/tempest | 65 ++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/lib/tempest b/lib/tempest index c08a4306..9057854d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -16,10 +16,9 @@ # - ``Q_ROUTER_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` +# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone # Optional Dependencies: -# IDENTITY_USE_SSL, 
IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# ``IMAGE_PORT``, ``IMAGE_HOST`` # ``LIVE_MIGRATION_AVAILABLE`` # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # ``DEFAULT_INSTANCE_TYPE`` @@ -124,13 +123,6 @@ function configure_tempest() { # copy every time, because the image UUIDS are going to change cp $TEMPEST_CONF.sample $TEMPEST_CONF - IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} - IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} - IDENTITY_PORT=${IDENTITY_PORT:-5000} - # TODO(jaypipes): This is dumb and needs to be removed - # from the Tempest configuration file entirely... - IDENTITY_PATH=${IDENTITY_PATH:-tokens} - password=${ADMIN_PASSWORD:-secrete} # See files/keystone_data.sh where alt_demo user @@ -203,15 +195,19 @@ function configure_tempest() { iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL iniset $TEMPEST_CONF boto http_socket_timeout 5 - iniset $TEMPEST_CONF identity use_ssl $IDENTITY_USE_SSL - iniset $TEMPEST_CONF identity host $IDENTITY_HOST - iniset $TEMPEST_CONF identity port $IDENTITY_PORT - iniset $TEMPEST_CONF identity path $IDENTITY_PATH - - iniset $TEMPEST_CONF compute password "$password" - iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME - iniset $TEMPEST_CONF compute alt_password "$password" - iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME + # Identity + iniset $TEMPEST_CONF identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONF identity password "$password" + iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME + iniset $TEMPEST_CONF identity alt_password "$password" + iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONF identity admin_password "$password" + + # Compute + iniset $TEMPEST_CONF compute password "$password" # DEPRECATED + iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED + iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED + iniset 
$TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR @@ -223,41 +219,47 @@ function configure_tempest() { iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF compute image_ref $image_uuid + iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONF compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute flavor_ref $flavor_ref iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Inherited behavior, might be wrong - iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR + iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR # DEPRECATED + iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # DEPRECATED + iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa # DEPRECATED + iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova # DEPRECATED + + # Whitebox + iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR + iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR # TODO(jaypipes): Create the key file here... 
right now, no whitebox # tests actually use a key. - iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova + iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa + iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova + # image - iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} - iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} - iniset $TEMPEST_CONF image password "$password" + iniset $TEMPEST_CONF image password "$password" # DEPRECATED # identity-admin - iniset $TEMPEST_CONF "identity-admin" password "$password" + iniset $TEMPEST_CONF "identity-admin" password "$password" # DEPRECATED # compute admin - iniset $TEMPEST_CONF "compute-admin" password "$password" + iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED # network admin - iniset $TEMPEST_CONF "network-admin" password "$password" + iniset $TEMPEST_CONF "network-admin" password "$password" # DEPRECATED # network iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network password "$password" + iniset $TEMPEST_CONF network password "$password" # DEPRECATED iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" @@ -268,6 +270,7 @@ function configure_tempest() { iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" iniset $TEMPEST_CONF boto http_socket_timeout 30 + iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} echo "Created tempest configuration file:" cat $TEMPEST_CONF From 0dd34df455637ee29176525974d6dab93f530e66 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 28 Dec 2012 13:15:31 +0900 Subject: [PATCH 161/207] lib/quantum: refactor quantum plugins and third party As quantum plugin support is coming like 
floodlight, nvp and nec, it's worth while to refactor quantum plugin logic so that each plugin can be modified/enhanced intervening with other quantum plugin. And new plugin support can be added easily (hopefully) without modifying core logic. Change-Id: Ic5ab5b993272fdd3b4e779823323777a845ee681 --- AUTHORS | 1 + lib/nova | 2 +- lib/quantum | 328 +++--------------- lib/quantum_plugins/README.md | 34 ++ lib/quantum_plugins/bigswitch_floodlight | 55 +++ lib/quantum_plugins/linuxbridge | 79 +++++ lib/quantum_plugins/openvswitch | 144 ++++++++ lib/quantum_plugins/ovs_base | 49 +++ lib/quantum_plugins/ryu | 63 ++++ lib/quantum_thirdparty/README.md | 36 ++ .../bigswitch_floodlight | 0 lib/{ => quantum_thirdparty}/ryu | 30 +- 12 files changed, 517 insertions(+), 304 deletions(-) create mode 100644 lib/quantum_plugins/README.md create mode 100644 lib/quantum_plugins/bigswitch_floodlight create mode 100644 lib/quantum_plugins/linuxbridge create mode 100644 lib/quantum_plugins/openvswitch create mode 100644 lib/quantum_plugins/ovs_base create mode 100644 lib/quantum_plugins/ryu create mode 100644 lib/quantum_thirdparty/README.md rename lib/{ => quantum_thirdparty}/bigswitch_floodlight (100%) rename lib/{ => quantum_thirdparty}/ryu (69%) diff --git a/AUTHORS b/AUTHORS index 7ec1f663..35c0a522 100644 --- a/AUTHORS +++ b/AUTHORS @@ -19,6 +19,7 @@ Gabriel Hurley Gary Kotton Hengqing Hu Hua ZHANG +Isaku Yamahata Jake Dahn James E. Blair Jason Cannavale diff --git a/lib/nova b/lib/nova index f0456d61..7165ae22 100644 --- a/lib/nova +++ b/lib/nova @@ -229,7 +229,7 @@ function configure_nova() { configure_baremetal_nova_dirs fi - if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat <`` + * The corresponding file name should be same to service name, ````. + +functions +--------- +``lib/quantum`` calls the following functions when the ```` is enabled + +functions to be implemented +* ``configure_``: + set config files, create data dirs, etc + e.g. + sudo python setup.py deploy + iniset $XXXX_CONF... + +* ``init_``: + initialize databases, etc + +* ``install_``: + collect source and prepare + e.g. + git clone xxx + +* ``start_``: + start running processes, including screen + e.g. + screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin" + +* ``stop_``: + stop running processes (non-screen) diff --git a/lib/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight similarity index 100% rename from lib/bigswitch_floodlight rename to lib/quantum_thirdparty/bigswitch_floodlight diff --git a/lib/ryu b/lib/quantum_thirdparty/ryu similarity index 69% rename from lib/ryu rename to lib/quantum_thirdparty/ryu index 1292313e..f11951a3 100644 --- a/lib/ryu +++ b/lib/quantum_thirdparty/ryu @@ -17,6 +17,21 @@ RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} +# Ryu configuration +RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-" +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +--quantum_url=http://$Q_HOST:$Q_PORT +--quantum_admin_username=$Q_ADMIN_USERNAME +--quantum_admin_password=$SERVICE_PASSWORD +--quantum_admin_tenant_name=$SERVICE_TENANT_NAME +--quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 +--quantum_auth_strategy=$Q_AUTH_STRATEGY +--quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT +"} function configure_ryu() { setup_develop $RYU_DIR @@ -31,26 +46,13 @@ 
function init_ryu() { RYU_CONF=$RYU_CONF_DIR/ryu.conf sudo rm -rf $RYU_CONF - cat < $RYU_CONF ---app_lists=$RYU_APPS ---wsapi_host=$RYU_API_HOST ---wsapi_port=$RYU_API_PORT ---ofp_listen_host=$RYU_OFP_HOST ---ofp_tcp_listen_port=$RYU_OFP_PORT -EOF + echo "${RYU_CONF_CONTENTS}" > $RYU_CONF } function install_ryu() { git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH } -function is_ryu_required() { - if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - return 0 - fi - return 1 -} - function start_ryu() { screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" } From 3c52922f4f2a94cec2c94fdd56474a9224c13213 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 21 Jan 2013 06:50:33 +0100 Subject: [PATCH 162/207] Use the correct directory for image files in tempest Change-Id: Ic40065a04b8015333b4cb6844211e9a0afb9d4c1 --- lib/tempest | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 9057854d..4d91d900 100644 --- a/lib/tempest +++ b/lib/tempest @@ -5,7 +5,7 @@ # ``functions`` file # ``lib/nova`` service is runing # -# - ``DEST`` +# - ``DEST``, ``FILES`` # - ``ADMIN_PASSWORD`` # - ``DEFAULT_IMAGE_NAME`` # - ``S3_SERVICE_PORT`` @@ -48,7 +48,7 @@ BUILD_INTERVAL=3 BUILD_TIMEOUT=400 -BOTO_MATERIALS_PATH="$DEST/devstack/files/images/s3-materials/cirros-0.3.0" +BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0" # Entry Points # ------------ @@ -294,8 +294,7 @@ function install_tempest() { function init_tempest() { local base_image_name=cirros-0.3.0-x86_64 # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec - local devstack_dir="$DEST/devstack" - local image_dir="$devstack_dir/files/images/${base_image_name}-uec" + local image_dir="$FILES/images/${base_image_name}-uec" local kernel="$image_dir/${base_image_name}-vmlinuz" local ramdisk="$image_dir/${base_image_name}-initrd" local disk_image="$image_dir/${base_image_name}-blank.img" From 
a534e0bf1a1ec0abc1d1f673af1b70fbf8239350 Mon Sep 17 00:00:00 2001 From: Sunil Thaha Date: Mon, 21 Jan 2013 17:00:50 +1000 Subject: [PATCH 163/207] Fixes nova-compute failing to start on Fedora 18 Fixes bug #1086784 Adds a rule to the policy-kit allowing the stack user to manage libvirt Change-Id: I6e9c0106c932f5f5f5c5c18ff79ac81a050c4599 --- lib/nova | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index f0456d61..28933669 100644 --- a/lib/nova +++ b/lib/nova @@ -247,11 +247,25 @@ EOF LIBVIRT_DAEMON=libvirtd fi - # For distributions using polkit to authorize access to libvirt, - # configure polkit accordingly. - # Based on http://wiki.libvirt.org/page/SSHPolicyKitSetup + + if is_fedora; then - sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + # Starting with fedora 18 enable stack-user to virsh -c qemu:///system + # by creating a policy-kit rule for stack-user + if [[ "$os_RELEASE" -ge "18" ]]; then + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + else + sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd Action=org.libvirt.unix.manage @@ -259,6 +273,7 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' + fi elif is_suse; then # Work around the fact that polkit-default-privs overrules pklas # with 'unix-group:$group'. 
From 4a30b849ec69344e82b13070d839a1ffb4504e7c Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Tue, 22 Jan 2013 11:49:03 +0800 Subject: [PATCH 164/207] Add check for RABBIT_HOST and RABBIT_PASSWORD in iniset_rpc_backend In multi-node environment, RABBIT_HOST and RABBIT_PASSWORD are used to indicate the server running rabbitmq service. We should check the variables in iniset_rpc_backend. Change-Id: Iaea8cc87315be91429a8747254310d6474930eec --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 4d7f8d2f..f35f9dbd 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -100,7 +100,7 @@ function iniset_rpc_backend() { iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq elif is_service_enabled qpid; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid - elif is_service_enabled rabbit; then + elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu iniset $file $section rabbit_host $RABBIT_HOST iniset $file $section rabbit_password $RABBIT_PASSWORD From ad8b27626ecef3509a1ffca8dac7392d32e1b2d6 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Jan 2013 15:40:01 +0100 Subject: [PATCH 165/207] have the run and logs files in $SWIFT_DATA_DIR - Set all the run lock and logs files to go to $SWIFT_DATA_DIR. 
Change-Id: I42b72572e9700457475398043057d37d0dbc65ac --- files/swift/rsyncd.conf | 28 ++++++++++++++-------------- lib/swift | 16 ++++++++-------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 4e0dcbf9..c670531b 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -1,79 +1,79 @@ uid = %USER% gid = %GROUP% -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid +log file = %SWIFT_DATA_DIR%/logs/rsyncd.log +pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 [account6012] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6012.lock [account6022] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6022.lock [account6032] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6032.lock [account6042] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6042.lock [container6011] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6011.lock [container6021] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6021.lock [container6031] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6031.lock [container6041] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/container6041.lock +lock file = 
%SWIFT_DATA_DIR%/run/container6041.lock [object6010] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6010.lock [object6020] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6020.lock [object6030] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6030.lock [object6040] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6040.lock diff --git a/lib/swift b/lib/swift index a4faf031..5ba7e56f 100644 --- a/lib/swift +++ b/lib/swift @@ -95,13 +95,13 @@ function configure_swift() { setup_develop $SWIFT_DIR # Make sure to kill all swift processes first - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache} + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. @@ -143,8 +143,8 @@ function configure_swift() { sudo chown -R $USER: ${node} done - sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift + sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server + sudo chown -R $USER: ${SWIFT_CONFIG_DIR} if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. 
@@ -311,7 +311,7 @@ function configure_swiftclient() { function init_swift() { local node_number # Make sure to kill all swift processes first - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # This is where we create three different rings for swift with # different object servers binding on different ports. @@ -363,15 +363,15 @@ function start_swift() { # proxy service so we can run it in foreground in screen. # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, # ignore it just in case - swift-init all restart || true - swift-init proxy stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" } # stop_swift() - Stop running processes (non-screen) function stop_swift() { # screen normally killed by unstack.sh - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true } # Restore xtrace From 767cd631796b5404e6331cee72977a1fcec68024 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 18 Jan 2013 17:15:44 -0500 Subject: [PATCH 166/207] Set MySQL slow log to record every query Since devstack is for development, lets record all SQL queries to enable debugging, and further development. 
Change-Id: Idb4078a0d3a84151ad4c506f8861637d84ae47ad --- lib/databases/mysql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 965df6ee..95242536 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -68,10 +68,10 @@ default-storage-engine = InnoDB" $MY_CONF sudo sed -i -e "/^\[mysqld\]/ a \ log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF - # Log any query taking longer than a second + # Log all queries (any query taking longer than 0 seconds) sudo sed -i '/long.query.time/d' $MY_CONF sudo sed -i -e "/^\[mysqld\]/ a \ -long-query-time = 1" $MY_CONF +long-query-time = 0" $MY_CONF # Log all non-indexed queries sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF From 74c67fd8616f778061c27d4c929c9364b59e2b92 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Jan 2013 18:10:16 -0500 Subject: [PATCH 167/207] add numpy to package list, saves lots of time websockify was added to the pip requires, and it has a dependency of numpy. Because we didn't specify it in the package list, it was built from source every time, adding 3 minutes to an average run. Stop testing whether numpy compiles 100 times a day in CI. 
Change-Id: Ic9d9b8135a917deb846911c6b266aec87d05781a --- files/apts/nova | 1 + files/rpms/nova | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/nova b/files/apts/nova index b7d1e928..39b4060e 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -31,6 +31,7 @@ python-libvirt python-libxml2 python-routes python-netaddr +python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah diff --git a/files/rpms/nova b/files/rpms/nova index 88ad8c31..568ee7f5 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -10,6 +10,7 @@ kvm libvirt-bin # NOPRIME libvirt-python libxml2-python +numpy # needed by websockify for spice console m2crypto mysql-server # NOPRIME parted From b0b98b709650c1ffb940e66d26baf29d38515692 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Wed, 23 Jan 2013 21:14:49 +1030 Subject: [PATCH 168/207] Renames old $devstack_dir to $TOP_DIR This patch renames $devstack_dir which is no longer defined to $TOP_DIR. Fixes problem where initialisation of ec2 images for tempest testing was failing. 
Change-Id: Ie0cd43209e58c903b1fe6cc528a4971896e6fab1 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4d91d900..c163a0d4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -304,7 +304,7 @@ function init_tempest() { echo "Prepare aki/ari/ami Images" ( #new namespace # tenant:demo ; user: demo - source $devstack_dir/accrc/demo/demo + source $TOP_DIR/accrc/demo/demo euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" From 74759aa17a3b9e687aebf30c11b9bcb477aa48ef Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 24 Jan 2013 14:19:55 -0600 Subject: [PATCH 169/207] Remove DEFAULT_STACK_USER, just use STACK_USER STACK_USER needs to be set for more than just stack.sh, there was no real distinction for using DEFAULT_STACK_USER instead of just setting STACK_USER directly in stackrc and allowing it to be overridden in localrc. Change-Id: I9e8d70db29bb421f1ce3dbf40a5ad299cc7ea785 --- stack.sh | 4 +--- stackrc | 8 ++++++-- tools/build_ramdisk.sh | 12 ++++++------ tools/build_uec.sh | 8 ++++---- tools/copy_dev_environment_to_uec.sh | 8 ++++---- tools/xen/build_xva.sh | 4 ++-- 6 files changed, 23 insertions(+), 21 deletions(-) diff --git a/stack.sh b/stack.sh index 005d88e7..e50cc493 100755 --- a/stack.sh +++ b/stack.sh @@ -160,7 +160,6 @@ VERBOSE=$(trueorfalse True $VERBOSE) # sudo privileges and runs as that user. if [[ $EUID -eq 0 ]]; then - STACK_USER=$DEFAULT_STACK_USER ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" @@ -196,7 +195,6 @@ if [[ $EUID -eq 0 ]]; then fi exit 1 else - STACK_USER=`whoami` # We're not **root**, make sure ``sudo`` is available is_package_installed sudo || die "Sudo is required. 
Re-run stack.sh as root ONE TIME ONLY to set up sudo." @@ -1291,7 +1289,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do echo $i=${!i} >>$TOP_DIR/.stackenv done diff --git a/stackrc b/stackrc index cfc4d1fb..789fc82d 100644 --- a/stackrc +++ b/stackrc @@ -12,8 +12,12 @@ DATA_DIR=${DEST}/data # Select the default database DATABASE_TYPE=mysql -# Default stack user -DEFAULT_STACK_USER=stack +# Determine stack user +if [[ $EUID -eq 0 ]]; then + STACK_USER=stack +else + STACK_USER=$(whoami) +fi # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index cfcca51f..2c455685 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -125,17 +125,17 @@ if [ ! -r $DEV_FILE ]; then # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd $DEFAULT_STACK_USER -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST + chroot $MNTDIR chown $STACK_USER $DEST # A simple password - pass - echo $DEFAULT_STACK_USER:pass | chroot $MNTDIR chpasswd + echo $STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "$DEFAULT_STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -187,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R $DEFAULT_STACK_USER $DEST/devstack +chroot $MNTDIR chown -R $STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -225,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST/run.sh +chroot $MNTDIR chown $STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 5748b390..6c4a26c2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -207,11 +207,11 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF fi -useradd -U -G sudo -s /bin/bash -d /opt/stack -m $DEFAULT_STACK_USER -echo $DEFAULT_STACK_USER:pass | chpasswd +useradd -U -G sudo -s /bin/bash -d /opt/stack -m $STACK_USER +echo $STACK_USER:pass | chpasswd mkdir -p /opt/stack/.ssh echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys -chown -R $DEFAULT_STACK_USER /opt/stack +chown -R $STACK_USER /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys @@ -224,7 +224,7 @@ fi # Run stack.sh cat >> 
$vm_dir/uec/user-data< $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Copy over your ssh keys and env if desired @@ -67,7 +67,7 @@ rm -rf $STAGING_DIR/$DEST/devstack cp_it . $STAGING_DIR/$DEST/devstack # Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R $DEFAULT_STACK_USER $DEST +chroot $STAGING_DIR chown -R $STACK_USER $DEST # Unmount umount $STAGING_DIR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index f3f166fe..0e874cfe 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -65,8 +65,8 @@ cd $TOP_DIR cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -chown -R $DEFAULT_STACK_USER /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $DEFAULT_STACK_USER +chown -R $STACK_USER /opt/stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER exit 0 EOF From 9f22f07a154a2d94c0e0f6d419497e3f94fbe929 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 28 Jan 2013 09:53:38 -0500 Subject: [PATCH 170/207] Cinder: update osapi_volume_extension default. Updates the Cinder config file to use the new extension loader location. 
Change-Id: I515e16e00b54c69ae3c09e64841818eb4a9c8f73 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index a730cd62..5f4f979b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -172,7 +172,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT sql_connection $dburl iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" - iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH if is_service_enabled tls-proxy; then From 98e18e99d18a1a294fb2a7f3dceb48bd81520c03 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 28 Jan 2013 14:26:56 +0000 Subject: [PATCH 171/207] Quantum root_helper update We are currently moving root_helper to the [AGENT] section. This patch is intended to enable the transition for that process. Change-Id: Iff8144f74a1a5f8b0fc9af44bccf0213f4bfad7e --- lib/quantum | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index 27b3509f..c5fc6e81 100644 --- a/lib/quantum +++ b/lib/quantum @@ -472,9 +472,8 @@ function _configure_quantum_metadata_agent() { # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent # It is called when q-agt is enabled. function _configure_quantum_plugin_agent() { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default. 
+ # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" # Configure agent for plugin @@ -544,6 +543,9 @@ function _quantum_setup_rootwrap() { chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap + + # Update the root_helper + iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND" } # Configures keystone integration for quantum service and agents From 41bf4520231bb6454333d6acb1e011bfc9976bae Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 28 Jan 2013 14:04:39 -0600 Subject: [PATCH 172/207] Handle nested xtrace states The lib/database abstraction includes the appropriate database file that also contains the $XTRACE bits at entry and exit. The nested XTRACE handling overwrote the value from lib/database. So...make the nested files use their own XTRACE variables. Change-Id: Ibdfc8d7d1e1457a9bc889b781ce176b417789ea1 --- lib/databases/mysql | 4 ++-- lib/databases/postgresql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 95242536..94aedc64 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -5,7 +5,7 @@ # DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace register_database mysql @@ -121,4 +121,4 @@ function database_connection_url_mysql { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 04db714a..2c37f49b 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -5,7 +5,7 @@ # DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -XTRACE=$(set +o | grep xtrace) +PG_XTRACE=$(set +o | grep xtrace) set +o xtrace register_database postgresql @@ -76,4 +76,4 @@ function database_connection_url_postgresql { } # Restore xtrace -$XTRACE +$PG_XTRACE From 95fb0d440ceb4934c3116454cc2ff6349d39fca1 
Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 28 Jan 2013 16:24:14 -0500 Subject: [PATCH 173/207] Run setup_develop for tempest. This setups the development environment for tempest. Without running setup_develop for tempest import tempest.* breaks outside of the tempest directory. Change-Id: I6954733d68125dd116cc9bfa58b9aece674405c3 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 4d91d900..5ad9b320 100644 --- a/lib/tempest +++ b/lib/tempest @@ -55,6 +55,7 @@ BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0" # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { + setup_develop $TEMPEST_DIR local image_lines local images local num_images From d71d6e71b37d97e3fd4922608ae41f9ff53bc4d0 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jan 2013 19:15:57 -0500 Subject: [PATCH 174/207] Dns stops working on precise when network manager is enabled In Precise and Quantal, we nuke the dnsmasq launched by NetworkManager Fixes LP# 993666 Change-Id: I4b39010765e2cbbea1ca3fc3120bf329015b7a56 --- stack.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 005d88e7..4c10c74f 100755 --- a/stack.sh +++ b/stack.sh @@ -967,7 +967,14 @@ fi if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs - sudo killall dnsmasq || true + # Do not kill any dnsmasq instance spawned by NetworkManager + netman_pid=$(pidof NetworkManager || true) + if [ -z "$netman_pid" ]; then + sudo killall dnsmasq || true + else + sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true + fi + clean_iptables rm -rf ${NOVA_STATE_PATH}/networks sudo mkdir -p ${NOVA_STATE_PATH}/networks From 63c6c2b006a3b23ecbb324de93db51d7725ccd52 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 24 Jan 2013 13:13:51 +0000 Subject: [PATCH 175/207] Improved feedback for domU install on 
XS Change-Id: I5b9d07493eb334169fb2643047a014f56ee265fa --- tools/xen/install_os_domU.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index b4fbb699..0e275705 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -236,6 +236,12 @@ SNAME_PREPARED="template_prepared" SNAME_FIRST_BOOT="before_first_boot" function wait_for_VM_to_halt() { + set +x + echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:" + mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') + domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) + port=$(xenstore-read /local/domain/$domid/console/vnc-port) + echo "vncviewer -via $mgmt_ip localhost:${port:2}" while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) @@ -243,10 +249,11 @@ function wait_for_VM_to_halt() { then break else - echo "Waiting for "$GUEST_NAME" to finish installation..." + echo -n "." sleep 20 fi done + set -x } templateuuid=$(xe template-list name-label="$TNAME") @@ -405,12 +412,14 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = # Fail if the expected text is not found ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' + set +x echo "################################################################################" echo "" echo "All Finished!" echo "You can visit the OpenStack Dashboard" echo "at http://$DOMU_IP, and contact other services at the usual ports." else + set +x echo "################################################################################" echo "" echo "All Finished!" 
From 5bd96f967fc83d8fdbed707113b19117e4de05cf Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 29 Jan 2013 15:12:20 -0500 Subject: [PATCH 176/207] Support a flag for setting Keystone Token backend Fixes LP# 1073274 Change-Id: Ib4373a4a4d31e440e40f977f8f7ffa312f2d97be --- lib/keystone | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/keystone b/lib/keystone index 7a70cc41..57146708 100644 --- a/lib/keystone +++ b/lib/keystone @@ -39,6 +39,9 @@ KEYSTONECLIENT_DIR=$DEST/python-keystoneclient KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates +# Select the backend for Tokens +KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} + # Select Keystone's token format # Choose from 'UUID' and 'PKI' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} @@ -108,6 +111,12 @@ function configure_keystone() { # Append the S3 bits iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" + if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + else + iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then # Configure ``keystone.conf`` to use sql iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog From a263ef283e422fc25e7ecff2e9c272eab1a336a6 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jan 2013 21:56:02 -0500 Subject: [PATCH 177/207] Pick just the first route when looking for the host interface The current code will fail miserably if multiple default routes are present (perfectly normal if they have different metrics). 
Fixing the code to pick the first/top entry Fixes LP# 1084470 Change-Id: Ieda16b575685071ff831c92e6b2a29737d6f849b --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 005d88e7..27096670 100755 --- a/stack.sh +++ b/stack.sh @@ -253,7 +253,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} # Find the interface used for the default route -HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} +HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then HOST_IP="" From d9ca2b2fd657031f4d8ff84c0d137d2b9cabb8fb Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 30 Jan 2013 13:52:43 +0000 Subject: [PATCH 178/207] Ensure that debug agent has root_helper in [AGENT] section This is a temporary fix until the Quantum patch lands. Then devstack will be updated to remove all of the root_helper settings in the DEFAULT section. The Quantum patch in Question is: https://review.openstack.org/#/c/20603/ Change-Id: Ie17ae76e8b525481e1002bd941812390cf2e2afc --- lib/quantum | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/quantum b/lib/quantum index c5fc6e81..d5733b3a 100644 --- a/lib/quantum +++ b/lib/quantum @@ -410,6 +410,9 @@ function _configure_quantum_debug_command() { iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Intermediate fix until Quantum patch lands and then line above will + # be cleaned. 
+ iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE From 4196d5565e48608c64fccdd9e17fcc01dd8d06fe Mon Sep 17 00:00:00 2001 From: Jakub Ruzicka Date: Wed, 30 Jan 2013 15:35:54 +0100 Subject: [PATCH 179/207] Introduce get_python_exec_prefix function. get_python_exec_prefix returns the path to the directory where python executables are installed, that is /usr/bin on Fedora and /usr/local/bin everywhere else. It is used to properly locate OpenStack executables. Fixes: bug #1068386 Change-Id: I228498ebe2762568d00757d065e37377ee2c8fb3 --- functions | 16 +++++++++++----- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 2 +- lib/nova | 2 +- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/functions b/functions index 79483785..68aec5d2 100644 --- a/functions +++ b/functions @@ -1110,17 +1110,23 @@ function add_user_to_group() { } +# Get the path to the directory where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module function get_rootwrap_location() { local module=$1 - if is_fedora; then - echo "/usr/bin/$module-rootwrap" - else - echo "/usr/local/bin/$module-rootwrap" - fi + echo "$(get_python_exec_prefix)/$module-rootwrap" } # Get the path to the pip command. 
diff --git a/lib/ceilometer b/lib/ceilometer index 0fae3973..41a5f53d 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -40,7 +40,7 @@ CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} if [ -d $CEILOMETER_DIR/bin ] ; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else - CEILOMETER_BIN_DIR=/usr/local/bin + CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # cleanup_ceilometer() - Remove residual data files, anything left over from previous diff --git a/lib/cinder b/lib/cinder index 5f4f979b..28b3caa5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -47,7 +47,7 @@ CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin else - CINDER_BIN_DIR=/usr/local/bin + CINDER_BIN_DIR=$(get_python_exec_prefix) fi # Name of the lvm volume group to use/create for iscsi volumes diff --git a/lib/glance b/lib/glance index 1c56a675..5d48129d 100644 --- a/lib/glance +++ b/lib/glance @@ -44,7 +44,7 @@ GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json if [[ -d $GLANCE_DIR/bin ]]; then GLANCE_BIN_DIR=$GLANCE_DIR/bin else - GLANCE_BIN_DIR=/usr/local/bin + GLANCE_BIN_DIR=$(get_python_exec_prefix) fi # Glance connection info. Note the port must be specified. 
diff --git a/lib/nova b/lib/nova index 7e5bb996..41162781 100644 --- a/lib/nova +++ b/lib/nova @@ -49,7 +49,7 @@ NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ -d $NOVA_DIR/bin ]]; then NOVA_BIN_DIR=$NOVA_DIR/bin else - NOVA_BIN_DIR=/usr/local/bin + NOVA_BIN_DIR=$(get_python_exec_prefix) fi # Set the paths of certain binaries From 07db713549fab67e7288ebe8e2190f9629b2e9df Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 30 Jan 2013 13:07:25 -0800 Subject: [PATCH 180/207] Improve millisecond logging Always display 3 digits for milliseconds (appends 0s) Based on I3f1461839258be0723e2d3616ec225a830d13029 Change-Id: I48b3f3781b4d34ed6a5fb9e4e78cee919afda6c1 --- lib/cinder | 6 +++--- lib/nova | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 5f4f979b..014fefaf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -193,10 +193,10 @@ function configure_cinder() { if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s 
%(instance)s" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" fi if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then diff --git a/lib/nova b/lib/nova index 7e5bb996..6cf5a5d8 100644 --- a/lib/nova +++ b/lib/nova @@ -424,13 +424,13 @@ function create_nova_conf() { fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT 
instance_usage_audit "True" From a814f22ce49a3674fd6f266f52bf7de990521adc Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 31 Jan 2013 15:21:43 +0000 Subject: [PATCH 181/207] Minor fix to readme - corrected indentation for some commands Change-Id: I0a16c59d258be4ce8bb8cdebfb3d1cbc30ce9d54 --- tools/xen/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index f20ad04b..1cd45cff 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -18,7 +18,7 @@ https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&produ For details on installation, see: http://wiki.openstack.org/XenServer/Install Here are some sample Xenserver network settings for when you are just -getting started (I use settings like this with a lappy + cheap wifi router): +getting started (Settings like this have been used with a laptop + cheap wifi router): * XenServer Host IP: 192.168.1.10 * XenServer Netmask: 255.255.255.0 @@ -29,9 +29,9 @@ Step 2: Download devstack -------------------------- On your XenServer host, run the following commands as root: -wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master -unzip -o master -d ./devstack -cd devstack/*/ + wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master + unzip -o master -d ./devstack + cd devstack/*/ Step 3: Configure your localrc inside the devstack directory ------------------------------------------------------------ From f127e2f316f1161bacdf4cccdbc3e56b2b8a54a8 Mon Sep 17 00:00:00 2001 From: Brad Topol Date: Tue, 22 Jan 2013 10:17:50 -0600 Subject: [PATCH 182/207] Add optional silent install and config of ldap to devstack Edited initial ldap entries and olcdb template file as recommended by Brant. 
Change-Id: I1404cc5c754f878e32a2d10254840d092211e6e6 --- files/apts/ldap | 3 ++ files/ldap/manager.ldif.in | 10 ++++++ files/ldap/openstack.ldif | 21 +++++++++++ files/rpms/ldap | 3 ++ lib/keystone | 15 ++++++++ lib/ldap | 74 ++++++++++++++++++++++++++++++++++++++ stack.sh | 15 ++++++++ 7 files changed, 141 insertions(+) create mode 100644 files/apts/ldap create mode 100644 files/ldap/manager.ldif.in create mode 100644 files/ldap/openstack.ldif create mode 100644 files/rpms/ldap create mode 100644 lib/ldap diff --git a/files/apts/ldap b/files/apts/ldap new file mode 100644 index 00000000..81a00f27 --- /dev/null +++ b/files/apts/ldap @@ -0,0 +1,3 @@ +ldap-utils +slapd # NOPRIME +python-ldap diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in new file mode 100644 index 00000000..e522150f --- /dev/null +++ b/files/ldap/manager.ldif.in @@ -0,0 +1,10 @@ +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +changetype: modify +replace: olcSuffix +olcSuffix: dc=openstack,dc=org +- +replace: olcRootDN +olcRootDN: dc=Manager,dc=openstack,dc=org +- +${LDAP_ROOTPW_COMMAND}: olcRootPW +olcRootPW: ${SLAPPASS} diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif new file mode 100644 index 00000000..287fda45 --- /dev/null +++ b/files/ldap/openstack.ldif @@ -0,0 +1,21 @@ +dn: dc=openstack,dc=org +dc: openstack +objectClass: dcObject +objectClass: organizationalUnit +ou: openstack + +dn: ou=Groups,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Groups + +dn: ou=Users,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Users + +dn: ou=Roles,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Roles + +dn: ou=Projects,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Projects diff --git a/files/rpms/ldap b/files/rpms/ldap new file mode 100644 index 00000000..2f7ab5de --- /dev/null +++ b/files/rpms/ldap @@ -0,0 +1,3 @@ +openldap-servers +openldap-clients +python-ldap diff --git a/lib/keystone b/lib/keystone index 
57146708..866c62e1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -94,6 +94,17 @@ function configure_keystone() { local dburl database_connection_url dburl keystone + if is_service_enabled ldap; then + #Set all needed ldap values + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" + fi + + if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + fi + if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT @@ -283,6 +294,10 @@ function install_keystoneclient() { # install_keystone() - Collect source and prepare function install_keystone() { + # only install ldap if the service has been enabled + if is_service_enabled ldap; then + install_ldap + fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH } diff --git a/lib/ldap b/lib/ldap new file mode 100644 index 00000000..5cb45347 --- /dev/null +++ b/lib/ldap @@ -0,0 +1,74 @@ +# lib/ldap +# Functions to control the installation and configuration of **ldap** + +# ``stack.sh`` calls the entry points in this order: +# + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# install_ldap +# install_ldap() - Collect source and prepare +function install_ldap() { + echo "Installing LDAP inside function" + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + echo "os_VENDOR is $os_VENDOR" + printf "installing" + if is_ubuntu; then + echo "os vendor is Ubuntu" + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace + sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils + #automatically starts LDAP on ubuntu so no need to call start_ldap + elif is_fedora; then + echo "os vendor is Fedora" + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add + start_ldap + fi + + printf "generate password file" + 
SLAPPASS=`slappasswd -s $LDAP_PASSWORD` + + printf "secret is $SLAPPASS\n" + #create manager.ldif + TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` + sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + + #update ldap olcdb + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + + # add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then + printf "LDAP already configured for OpenStack\n" + if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then + # clear LDAP state + clear_ldap_state + # reconfigure LDAP for OpenStack + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi + else + printf "Configuring LDAP for OpenStack\n" + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi +} + +# start_ldap() - Start LDAP +function start_ldap() { + sudo service slapd restart +} + + +# stop_ldap() - Stop LDAP +function stop_ldap() { + sudo service slapd stop +} + +# clear_ldap_state() - Clear LDAP State +function clear_ldap_state() { + ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 46086482..0521ced3 100755 --- a/stack.sh +++ b/stack.sh @@ -306,6 +306,7 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon @@ -475,6 +476,20 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE 
SERVICE AUTHENTICATION." # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." +# Keystone can now optionally install OpenLDAP by adding ldap to the list +# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap). +# If OpenLDAP has already been installed but you need to clear out +# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes +# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the +# Keystone Identity Driver (keystone.identity.backends.ldap.Identity) +# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap) +# in the localrc file. + + +# only request ldap password if the service is enabled +if is_service_enabled ldap; then + read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" +fi # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} From 88a3bc1d8db8c7f912d05bc2fd201b6b2c4f1e86 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 4 Feb 2013 09:16:14 -0500 Subject: [PATCH 183/207] Remove bad options calling quantum-ovs-cleanup --external_network_bridge and --ovs_integration_bridge are no longer a valid options Fixes LP# 1115213 Change-Id: I9af4514a0cc661f4b72b3f0e00407be163c48945 --- lib/quantum_plugins/ovs_base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index d9f6fd0e..f34e8621 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -12,7 +12,7 @@ function is_quantum_ovs_base_plugin() { function _quantum_ovs_base_setup_bridge() { local bridge=$1 - quantum-ovs-cleanup --ovs_integration_bridge $bridge + quantum-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } @@ -39,7 +39,7 @@ function 
_quantum_ovs_base_configure_debug_command() { function _quantum_ovs_base_configure_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - quantum-ovs-cleanup --external_network_bridge $PUBLIC_BRIDGE + quantum-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE # ensure no IP is configured on the public bridge sudo ip addr flush dev $PUBLIC_BRIDGE From 35336282da621d5189bad97c5bddd1840721f632 Mon Sep 17 00:00:00 2001 From: Chris Krelle Date: Sun, 3 Feb 2013 15:48:43 -0800 Subject: [PATCH 184/207] Add dhcp server option to dnsmasq for baremetal This sets the dns option for baremetal deploy clients. this is useful for clients who may require external access during the deployment process. Change-Id: Ibe680d2acaad826e4868223ebfd1f112d7796662 Authored-by: Chris Krelle --- lib/baremetal | 3 +++ stack.sh | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 3cc24291..7c31d1fd 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -90,6 +90,9 @@ else BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} fi +# BM_DNSMASQ_DNS provide dns server to bootstrap clients +BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} + # BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. # This is passed to dnsmasq along with the kernel/ramdisk to # deploy via PXE. 
diff --git a/stack.sh b/stack.sh index 46086482..1550e445 100755 --- a/stack.sh +++ b/stack.sh @@ -1285,8 +1285,8 @@ if is_service_enabled nova && is_baremetal; then sudo pkill dnsmasq || true sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ - --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE - + --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ + ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true screen_it baremetal "nova-baremetal-deploy-helper" From 8407b2de2ad0e83690c9f1b193b50b984a40ddfb Mon Sep 17 00:00:00 2001 From: "Yunhong, Jiang" Date: Thu, 7 Feb 2013 13:48:33 +0800 Subject: [PATCH 185/207] Copy the pipeline configuration file Update the pipeline configuration file for ceilometer Change-Id: I7a46f61391b76447d7973be5c43b7d0360c56da0 Signed-off-by: Yunhong, Jiang --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 41a5f53d..bc37d92b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -73,6 +73,7 @@ function configure_ceilometer() { # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json # the compute and central agents need these credentials in order to From 8d55be31a95043236d52ee891bacae5ea9f5ed37 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 7 Feb 2013 17:16:35 -0600 Subject: [PATCH 186/207] Handle nested xtrace states (Quantum edition) The lib/quantum_* directories include plugin files that also contain the $XTRACE bits at entry and exit. The nested XTRACE handling overwrote the value from lib/quantum. So...make the nested files use their own XTRACE variables. 
Change-Id: Ib5e643371666b21402eef2ec58bfb1dfb7e1ccc4 --- lib/quantum_plugins/bigswitch_floodlight | 4 ++-- lib/quantum_plugins/linuxbridge | 4 ++-- lib/quantum_plugins/openvswitch | 4 ++-- lib/quantum_plugins/ovs_base | 4 ++-- lib/quantum_plugins/ryu | 4 ++-- lib/quantum_thirdparty/bigswitch_floodlight | 4 ++-- lib/quantum_thirdparty/ryu | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index 35276a55..2c928bec 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -2,7 +2,7 @@ # ------------------------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -52,4 +52,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index e8ba68c3..6d5d4e08 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -2,7 +2,7 @@ # --------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace function is_quantum_ovs_base_plugin() { @@ -76,4 +76,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 5415e869..12bc2442 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -2,7 +2,7 @@ # --------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -141,4 +141,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index 
d9f6fd0e..4c334731 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -2,7 +2,7 @@ # ------------------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace function is_quantum_ovs_base_plugin() { @@ -46,4 +46,4 @@ function _quantum_ovs_base_configure_l3_agent() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 86105bc8..f44f4ae3 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -2,7 +2,7 @@ # ------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -60,4 +60,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight index 77aeb61d..60e39248 100644 --- a/lib/quantum_thirdparty/bigswitch_floodlight +++ b/lib/quantum_thirdparty/bigswitch_floodlight @@ -2,7 +2,7 @@ # ------------------------------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} @@ -47,4 +47,4 @@ function stop_bigswitch_floodlight() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu index f11951a3..de8e0861 100644 --- a/lib/quantum_thirdparty/ryu +++ b/lib/quantum_thirdparty/ryu @@ -2,7 +2,7 @@ # ----------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -62,4 +62,4 @@ function stop_ryu() { } # Restore xtrace -$XTRACE +$MY_XTRACE From 429b39d8f854318f2d30d592b71526791f3fac9f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 9 Jan 2013 18:35:55 +0900 Subject: [PATCH 187/207] Quantum/plugins/ryu: minor update for Quantum 
Ryu plugin - updated package list for Ryu ryu plugin/agent needs python-netifaces - quantum-ryu-agent also needs ryu module Change-Id: I5b49efceb65e8139a49a8e82f55ea6aa7d1eebac Signed-off-by: Isaku Yamahata --- files/apts/ryu | 1 + files/rpms/ryu | 1 + lib/quantum_plugins/ryu | 3 +++ lib/quantum_thirdparty/ryu | 10 +++++++++- 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/files/apts/ryu b/files/apts/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/files/rpms/ryu b/files/rpms/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 86105bc8..3f960ea8 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -15,6 +15,9 @@ function quantum_plugin_create_nova_conf() { function quantum_plugin_install_agent_packages() { _quantum_ovs_base_install_agent_packages + + # quantum_ryu_agent requires ryu module + install_ryu } function quantum_plugin_configure_common() { diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu index f11951a3..5717d821 100644 --- a/lib/quantum_thirdparty/ryu +++ b/lib/quantum_thirdparty/ryu @@ -49,8 +49,16 @@ function init_ryu() { echo "${RYU_CONF_CONTENTS}" > $RYU_CONF } +# install_ryu can be called multiple times as quantum_plugins/ryu may call +# this function for quantum-ryu-agent +# Make this function idempotent and avoid cloning same repo many times +# with RECLONE=yes +_RYU_INSTALLED=${_RYU_INSTALLED:-False} function install_ryu() { - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + if [[ "$_RYU_INSTALLED" == "False" ]]; then + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + _RYU_INSTALLED=True + fi } function start_ryu() { From ec06efc607328bce9dc535be79e9539d5edec536 Mon Sep 17 00:00:00 2001 
From: Mate Lakat Date: Fri, 1 Feb 2013 15:16:51 +0000 Subject: [PATCH 188/207] Disable non-dhcp resolv.conf Fixes: bug #1119268 Change-Id: Icf7d420a31eb7a0cb46b2e59a4328f6b640deb57 --- tools/xen/build_xva.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 0e874cfe..b0fd003d 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -44,12 +44,9 @@ if [ ! -d $STAGING_DIR/etc ]; then exit 1 fi -# Configure dns (use same dns as dom0) -# but only when not precise -if [ "$UBUNTU_INST_RELEASE" != "precise" ]; then - cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf -elif [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then - echo "Configuration without DHCP not supported on Precise" +# Only support DHCP for now - don't support how different versions of Ubuntu handle resolv.conf +if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then + echo "Configuration without DHCP not supported" exit 1 fi From 48352ee7c05cf79734abf74a2e7ac47425babb3a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 12:50:38 -0600 Subject: [PATCH 189/207] Create tools/install_prereqs.sh * Factor system package prereq installs out to tools/install_prereqs.sh * Set minimum time between runs with PREREQ_RERUN_HOURS default = 2 hours * Create export_proxy_variables * Force an update with install_prereqs.sh -f or by setting FORCE_PREREQ=true Fixed an issue with exit/return in tools/install_prereqs.sh Change-Id: I9a62090ad2f900b9b150cacb9cb02b326cb46972 --- functions | 21 ++++++++++ stack.sh | 49 ++++-------------------- tools/install_prereqs.sh | 82 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 41 deletions(-) create mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 68aec5d2..3f26b7fd 100644 --- a/functions +++ b/functions @@ -80,6 +80,27 @@ function die_if_not_set() { } +# HTTP and HTTPS proxy servers are supported via the usual 
environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + # Grab a numbered field from python prettytable output # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. diff --git a/stack.sh b/stack.sh index 46086482..7a8bd802 100755 --- a/stack.sh +++ b/stack.sh @@ -30,9 +30,8 @@ source $TOP_DIR/functions GetDistro - -# Settings -# ======== +# Global Settings +# =============== # ``stack.sh`` is customizable through setting environment variables. If you # want to override a setting you can set and export it:: @@ -62,33 +61,18 @@ fi source $TOP_DIR/stackrc -# Proxy Settings +# Local Settings # -------------- -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` if necessary or on the command line:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy -fi -if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy -fi -if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy -fi +# Make sure the proxy config is visible to sub-processes +export_proxy_variables # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Sanity Check -# ============ +# ------------ # Clean up last environment var cache if [[ -r $TOP_DIR/.stackenv ]]; then @@ -631,26 +615,9 @@ set -o xtrace # OpenStack uses a fair number of other projects. # Install package requirements +# Source it so the entire environment is available echo_summary "Installing package prerequisites" -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ $SYSLOG != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi +source $TOP_DIR/tools/install_prereqs.sh install_rpc_backend diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh new file mode 100755 index 00000000..4d151db2 --- /dev/null +++ b/tools/install_prereqs.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# **install_prereqs.sh** + +# Install system package prerequisites +# +# install_prereqs.sh [-f] +# +# -f Force an install run now + +if [[ -n "$1" && "$1" = "-f" ]]; then + FORCE_PREREQ=1 +fi + +# If TOP_DIR is set we're being sourced rather than running stand-alone +# or in a sub-shell +if [[ -z "$TOP_DIR" ]]; then + # 
Keep track of the devstack directory + TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + + # Import common functions + source $TOP_DIR/functions + + # Determine what system we are running on. This provides ``os_VENDOR``, + # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` + # and ``DISTRO`` + GetDistro + + # Needed to get ``ENABLED_SERVICES`` + source $TOP_DIR/stackrc + + # Prereq dirs are here + FILES=$TOP_DIR/files +fi + +# Minimum wait time +PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} +PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} +PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) + +NOW=$(date "+%s") +LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." + return 0 +fi + +# Make sure the proxy config is visible to sub-processes +export_proxy_variables + + +# Install Packages +# ================ + +# Install package requirements +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER From 5a3f90bea7296a9b9ec5b99452c03280a056a232 Mon Sep 17 00:00:00 2001 From: Daniel Salinas Date: Fri, 8 Feb 2013 17:17:53 -0600 Subject: [PATCH 190/207] Fixes openvz driver support for nova when deployed with devstack. 
Change-Id: I70599333a09267cbe2cd8afd075658f3f7d8bc9d Fixes: bug #1119859 --- stack.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 8a814a01..11fafc12 100755 --- a/stack.sh +++ b/stack.sh @@ -1092,9 +1092,7 @@ if is_service_enabled nova; then elif [ "$VIRT_DRIVER" = 'openvz' ]; then echo_summary "Using OpenVZ virtualization driver" - # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. - # Replace connection_type when this is fixed. - # iniset $NOVA_CONF DEFAULT compute_driver "openvz.connection.OpenVzConnection" + iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver" iniset $NOVA_CONF DEFAULT connection_type "openvz" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" From a52095b18c112ac301b336c36c2affd6471ee61d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sat, 9 Feb 2013 07:24:33 -0500 Subject: [PATCH 191/207] allow resize tests to run resize tests were turned off explicitly, which they shouldn't be turn these back on in a default config as we've set nova to allow resize_to_same_host. 
Change-Id: Iacedf11e56aff3a541f1b67b208e8ed3a30b2c44 --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index cb172a80..49d0da7a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -209,7 +209,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED - iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR # Note(nati) current tempest don't create network for each tenant From 37a8d157a11abe55736707fdec2fc8a273027a2f Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Tue, 15 Jan 2013 17:27:34 -0800 Subject: [PATCH 192/207] Add quantum support for baremetal virtual env. Add quantum networking support when nova-baremetal is active. This creates a ctlplane network and br-ctlplane bridge, and moves IPs from PHYSICAL_INTERFACE to OVS_PHYSICAL_BRIDGE. Change-Id: If2026c01b93de0ccc7c3f9112de07b3a9c01ac20 --- lib/quantum | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index d5733b3a..5b045580 100644 --- a/lib/quantum +++ b/lib/quantum @@ -270,8 +270,19 @@ function create_quantum_initial_network() { # Create a small network # Since quantum command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. 
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + if is_baremetal; then + sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + sudo ifconfig $OVS_PHYSICAL_BRIDGE up + else + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + fi if is_service_enabled q-l3; then # Create a router, and add the private subnet as one of its interfaces From 712feb663c314da65cd215f3dae3045bf2c8b057 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Mon, 11 Feb 2013 23:45:19 +0900 Subject: [PATCH 193/207] Move auth_token configurations to quantum.conf keystone auth_token middleware now allows quantum to have auth_token configuration in quantum.conf. auth_token middleware supports auth_token configuration both in api-paste.ini and quantum.conf, so we can apply this change at any timing. 
Change-Id: Ie5dd63e6c6938d2c8118e0f6090ef057c21a772a --- lib/quantum | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index d5733b3a..da4d6f60 100644 --- a/lib/quantum +++ b/lib/quantum @@ -507,7 +507,11 @@ function _configure_quantum_service() { iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken + _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken + # Comment out keystone authtoken configuration in api-paste.ini + # It is required to avoid any breakage in Quantum where the sample + # api-paste.ini has authtoken configurations. + _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken # Configure plugin quantum_plugin_configure_service @@ -573,6 +577,21 @@ function _quantum_setup_keystone() { rm -f $QUANTUM_AUTH_CACHE_DIR/* } +function _quantum_commentout_keystone_authtoken() { + local conf_file=$1 + local section=$2 + + inicomment $conf_file $section auth_host + inicomment $conf_file $section auth_port + inicomment $conf_file $section auth_protocol + inicomment $conf_file $section auth_url + + inicomment $conf_file $section admin_tenant_name + inicomment $conf_file $section admin_user + inicomment $conf_file $section admin_password + inicomment $conf_file $section signing_dir +} + function _quantum_setup_interface_driver() { quantum_plugin_setup_interface_driver $1 } From b205cc8ff17885790a38a55bb5ee9facfac769cf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 11 Feb 2013 17:34:39 -0600 Subject: [PATCH 194/207] Clean up configure_nova() a bit Change-Id: I2228221051a5a4413a34ca359856d90794fce69a --- lib/nova | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/lib/nova b/lib/nova index 1681af77..9ecf4ebf 100644 --- a/lib/nova +++ b/lib/nova @@ -166,20 +166,13 @@ function configure_nova() { # Get the 
sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # Rewrite the authtoken configuration for our Keystone service. - # This is a bit defensive to allow the sample file some variance. - sed -e " - /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME - /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; - /admin_user/s/^.*$/admin_user = nova/; - /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - " -i $NOVA_API_PASTE_INI iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST if is_service_enabled tls-proxy; then iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL fi + iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova + iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD fi iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR From 58e21349644f42d4aff078e4da26ecd98d76ba19 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 11 Feb 2013 16:48:12 -0800 Subject: [PATCH 195/207] Add option to make screen starting more robust. We have seen a number of failures in ci where a host is overloaded and the 1.5 second sleep before stuffing data into screen is not long enough. This means the service doesn't start and tests fail. This change adds a config option to allow us to turn off the developer friendly option to stuff text into the screen. When SCREEN_DEV is set to False it will use a simple exec in screen instead of stuff. This should be far more reliable because we don't have to wait for bash to start. 
Change-Id: I7f1b5dbf5329b23507cb767d54a2795be0d73e01 --- functions | 27 ++++++++++++++++----------- stackrc | 7 +++++++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/functions b/functions index 3f26b7fd..79c82a45 100644 --- a/functions +++ b/functions @@ -738,26 +738,31 @@ function restart_service() { # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { - NL=`echo -ne '\015'` SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + SCREEN_DEV=`trueorfalse True $SCREEN_DEV` if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" screen -S $SCREEN_NAME -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + if [[ "$SCREEN_DEV" = "True" ]]; then + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + NL=`echo -ne '\015'` + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"" fi - screen -S $SCREEN_NAME -p $1 -X 
stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" fi } diff --git a/stackrc b/stackrc index 789fc82d..91f4e2b5 100644 --- a/stackrc +++ b/stackrc @@ -29,6 +29,13 @@ ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-s # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# Whether to use 'dev mode' for screen windows. Dev mode works by +# stuffing text into the screen windows so that a developer can use +# ctrl-c, up-arrow, enter to restart the service. Starting services +# this way is slightly unreliable, and a bit slower, so this can +# be disabled for automated testing by setting this value to false. +SCREEN_DEV=True + # Repositories # ------------ From a1a61c8a5bc318a1de81fb9724045f189a0d8b85 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 13 Feb 2013 19:20:03 +0000 Subject: [PATCH 196/207] Fix name of xen dom0 rootwrap for quantum ovs. * Supports blueprint xenapi-ovs Change-Id: I1d5ac0ce1f226aa3f6c0d7f7bd1eb968aef1eeba --- lib/quantum_plugins/openvswitch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 12bc2442..181e7e71 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -75,7 +75,7 @@ function quantum_plugin_configure_plugin_agent() { # Nova will always be installed along with quantum for a domU # devstack install, so it should be safe to rely on nova.conf # for xenapi configuration. - Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-dom0 $NOVA_CONF" + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" # Under XS/XCP, the ovs agent needs to target the dom0 # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. 
From 25c42f6eb4955d0032c911c991db8b72643ea7c4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 14 Feb 2013 15:00:02 +0100 Subject: [PATCH 197/207] Remove old tempest configuration variables Change-Id: I4c15c876514e1a8071a557ce79f56266a83b24b8 --- lib/tempest | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/lib/tempest b/lib/tempest index 49d0da7a..e43f6d75 100644 --- a/lib/tempest +++ b/lib/tempest @@ -205,12 +205,7 @@ function configure_tempest() { iniset $TEMPEST_CONF identity admin_password "$password" # Compute - iniset $TEMPEST_CONF compute password "$password" # DEPRECATED - iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED - iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED - iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED iniset $TEMPEST_CONF compute change_password_available False - iniset $TEMPEST_CONF compute compute_log_level ERROR # Note(nati) current tempest don't create network for each tenant # so reuse same tenant for now if is_service_enabled quantum; then @@ -231,10 +226,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR # DEPRECATED - iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # DEPRECATED - iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa # DEPRECATED - iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova # DEPRECATED # Whitebox iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR @@ -245,21 +236,11 @@ function configure_tempest() { iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # image - iniset $TEMPEST_CONF image password "$password" # DEPRECATED - - # identity-admin - iniset $TEMPEST_CONF 
"identity-admin" password "$password" # DEPRECATED - # compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED - # network admin - iniset $TEMPEST_CONF "network-admin" password "$password" # DEPRECATED - # network iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network password "$password" # DEPRECATED iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" From 24f796149a4cb7cf588d5481cf2786c4c9fe735d Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Wed, 13 Feb 2013 21:01:18 +0900 Subject: [PATCH 198/207] Use 'nova baremetal-*' instead of nova-baremetal-manage Change-Id: Iee4dd721387dce39c8e46ea0e1e428513498c5a0 --- lib/baremetal | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 7c31d1fd..26593867 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -400,15 +400,10 @@ function upload_baremetal_image() { } function clear_baremetal_of_all_nodes() { - list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' ) + list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) for node in $list do - nova-baremetal-manage node delete $node - done - list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' ) - for iface in $list - do - nova-baremetal-manage interface delete $iface + nova baremetal-node-delete $node done } @@ -420,16 +415,18 @@ function add_baremetal_node() { mac_1=${1:-$BM_FIRST_MAC} mac_2=${2:-$BM_SECOND_MAC} - id=$(nova-baremetal-manage node create \ - --host=$BM_HOSTNAME --prov_mac=$mac_1 \ - --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \ - --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \ - --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \ - ) + id=$(nova baremetal-node-create \ + 
--pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" - id2=$(nova-baremetal-manage interface create \ - --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \ - ) + id2=$(nova baremetal-add-interface "$id" "$mac_2" ) [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" } From da85cdadd9a2331a28d4913b878e18c9c5abaccf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 15 Feb 2013 11:07:14 -0600 Subject: [PATCH 199/207] Exercise cleanup * Make common steps consistent * Make comments consistent aggregates.sh boot_from_volume.sh client-args.sh client-env.sh euca.sh floating_ips.sh sec_groups.sh swift.sh volumes.sh Change-Id: Ib93dcdfdead93c259e3cd184fbc5ccc0a4a87c9a --- exercises/aggregates.sh | 19 ++-- exercises/boot_from_volume.sh | 174 ++++++++++++++++++++-------------- exercises/client-args.sh | 34 ++++++- exercises/client-env.sh | 28 ++++++ exercises/euca.sh | 5 +- exercises/floating_ips.sh | 130 +++++++++++++------------ exercises/sec_groups.sh | 8 +- exercises/swift.sh | 8 +- exercises/volumes.sh | 114 +++++++++++----------- 9 files changed, 302 insertions(+), 218 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index deb1a038..ae3198f9 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -39,9 +39,8 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# run test as the admin user -_OLD_USERNAME=$OS_USERNAME -OS_USERNAME=admin +# Test as the admin user +. 
openrc admin admin # Create an aggregate @@ -54,7 +53,7 @@ AGGREGATE_A_ZONE=nova exit_if_aggregate_present() { aggregate_name=$1 - if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then + if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then echo "SUCCESS $aggregate_name not present" else echo "ERROR found aggregate: $aggregate_name" @@ -64,8 +63,8 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME -AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1` -AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1` +AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) +AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" @@ -125,7 +124,7 @@ nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGAT if [ "$VIRT_DRIVER" == "xenserver" ]; then echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" fi -FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1` +FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) # Make sure can add two aggregates to same host nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST @@ -142,12 +141,6 @@ nova aggregate-delete $AGGREGATE_ID nova aggregate-delete $AGGREGATE2_ID exit_if_aggregate_present $AGGREGATE_NAME - -# Test complete -# ============= -OS_USERNAME=$_OLD_USERNAME -echo "AGGREGATE TEST PASSED" - set +o xtrace echo "**************************************************" echo "End DevStack Exercise: $0" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 5ada2370..679091bb 100755 --- 
a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,52 +44,80 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} -# Instance type -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +# Security group name +SECGROUP=${SECGROUP:-boot_secgroup} -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-bfv-inst} +VOL_NAME=${VOL_NAME:-ex-vol-bfv} -# Default user -DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros} -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} +# Launching a server +# ================== + +# List servers for tenant: +nova list +# Images +# ------ -# Launching servers -# ================= +# List the images available +glance image-list # Grab the id of the image to launch -IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1` -die_if_not_set IMAGE "Failure getting image" +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Security Groups +# --------------- + +# List security groups +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi -# Instance and volume names -VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance} -VOL_NAME=${VOL_NAME:-test_volume} +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# Clean-up from previous runs -nova delete $VOL_INSTANCE_NAME || true +# List secgroup rules +nova secgroup-list-rules $SECGROUP -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi +# Set up instance +# --------------- -# Configure Security Groups -nova secgroup-delete $SECGROUP || true -nova secgroup-create $SECGROUP "$SECGROUP description" -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +# List flavors +nova flavor-list -# Determinine instance type -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" 
+ exit 1 fi # Setup Keypair @@ -99,78 +127,80 @@ nova keypair-delete $KEY_NAME || true nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE -# Delete the old volume -cinder delete $VOL_NAME || true +# Set up volume +# ------------- -# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers -if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then - nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true -fi - -# Allocate floating ip -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` - -# Make sure the ip gets allocated -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" +# Delete any old volume +cinder delete $VOL_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" exit 1 fi # Create the bootable volume -cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE - -# Wait for volume to activate +start_time=$(date +%s) +cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi +end_time=$(date +%s) +echo "Completed cinder create in $((end_time - start_time)) seconds" + +# Get volume ID +VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) +die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" -VOLUME_ID=`cinder list | grep $VOL_NAME | get_field 1` +# Boot instance +# ------------- -# Boot instance from volume! This is done with the --block_device_mapping param. -# The format of mapping is: +# Boot using the --block_device_mapping param. 
The format of mapping is: # =::: # Leaving the middle two fields blank appears to do-the-right-thing -VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2` -die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME" +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# Add floating ip to our server -nova add-floating-ip $VOL_VM_UUID $FLOATING_IP - -# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set IP "Failure retrieving IP address" -# Make sure our volume-backed instance launched -ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Remove floating ip from volume-backed instance -nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP +# Clean up +# -------- # Delete volume backed instance -nova delete $VOL_INSTANCE_NAME || \ - die "Failure deleting instance volume $VOL_INSTANCE_NAME" +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $VM_NAME not deleted" + exit 1 +fi -# Wait till our volume is no longer in-use +# Wait for volume to be released if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" + echo "Volume $VOL_NAME not released" exit 1 fi -# Delete the volume -cinder delete $VOL_NAME || \ - die "Failure deleting volume $VOLUME_NAME" - -# De-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die "Failure deleting floating IP $FLOATING_IP" +# Delete volume +start_time=$(date +%s) +cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" + exit 1 +fi +end_time=$(date +%s) +echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/exercises/client-args.sh b/exercises/client-args.sh index b3e2ad8d..894da742 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -8,6 +8,14 @@ echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + # Settings # ======== @@ -63,7 +71,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG $ARGS catalog --service identity; then + if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -82,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then else # Test OSAPI echo -e "\nTest Nova" - if nova $TENANT_ARG $ARGS flavor-list; then + if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then STATUS_NOVA="Succeeded" else STATUS_NOVA="Failed" @@ -91,6 +99,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder $TENANT_ARG_DASH $ARGS_DASH list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -116,7 +141,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $TENANT_ARG $ARGS stat; then + if swift $TENANT_ARG_DASH $ARGS_DASH stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" @@ -125,6 +150,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -137,6 +164,7 @@ function report() { echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 68c0e5ad..c84e84e5 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -8,6 +8,14 @@ echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" +# This script exits 
on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + # Settings # ======== @@ -99,6 +107,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -133,6 +158,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -146,6 +173,7 @@ echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA report "EC2" $STATUS_EC2 +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/euca.sh b/exercises/euca.sh index 7b35f6fe..8b15da8d 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -44,7 +44,7 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMI-format image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -56,6 +56,7 @@ SECGROUP=${SECGROUP:-euca_secgroup} # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Add a secgroup if ! euca-describe-groups | grep -q $SECGROUP; then @@ -174,7 +175,7 @@ if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | exit 1 fi -# Delete group +# Delete secgroup euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8b18e6f4..34ab69d9 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -2,8 +2,7 @@ # **floating_ips.sh** - using the cloud can be fun -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package to work out the instance connectivity +# Test instance connectivity with the ``nova`` command from ``python-novaclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -42,7 +41,7 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -54,6 +53,9 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +# Instance name +VM_NAME="ex-float" + # Launching a server # ================== @@ -64,19 +66,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -88,81 +88,79 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi fi -# Determinine instance type -# ------------------------- +# Configure Security Group Rules +if ! 
nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# List of instance types: +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- + +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-float" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" 
exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Security Groups & Floating IPs -# ------------------------------ - -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - # allow icmp traffic (ping) - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - echo "Security group rule not created" - exit 1 - fi -fi - -# List rules for a secgroup -nova secgroup-list-rules $SECGROUP +# Floating IPs +# ------------ -# allocate a floating ip from default pool -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` -die_if_not_set FLOATING_IP "Failure creating floating IP" +# Allocate a floating IP from the default pool +FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) +die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" -# list floating addresses +# List floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then echo "Floating IP not allocated" exit 1 fi -# add floating ip to our server +# Add floating IP to our server nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die "Failure adding floating IP $FLOATING_IP to $NAME" + die "Failure adding floating IP $FLOATING_IP to $VM_NAME" -# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT if ! 
is_service_enabled quantum; then # Allocate an IP from second floating pool - TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` + TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" # list floating addresses @@ -172,34 +170,40 @@ if ! is_service_enabled quantum; then fi fi -# dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP" +# Dis-allow icmp traffic (ping) +nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ + die "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds + # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail fi +# Clean up +# -------- + if ! is_service_enabled quantum; then # Delete second floating IP - nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" + nova floating-ip-delete $TEST_FLOATING_IP || \ + die "Failure deleting floating IP $TEST_FLOATING_IP" fi -# de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" - -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" +# Delete the floating ip +nova floating-ip-delete $FLOATING_IP || \ + die "Failure deleting floating IP $FLOATING_IP" +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" # Wait for termination if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup -nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +# Delete secgroup +nova secgroup-delete $SECGROUP || \ + die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index fbd9c8e1..a33c9c63 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -2,7 +2,7 @@ # **sec_groups.sh** -# Test security groups via the command line tools that ship with it. +# Test security groups via the command line echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -41,7 +41,7 @@ source $TOP_DIR/exerciserc nova secgroup-list # Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="sec-group-$(openssl rand -hex 4)" +SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" nova secgroup-create $SEC_GROUP_NAME 'a test security group' # Add some rules to the secgroup @@ -65,8 +65,10 @@ done for RULE in "${RULES_TO_ADD[@]}"; do nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done -nova secgroup-delete $SEC_GROUP_NAME +# Delete secgroup +nova secgroup-delete $SEC_GROUP_NAME || \ + die "Failure deleting security group $SEC_GROUP_NAME" set +o xtrace echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index 4cd487bc..a75f955a 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the command line tools that ship with it. 
+# Test swift via the ``swift`` command line from ``python-swiftclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" @@ -33,13 +33,13 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# Container name -CONTAINER=ex-swift - # If swift is not enabled we exit with exitcode 55 which mean # exercise is skipped. is_service_enabled swift || exit 55 +# Container name +CONTAINER=ex-swift + # Testing Swift # ============= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 45b8645b..45cb0c8e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -2,7 +2,7 @@ # **volumes.sh** -# Test cinder volumes with the cinder command from python-cinderclient +# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" @@ -45,12 +45,16 @@ is_service_enabled cinder || exit 55 # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name SECGROUP=${SECGROUP:-vol_secgroup} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-vol-inst} +VOL_NAME="ex-vol-$(openssl rand -hex 4)" + # Launching a server # ================== @@ -61,19 +65,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -93,126 +95,122 @@ if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 fi -# determinine instance type -# ------------------------- +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- -# List of instance types: +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-vol" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" 
exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" -# for single node deployments, we can ping private ips +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Volumes # ------- -VOL_NAME="myvol-$(openssl rand -hex 4)" - # Verify it doesn't exist -if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then +if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then echo "Volume $VOL_NAME already exists" exit 1 fi # Create a new volume -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE -if [[ $? != 0 ]]; then - echo "Failure creating volume $VOL_NAME" - exit 1 -fi - -start_time=`date +%s` +start_time=$(date +%s) +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID -VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1` +VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb -start_time=`date +%s` +start_time=$(date +%s) nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die "Failure attaching volume $VOL_NAME to $NAME" + die "Failure attaching volume $VOL_NAME to $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - echo "Volume $VOL_NAME not attached to $NAME" + echo "Volume $VOL_NAME not attached to $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-attach in $((end_time - start_time)) seconds" -VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1` +VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" exit 1 fi +# Clean up +# -------- + # Detach volume -start_time=`date +%s` -nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" +start_time=$(date +%s) +nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not detached from $NAME" + echo "Volume $VOL_NAME not detached from $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume -start_time=`date +%s` +start_time=$(date +%s) cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" - -# Wait for termination +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace From f03bafeb84ed87e5e5fd219e063ee1eb067f1c49 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 12 Feb 2013 10:58:28 -0600 Subject: [PATCH 200/207] Refactor init functions to simplify Grenade upgrades * Glance: create_glance_cache_dir() * Cinder: create_cinder_cache_dir() and create_cinder_volume_group() * Nova: create_nova_cache_dir() and create_nova_keys_dir() * Random tidy-up changes Change-Id: I20d995d4c2e5facfb912ee03a6cda6c56f20bbe9 --- lib/cinder | 59 ++++++++++++++++++++++++++++++++++-------------------- lib/glance | 22 ++++++++++++-------- lib/nova | 36 ++++++++++++++++++--------------- 3 files changed, 71 insertions(+), 46 deletions(-) diff --git a/lib/cinder b/lib/cinder index fd5f8cf1..4d1ab420 100644 --- a/lib/cinder +++ b/lib/cinder @@ -254,37 +254,55 @@ create_cinder_accounts() { fi } +# create_cinder_cache_dir() - Part of the init_cinder() process +function create_cinder_cache_dir() { + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* +} + +create_cinder_volume_group() { + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. + + if ! 
sudo vgs $VOLUME_GROUP; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + + # Only create if the file doesn't already exist + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! sudo vgs $VOLUME_GROUP; then + sudo vgcreate $VOLUME_GROUP $DEV + fi + fi + + mkdir -p $CINDER_STATE_PATH/volumes +} + # init_cinder() - Initialize database and volume group function init_cinder() { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") if is_service_enabled $DATABASE_BACKENDS; then - # (re)create cinder database + # (Re)create cinder database recreate_database cinder utf8 - # (re)create cinder database + # Migrate cinder database $CINDER_BIN_DIR/cinder-manage db sync fi if is_service_enabled c-vol; then - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - mkdir -p $CINDER_STATE_PATH/volumes + create_cinder_volume_group if sudo vgs $VOLUME_GROUP; then if is_fedora || is_suse; then @@ -299,10 +317,7 @@ function init_cinder() { fi fi - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* + create_cinder_cache_dir } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 5d48129d..80d3902a 100644 --- a/lib/glance +++ b/lib/glance @@ -141,6 +141,17 @@ function configure_glance() { cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON } +# create_glance_cache_dir() - Part of the init_glance() process +function create_glance_cache_dir() { + # Create cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* +} + # init_glance() - Initialize databases, etc. 
function init_glance() { # Delete existing images @@ -151,18 +162,13 @@ function init_glance() { rm -rf $GLANCE_CACHE_DIR mkdir -p $GLANCE_CACHE_DIR - # (re)create glance database + # (Re)create glance database recreate_database glance utf8 + # Migrate glance database $GLANCE_BIN_DIR/glance-manage db_sync - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api - rm -f $GLANCE_AUTH_CACHE_DIR/api/* - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry - rm -f $GLANCE_AUTH_CACHE_DIR/registry/* + create_glance_cache_dir } # install_glanceclient() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 1681af77..e3597196 100644 --- a/lib/nova +++ b/lib/nova @@ -453,6 +453,14 @@ function create_nova_conf() { done } +# create_nova_cache_dir() - Part of the init_nova() process +function create_nova_cache_dir() { + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* +} + function create_nova_conf_nova_network() { iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" @@ -463,14 +471,17 @@ function create_nova_conf_nova_network() { fi } +# create_nova_keys_dir() - Part of the init_nova() process +function create_nova_keys_dir() { + # Create keys dir + sudo mkdir -p ${NOVA_STATE_PATH}/keys + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} +} + # init_nova() - Initialize databases, etc. function init_nova() { - # Nova Database - # ------------- - - # All nova components talk to a central database. We will need to do this step - # only once for an entire cluster. - + # All nova components talk to a central database. + # Only do this step once on the API node for an entire cluster. 
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova database # Explicitly use latin1: to avoid lp#829209, nova expects the database to @@ -478,7 +489,7 @@ function init_nova() { # 082_essex.py in nova) recreate_database nova latin1 - # (Re)create nova database + # Migrate nova database $NOVA_BIN_DIR/nova-manage db sync # (Re)create nova baremetal database @@ -488,15 +499,8 @@ function init_nova() { fi fi - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* - - # Create the keys folder - sudo mkdir -p ${NOVA_STATE_PATH}/keys - # make sure we own NOVA_STATE_PATH and all subdirs - sudo chown -R $STACK_USER ${NOVA_STATE_PATH} + create_nova_cache_dir + create_nova_keys_dir } # install_novaclient() - Collect source and prepare From f29bb32d5c72cbc3b4fed49936982fbbc00690a4 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Wed, 23 Jan 2013 03:00:16 +0000 Subject: [PATCH 201/207] Devstack changes for Brocade Quantum Plugin blueprint brocade-quantum-plugin Change-Id: I238ee0a89742ac904ead0f4700f027e841f04fe1 --- lib/quantum_plugins/brocade | 49 +++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 lib/quantum_plugins/brocade diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade new file mode 100644 index 00000000..c372c19f --- /dev/null +++ b/lib/quantum_plugins/brocade @@ -0,0 +1,49 @@ +# Brocade Quantum Plugin +# ---------------------- + +# Save trace setting +BRCD_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/brocade + 
Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_DB_NAME="brcd_quantum" + Q_PLUGIN_CLASS="quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2" +} + +function quantum_plugin_configure_debug_command() { + : +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + : +} + +function quantum_plugin_configure_plugin_agent() { + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$BRCD_XTRACE From 25ebbcd1a7bc95e69ad32b19245ce0990a29eaa7 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Sun, 17 Feb 2013 15:45:55 +0000 Subject: [PATCH 202/207] Bring back screen logs for noninteractive runs. * functions(screen_it): Prior to 58e2134 screen logs were generated even when run without $SCREEN_DEV. We want to be able to capture these with devstack-gate for later inspection, so this patch reintroduces that capability. 
Change-Id: Ife127b47d7700878e02285281854595bc6585972 --- functions | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 79c82a45..ae63436a 100644 --- a/functions +++ b/functions @@ -747,17 +747,19 @@ function screen_it { screen_rc "$1" "$2" screen -S $SCREEN_NAME -X screen -t $1 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + if [[ "$SCREEN_DEV" = "True" ]]; then # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens sleep 1.5 - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi NL=`echo -ne '\015'` screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else From 5096ba79c1ba8ebf83933054d69e21741984e54c Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Mon, 18 Feb 2013 18:49:01 +0900 Subject: [PATCH 203/207] Correct syntax error in stack.sh for baremtal dhcp-option Change-Id: I0e29bf2e429b65065fdcd9e38b16a7ab6c04d917 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0f009fc6..c9ca43fc 100755 --- a/stack.sh +++ b/stack.sh @@ -1266,7 +1266,7 @@ if is_service_enabled nova && is_baremetal; then sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ - 
${$BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} + ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true screen_it baremetal "nova-baremetal-deploy-helper" From 8396d4f27c296eed4ced5e44d868e6d90257e73e Mon Sep 17 00:00:00 2001 From: Stephen Ma Date: Mon, 18 Feb 2013 05:32:59 -0800 Subject: [PATCH 204/207] Added option Q_FLOATING_ALLOCATION_POOL to define an allocation-pool for floating IPs. Change-Id: If31b34ebb8095aa260c19292cf63826522908db9 --- lib/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index f3a3ec4d..61a5218e 100644 --- a/lib/quantum +++ b/lib/quantum @@ -297,7 +297,7 @@ function create_quantum_initial_network() { quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. Configure the external network as router gw EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then From 86d8fbb59c774569d9b273302e22ae5a664cdaff Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Tue, 12 Feb 2013 21:58:33 -0800 Subject: [PATCH 205/207] Baremetal should start using scheduler filters. The baremetal driver should start using scheduler filters, particularly the RetryFilter and ComputeFilter, as some functionality in Nova depends on these. 
However, the ComputeCapabilitiesFilter currently does not work with baremetal in devstack due to an order-of-operations issue, so we explicitly list the other filters that we do want. Change-Id: Icc4b074c6f99e3e4ffbcf5eef0f9bb6f0f5c1996 --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 0f009fc6..ea5ddb28 100755 --- a/stack.sh +++ b/stack.sh @@ -1073,9 +1073,11 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager - iniset $NOVA_CONF DEFAULT scheduler_default_filters AllHostsFilter - iniset $NOVA_CONF baremetal driver $BM_DRIVER + # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485 + # As a work around, we disable CCFilter by explicitly enabling all the other default filters. 
+ iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER iniset $NOVA_CONF baremetal tftp_root /tftpboot From 2bccb8604e3ef9ccfd3e33471b049a2ca6dfeb9c Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 20 Feb 2013 16:31:34 +0000 Subject: [PATCH 206/207] Remove ssh tests disabling as #1074039 is fixed Removed the setting completely so that this is left under control of tempest.conf.sample Fixes LP# 1130750 Change-Id: I710f1c111e66834f4bc7020cad82c04bf495441c --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index e43f6d75..364323de 100644 --- a/lib/tempest +++ b/lib/tempest @@ -212,8 +212,6 @@ function configure_tempest() { TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - #Skip until #1074039 is fixed - iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 From 0f2d954b82e44d7bbd646e200510beb1ca3e469e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 20 Feb 2013 17:51:19 -0600 Subject: [PATCH 207/207] Fix create_userrc.sh private key hang tools/create_userrc.sh hangs in a couple of mv commands now that private keys are created with mode 400. mv is prompting to override the permissions, so let's just -f it all. 
Change-Id: I8fbb24da6582edcff741653ffdf8bf683b79851a --- tools/create_userrc.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 55cb8fac..619d63f7 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash -#Warning: This script just for development purposes +# **create_userrc.sh** + +# Pre-create rc files and credentials for the default users. + +# Warning: This script just for development purposes ACCOUNT_DIR=./accrc @@ -164,12 +168,12 @@ function add_entry(){ local ec2_cert="$rcfile-cert.pem" local ec2_private_key="$rcfile-pk.pem" # Try to preserve the original file on fail (best effort) - mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null - mv "$ec2_cert" "$ec2_cert.old" &>/dev/null + mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null + mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null # It will not create certs when the password is incorrect if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then - mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null - mv "$ec2_cert.old" "$ec2_cert" &>/dev/null + mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null + mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null fi cat >"$rcfile" <